text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
import argparse
import math
import random
import re
import sys
MIN_ENTROPY = 44
def choose(words, nb):
    """Pick `nb` words uniformly at random using a cryptographically
    secure source (os.urandom via SystemRandom)."""
    secure = random.SystemRandom()
    return [secure.choice(words) for _ in range(nb)]
class Wordlist():
    """Indexable word collection that knows its per-word entropy."""

    def __init__(self, words):
        # Materialise the iterable once; the length is cached so len()
        # stays cheap even for large lists.
        word_list = list(words)
        self.words = word_list
        self._len = len(word_list)
        # Bits of entropy contributed by one uniformly chosen word.
        self.entropy_per_word = math.log(len(word_list), 2)

    def __len__(self):
        return self._len

    def __getitem__(self, key):
        return self.words[key]
def wordlist_from_file(wordfile):
    """Build a Wordlist from an open word file.

    Detects the diceware format ("11111 word" per line) by probing the
    first line; for diceware files the dice digits are stripped, otherwise
    each stripped line is taken as a word.
    """
    # Raw string for the regex: '\s' and '\w' in a plain literal are
    # invalid escape sequences (DeprecationWarning today, a SyntaxError in
    # future Python versions).
    first_line = wordfile.readline().strip()
    is_diceware = bool(re.fullmatch(r'[1-6]+\s[\w \'-]+', first_line))
    wordfile.seek(0)
    if is_diceware:
        return Wordlist(line.strip().split(maxsplit=1)[1] for line in wordfile)
    return Wordlist(line.strip() for line in wordfile)
def print_passphrase(words, entropy, sep=' ', verbose=False):
    """Join words with sep and print the passphrase, reporting entropy
    problems on stderr."""
    passphrase = sep.join(words)
    # entropy could be less than expected if it's really short (1-2 chars
    # words). Calculate real entropy, assuming [a-z] only words.
    max_char_entropy = len(passphrase) * math.log(26, 2)
    if max_char_entropy < entropy:
        entropy = max_char_entropy
    if entropy < MIN_ENTROPY:
        print('Warning: the passphrase entropy of {:2.1f} bits seems very low.'.format(entropy),
              file=sys.stderr)
    if verbose:
        print('Passphrase entropy: {:2.1f}'.format(entropy), file=sys.stderr)
    print(passphrase)
def rolldice(wordfile, entropy=None, nb_words=None):
    """Pick words from wordfile, deriving the word count from the desired
    entropy when no explicit count is given.

    Returns a (words, entropy) tuple where entropy is the exact number of
    bits provided by the chosen number of words.
    """
    wordlist = wordlist_from_file(wordfile)
    if nb_words is None:
        # Round up so we always reach at least the requested entropy.
        nb_words = math.ceil(entropy / wordlist.entropy_per_word)
    actual_entropy = nb_words * wordlist.entropy_per_word
    return choose(wordlist, nb_words), actual_entropy
def main():
    """Parse the command line and print a generated passphrase."""
    parser = argparse.ArgumentParser()
    # Open word file is passed straight to rolldice().
    parser.add_argument('wordfile', type=argparse.FileType('r'))
    # Either a target entropy or an explicit word count, never both.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-e', '--entropy', type=int, default=56)
    group.add_argument('nb_words', type=int, nargs='?', default=None)
    parser.add_argument('-s', '--separator', default=' ')
    parser.add_argument('-v', '--verbose', action="store_true")
    args = parser.parse_args()
    # nb_words (when given) takes precedence inside rolldice.
    words, entropy = rolldice(args.wordfile, args.entropy, args.nb_words)
    print_passphrase(words, entropy, args.separator, args.verbose)
if __name__ == '__main__':
    main()
|
# ------------------------------------------------------------
#
# -----------------------
# -------- Tuple --------
# -----------------------
#
#
# [1] Tuple items are enclosed in parentheses
# [2] The parentheses may be omitted if you want
# [3] Tuples are ordered, so items can be accessed by index
# [4] Tuples are immutable => you can't add or delete items
# [5] Tuple items are not unique (duplicates are allowed)
# [6] A tuple can hold different data types
# [7] Operators used with strings and lists also work with tuples
# ------------------------------------------------------------
# Tuple with one element
myTuple1 = ("Amira")  # parentheses alone do NOT make a tuple: this is a str
print(type(myTuple1))
print(myTuple1)
myTuple2 = "Amira"
print(type(myTuple2))
print(myTuple2)
# To avoid it being read as a string, add a trailing comma after the element
myTuple1 = ("Amira",)
print(type(myTuple1))
print(myTuple1)
myTuple2 = "Amira",  # the comma, not the parentheses, creates the tuple
print(type(myTuple2))
print(myTuple2)
# len() returns how many elements are in the tuple
print(len(myTuple1))
print(len(myTuple2))
# Tuple concatenation with + builds a new tuple
a = (1, 2, 3, 4, 5, 6, 7)
b = (11, 12, 13, 14, 15)
c = a + b
d = a + ("A", "B","C","D",1.2, True) + b
print(c)
print(d)
# * repeats a sequence; it works the same for tuple, list and string
myString = "ABC"
myList = [1, 2, 3, 4, 5]
myTuple = ("A", "B", "C")
print(myString * 6)
print(myList * 6)
print(myTuple * 6)
# count() returns how many times a value occurs in the tuple
e=(1,2,3,4,5,6,2,45,2,6,2)
print(e.count(2))
print(e.count(6))
print(e.count(45))
# index() returns the index of the first occurrence of the given value
f=(1,2,3,4,5,8,10,14,16)
print(f"The index for the element value 10 %d" % f.index(10))
print("The index for the element value 10 {:d}".format(f.index(10)))
print(f"The index for the element value 10 {f.index(10)}")
# Destructuring: unpack tuple items into separate variables
g = ("A","B","C")
x1,y1,z1 = g
print(x1)
print(y1)
print(z1)
g1 = ("A","B",67,"C")
x2,y2,_,z2 = g1  # _ conventionally discards the unwanted element
print(x2)
print(y2)
print(z2)
|
from rest_framework.permissions import BasePermission
class IsSuperAdminUser(BasePermission):
    """Allows access only to SuperAdmin users."""

    def has_permission(self, request, view):
        """Check condition for the permission."""
        user = request.user
        return bool(user and user.is_superuser)
|
from __future__ import print_function
from um_fileheaders import *
import numpy as np
from six.moves import builtins
import types
class umfile_error(Exception):
    """Generic error raised for malformed or unexpected UM file contents."""


class packerr(Exception):
    """Raised when a field uses an unsupported packing scheme."""
class UMFile():
    # Should this inherit from io.something?
    """Extended version of file class that uses 8 byte words.

    Handles UM (Unified Model) fieldsfiles and ancillary files: a 256 word
    fixed-length header, integer/real/level/row/column constant blocks, a
    64 word lookup record per field, and the (optionally packed) field data.
    """

    # -32768 is the integer missing value; -1073741824 is the bit pattern
    # used for a missing real (an FP NaN).
    missval_i = -32768
    missval_r = -1073741824

    def __init__(self, filename, mode=None):
        """Open filename; binary mode is forced.  Read modes parse headers."""
        if not mode:
            mode = 'rb'
        if not "b" in mode:
            mode += "b"
        self.fileobj = builtins.open(filename, mode)
        if "r" in mode:
            self.determine_file_type()
            self.readheader()
            self.readlookup()
            self.sectorsize = self.getsectorsize()
        # Land-sea mask (stash code 30) is loaded lazily by getmask().
        self.mask = None
        self.nland = None

    def close(self):
        # Unless file was opened readonly, need to write the new header
        # information before closing.
        # BUGFIX: __init__ forces 'b' into the mode, so the old test
        # `self.fileobj.mode == 'r'` never matched and even read-only files
        # attempted a header write on close.  Test writability instead.
        if self.fileobj.writable():
            self.writeheader()
            self.writelookup()
        self.fileobj.close()

    def wordseek(self, offset):
        """Seek to a word (not byte) offset from the start of the file."""
        self.fileobj.seek(offset*self.wordsize)

    def wordread(self, size):
        """Read size words and return them as raw bytes."""
        return self.fileobj.read(size*self.wordsize)

    def arraywrite(self, array):
        """Write a numpy array using the file's byte order."""
        # Could use tofile here, but no real advantage.
        # Byteswap only when the in-memory order differs from the file's.
        if array.dtype.byteorder == self.byteorder:
            return self.fileobj.write(array.tobytes())
        else:
            return self.fileobj.write(array.byteswap().tobytes())

    def determine_file_type(self):
        """Set byteorder, wordsize and int/float dtypes by probing the file.

        Raises umfile_error if the first two words match neither a UM
        fieldsfile/ancillary file nor a PP file in any byte order.
        """
        # Read first 16 bytes and try to interpret in various ways.
        self.fileobj.seek(0)
        s = self.fileobj.read(16)
        # For a UM fieldsfile, first word should be 15/20 and second 1, 2, or 4
        # For ancillary file first word -32768
        # Include '=' in the endian candidates to make output easier.
        self.fieldsfile = False
        self.ppfile = False
        # NOTE(review): np.fromstring is deprecated in modern numpy;
        # np.frombuffer is the replacement but returns read-only arrays, so
        # switching also needs .copy() wherever the arrays are later mutated.
        for endian in ('=', '>', '<'):
            h = np.fromstring(s,np.int64).newbyteorder(endian)
            if h[0] in [15, 20, -32768] and h[1] in (1, 2, 4):
                self.byteorder = endian
                self.wordsize = 8
                self.int = np.int64
                self.float = np.float64
                self.fieldsfile = True
                return
            h = np.fromstring(s,np.int32).newbyteorder(endian)
            if h[0] in [15, 20, -32768] and h[1] in (1, 2, 4):
                self.byteorder = endian
                self.wordsize = 4
                self.int = np.int32
                self.float = np.float32
                self.fieldsfile = True
                return
            if h[0] == 256:
                # PP file magic.
                self.byteorder = endian
                self.wordsize = 4
                self.int = np.int32
                self.float = np.float32
                self.ppfile = True
                return
        raise umfile_error("Error - file type not determined")

    def readheader(self):
        """Read the fixed-length header and all constant blocks present."""
        if not self.fieldsfile:
            return
        self.fileobj.seek(0)
        # Fixed length header of length 256 words.
        s = self.wordread(256)
        self.fixhd = np.fromstring(s,self.int).newbyteorder(self.byteorder)
        # Integer constants (start addresses in fixhd are 1-based).
        self.wordseek(self.fixhd[FH_IntCStart]-1)
        nint = self.fixhd[FH_IntCSize]
        s = self.wordread(nint)
        self.inthead = np.fromstring(s,self.int).newbyteorder(self.byteorder)
        # Real constants
        self.wordseek(self.fixhd[FH_RealCStart]-1)
        nreal = self.fixhd[FH_RealCSize]
        s = self.wordread(nreal)
        self.realhead = np.fromstring(s,self.float).newbyteorder(self.byteorder)
        # Level dependent constants (optional: start <= 0 means absent).
        if self.fixhd[FH_LevDepCStart] > 0:
            self.wordseek(self.fixhd[FH_LevDepCStart]-1)
            nlconst = self.fixhd[FH_LevDepCSize1]*self.fixhd[FH_LevDepCSize2]
            s = self.wordread(nlconst)
            self.levdep = np.fromstring(s,self.float).newbyteorder(self.byteorder)
            self.levdep.shape = (self.fixhd[FH_LevDepCSize2],self.fixhd[FH_LevDepCSize1])
        # Row dependent constants (optional).
        if self.fixhd[FH_RowDepCStart] > 0:
            self.wordseek(self.fixhd[FH_RowDepCStart]-1)
            nlconst = self.fixhd[FH_RowDepCSize1]*self.fixhd[FH_RowDepCSize2]
            s = self.wordread(nlconst)
            self.rowdep = np.fromstring(s,self.float).newbyteorder(self.byteorder)
            self.rowdep.shape = (self.fixhd[FH_RowDepCSize2],self.fixhd[FH_RowDepCSize1])
        # Column dependent constants (optional).
        if self.fixhd[FH_ColDepCStart] > 0:
            self.wordseek(self.fixhd[FH_ColDepCStart]-1)
            nlconst = self.fixhd[FH_ColDepCSize1]*self.fixhd[FH_ColDepCSize2]
            s = self.wordread(nlconst)
            self.coldep = np.fromstring(s,self.float).newbyteorder(self.byteorder)
            self.coldep.shape = (self.fixhd[FH_ColDepCSize2],self.fixhd[FH_ColDepCSize1])

    def getsectorsize(self):
        """Calculate sector size as the gcd of the data offsets.

        Assumes it's not larger than the default 2048.
        """
        sector = gcd(2048, self.fixhd[FH_DataStart] - 1)  # Actual start off by 1.
        for k in range(self.fixhd[FH_LookupSize2]):
            if self.ilookup[k,LBEGIN] == -99:
                # -99 marks the end of the used lookup entries.
                break
            sector = gcd(sector, self.ilookup[k,LBNREC])
        return sector

    def createheader(self, intsize, realsize, levdepdim1=0, levdepdim2=0):
        """Create a zeroed standard header with the given section sizes.

        Lengths of other sections may be version dependent?
        """
        # Fixed length header of length 256
        self.fixhd = np.zeros(256, self.int)
        # Integer constants
        self.inthead = np.zeros(intsize, self.int)
        # Real constants
        self.realhead = np.zeros(realsize, self.float)
        # Level dependent constants (only when both dimensions are given)
        if levdepdim1 > 0 and levdepdim2 > 0:
            self.levdep = np.zeros((levdepdim2, levdepdim1), self.float)

    def copyheader(self, f):
        """Copy all the header properties from specified open file."""
        # Scalars / dtypes can be shared directly.
        for attr in ["wordsize", "byteorder", "int", "float", "fieldsfile",
                     "ppfile"]:
            setattr(self, attr, getattr(f, attr))
        # Array attributes need to be copied so later edits don't alias f.
        for attr in ["fixhd", "realhead", "inthead"]:
            setattr(self, attr, getattr(f, attr).copy())
        # These ones need not exist
        for attr in ["levdep", "rowdep", "coldep"]:
            if hasattr(f, attr):
                setattr(self, attr, getattr(f, attr).copy())
        self.ilookup = f.ilookup.copy()
        self.rlookup = f.rlookup.copy()
        self.sectorsize = f.sectorsize

    def writeheader(self):
        """Write all header sections; header must already be defined by
        copying or creating."""
        # Fixed length header of length 256
        self.wordseek(0)
        self.arraywrite(self.fixhd)
        # Integer constants
        self.wordseek(self.fixhd[FH_IntCStart]-1)
        self.arraywrite(self.inthead)
        # Real constants
        self.wordseek(self.fixhd[FH_RealCStart]-1)
        self.arraywrite(self.realhead)
        # Optional constant blocks, each at its recorded 1-based offset.
        if self.fixhd[FH_LevDepCStart] > 0:
            self.wordseek(self.fixhd[FH_LevDepCStart]-1)
            self.arraywrite(self.levdep)
        if self.fixhd[FH_RowDepCStart] > 0:
            self.wordseek(self.fixhd[FH_RowDepCStart]-1)
            self.arraywrite(self.rowdep)
        if self.fixhd[FH_ColDepCStart] > 0:
            self.wordseek(self.fixhd[FH_ColDepCStart]-1)
            self.arraywrite(self.coldep)

    def readlookup(self):
        """Read the lookup table into integer and real views.

        The lookup table has separate integer (1-45) and real (46-64)
        sections.  Simplest to have duplicate integer and real versions and
        just index into the appropriate parts.
        """
        lookdim1 = self.fixhd[FH_LookupSize1]
        lookdim2 = self.fixhd[FH_LookupSize2]
        self.wordseek(self.fixhd[FH_LookupStart]-1)
        s = self.wordread(lookdim1*lookdim2)
        if lookdim1 != 64:
            raise umfile_error("Unexpected lookup table dimension %d %d" % (lookdim1, lookdim2))
        self.ilookup = np.reshape(np.fromstring(s, self.int).newbyteorder(self.byteorder), [lookdim2, lookdim1])
        self.rlookup = np.reshape(np.fromstring(s, self.float).newbyteorder(self.byteorder), [lookdim2, lookdim1])

    def print_fixhead(self):
        """Print the fixed-length header, 8 values per row, '_' for missing."""
        print("FIXED HEADER")
        for i in range(256):
            if i % 8 == 0:
                print("%5d:" % i, end="")
            if self.fixhd[i] == self.missval_i or self.fixhd[i] == self.missval_r:
                # -32768 is integer missing value, -1073741824 is an FP NaN
                print("       _", end="")
            else:
                print("%8d" % self.fixhd[i], end="")
            if i % 8 == 7:
                print()
class Axis:
    """A named coordinate axis (lat, lon or lev) and its values."""

    def __init__(self, name, values):
        # Should check name is lat, lon or lev and that the values are
        # appropriate for the axis type.
        self.name = name
        self.values = values

    def __eq__(self, a):
        """Axes are equal when names match and values agree elementwise."""
        if self.name != a.name:
            return False
        if len(self.values) != len(a.values):
            return False
        return np.allclose(self.values, a.values)
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm.

    Returns b unchanged when a == 0 (or when a is not positive).
    """
    while a > 0:
        a, b = b % a, a
    return b
class UniqueList(list):
    """List subclass whose append silently drops duplicate entries.

    append() accepts either a single value or a list/ndarray of values.
    """
    def append(self, a):
        # BUGFIX: the original tested `type(a) in [types.ListType, ...]`;
        # types.ListType does not exist in Python 3 and raised
        # AttributeError.  isinstance works on both Python 2 and 3.
        if isinstance(a, (list, np.ndarray)):
            for x in a:
                if x not in self:
                    list.append(self, x)
        else:
            if a not in self:
                list.append(self, a)
class Grid:
    """Lon/lat/lev coordinate triple; all three must have equal length.

    Really only for timeseries?
    """

    def __init__(self, lon, lat, lev):
        # Check that dimensions match before storing anything.
        if not (len(lat) == len(lon) == len(lev)):
            raise umfile_error("Inconsistent grids")
        self.lon = lon
        self.lat = lat
        self.lev = lev

    def __eq__(self, g):
        """Grids are equal when shapes match and coordinates agree closely."""
        same_shape = (len(self.lon) == len(g.lon)
                      and len(self.lat) == len(g.lat)
                      and len(self.lev) == len(g.lev))
        if not same_shape:
            return False
        return (np.allclose(self.lon, g.lon)
                and np.allclose(self.lat, g.lat)
                and np.allclose(self.lev, g.lev))
def isprog(ilookup):
    """Return True when the lookup entry describes a prognostic variable.

    Prognostics live in stash sections 0, 33 (tracers) and 34 (UKCA);
    codes 3100-3129 also seem to be treated as prognostics.  The field
    must additionally be instantaneous (LBTIM < 10), have no time
    processing (LBPROC == 0) and not be a time series (LBCODE < 30000).
    """
    code = ilookup[ITEM_CODE]
    varcheck = code // 1000 in (0, 33, 34) or 3100 <= code <= 3129
    timecheck = (ilookup[LBTIM] < 10
                 and ilookup[LBPROC] == 0
                 and ilookup[LBCODE] < 30000)
    return varcheck and timecheck
def istracer(ilookup):
    """Return True when the lookup entry is an instantaneous tracer field
    (stash section 33) with no time processing and not a time series."""
    if ilookup[ITEM_CODE] // 1000 != 33:
        return False
    return ilookup[LBTIM] < 10 and ilookup[LBPROC] == 0 and ilookup[LBCODE] < 30000
|
from termcolor import colored as clr
from collections import OrderedDict
from fuzzer.common.FuzzData import FuzzData
from fuzzer.strategies import baseline_strategies as base
def heuristic(max_depth: int, properties: dict, fuzz_data: FuzzData):
    """ Makes a search plan based on a heuristic """
    ospf_links = fuzz_data.get_ospf_networks()
    # Links considered interesting for each property under test.
    prop_links = find_heuristic_links(properties, max_depth, fuzz_data)
    pretty_print_property_links(prop_links)
    # Plan the property links first, then pad out with the full DFS plan.
    subplan = gen_heuristic_subplan(max_depth, prop_links)
    return gen_full_plan(max_depth, subplan, ospf_links)
# @Tested (one test)
def gen_full_plan(max_depth: int, heuristic_subplan: list, links: list) -> list:
    """Union the heuristic subplan with a full DFS plan over all links.

    Validates that the subplan is a non-empty subset beforehand and that
    the merged plan covers the full plan afterwards.
    """
    full_dfs_plan = base.dfs(max_depth, sorted(links))
    pre_validate_heuristic_gen(heuristic_subplan, full_dfs_plan)
    merged_plan = union_plans(heuristic_subplan, full_dfs_plan)
    post_validate_heuristic_gen(merged_plan, full_dfs_plan)
    return merged_plan
# @Tested (one test)
def gen_heuristic_subplan(max_depth: int, heuristic_links: list):
    """DFS-plan each property's links and union the per-property plans."""
    plan = []
    for prop_links in heuristic_links:
        # Plans must be sorted consistently for union_plans (see its docs).
        prop_links.sort()
        plan = union_plans(plan, base.dfs(max_depth, prop_links))
    return plan
# @Tested
def union_plans(subplan: list, full_plan: list) -> list:
    """ Combines the two plans by first adding states from the first one
    and then states from the second one which are non-overlapping.
    WARNING: plans must be sorted in the same order
    """
    # OrderedDict gives O(1) membership tests while keeping insertion order.
    seen = OrderedDict.fromkeys(subplan)
    merged = list(subplan)
    merged.extend(state for state in full_plan if state not in seen)
    return merged
def find_heuristic_links(properties: dict, max_depth, fuzz_data: FuzzData) -> list:
    """Collect, per property, the links around its source container.

    Returns one list of links per property (an empty list when the source
    container alone already has more links than max_depth allows).
    """
    links = []
    topo = fuzz_data.get_topo()
    topo_name = topo["meta"]["name"]
    topo_containers = topo["containers"]
    for prop in properties.values():
        prop_links = set([])
        # Assumes container_name looks like "<prefix>-<container>" — TODO confirm.
        src_container = prop["container_name"].split("-")[1]
        src_links = find_container_links(src_container, topo_containers, topo_name)
        if len(src_links) > max_depth:
            # Too many links to explore within max_depth: give up on this
            # property but keep the positional slot in the result.
            links.append([])
            continue
        else:
            prop_links.update(src_links)
        for src_link in src_links:
            # Extend one hop: also include the links of each direct neighbor.
            neighbor_name = get_neighbor_name(src_link, src_container)
            neighbor_links = find_container_links(neighbor_name, topo_containers, topo_name)
            # NOTE(review): the source check above uses `> max_depth` but this
            # one uses `>= max_depth` — confirm the asymmetry is intentional.
            if len(neighbor_links) >= max_depth:
                continue
            else:
                prop_links.update(neighbor_links)
        links.append(list(prop_links))
    return links
def find_container_links(cont_name: str, containers: list, topo_name) -> list:
    """Collect the link names a container is attached to.

    Interfaces whose address starts with "100." are skipped (presumably
    management interfaces — verify against the topology conventions).
    """
    container = find_container(cont_name, containers)
    return ["{}-{}".format(topo_name, iface["network"])
            for iface in container["interfaces"]
            if not iface["ipaddr"].startswith("100.")]
def find_container(c_name: str, containers: list) -> dict:
    """Return the container dict whose "name" equals c_name.

    Raises:
        ValueError: if no container with that name exists.
    """
    for container in containers:
        if container["name"] == c_name:
            return container
    # Fixed typo in the error message ("dound" -> "found").
    raise ValueError("Container {} not found in topo file".format(c_name))
def get_neighbor_name(link: str, neighbor: str) -> str:
    """Return the first device on the link that is not `neighbor`.

    Device names start from the third dash-separated component of the link
    name; the first two components are skipped (presumably topology and
    network identifiers — verify against link naming conventions).
    """
    devices = link.split("-")[2:]
    for dev in devices:
        if dev != neighbor:
            return dev
    raise ValueError("No neighbor of {} on link {}".format(neighbor, link))
def pretty_print_property_links(heuristic_links: list):
    """Print a colored header followed by one line of links per property."""
    header = clr("## Heuristic property links", 'magenta', attrs=['bold'])
    print(header)
    idx = 1
    for prop_links in heuristic_links:
        print("Property {}: {}".format(idx, prop_links))
        idx += 1
# @Tested
def pre_validate_heuristic_gen(heuristic_plan: list, full_plan: list):
    """Sanity-check the subplan before merging: non-empty and a subset."""
    if not heuristic_plan:
        raise ValueError("Empty heuristic plan")
    extra_states = set(heuristic_plan) - set(full_plan)
    if extra_states:
        raise ValueError("Heuristic plan is not subset of the full plan")
# @Tested
def post_validate_heuristic_gen(heuristic_plan: list, dfs_plan: list):
    """After merging, both plans must cover the same number of states."""
    if len(heuristic_plan) == len(dfs_plan):
        return
    raise ValueError("Heuristic plan expected length - {} vs real {}".format(
        len(heuristic_plan), len(dfs_plan)
    ))
|
import os
import cv2
from PIL import (
Image,
ImageDraw,
ImageFont,
)
import argparse
import textwrap
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import Tuple, List
def logs_to_df(log_path: str, date_end_idx: int = 20) -> pd.DataFrame:
    """
    Take an Overrustle .txt log and turn it into a pandas dataframe
    with columns for date, username, and message.

    Each line is expected to look like "[YYYY-MM-DD HH:MM:SS ...] user: msg";
    the timestamp occupies characters 1..date_end_idx.
    """
    rows = []
    with open(log_path, 'r', encoding='utf8') as reader:
        # BUGFIX: the original primed `line = reader.readline()` and then
        # read the NEXT line at the top of the loop before processing, so
        # the first line of every log file was silently skipped.  Iterating
        # the file directly processes every line exactly once.
        for line in reader:
            date = line[1: date_end_idx]
            # Get username based on the index of the colon : after the date
            username_end_idx = date_end_idx + line[date_end_idx:].find(':')
            username = line[date_end_idx + 6: username_end_idx]
            # Add a colon to the username for convenience
            username = '{}:'.format(username)
            message = line[username_end_idx + 1: -1]
            # Skip malformed lines where any component came out empty.
            if not all([date, username, message]):
                continue
            rows.append({
                'date': date,
                'username': username,
                'message': message,
            })
    df = pd.DataFrame(rows)
    df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S')
    return df
def wrap_messages(username_text: List[str],
                  message_text: List[str],
                  max_text_width: int = 30,
                  indent_width: int = 4) -> str:
    """
    Get a single string of chat messages with line breaks,
    with long messages wrapped into new lines.

    Wrapped continuation lines are indented by indent_width spaces.
    """
    continuation = '\n' + ' ' * indent_width
    chat_lines = []
    for usr, msg in zip(username_text, message_text):
        wrapped = continuation.join(textwrap.wrap(msg, width=max_text_width))
        chat_lines.append('{} {}'.format(usr, wrapped))
    return '\n'.join(chat_lines)
def df_to_image(df: pd.DataFrame,
                image_size: Tuple[int, int] = (350, 1080),
                text_position: Tuple[int, int] = (5, 5),
                font_size: int = 18,
                text_color: Tuple[int, int, int] = (215, 215, 215),
                bg_color: Tuple[int, int, int] = (25, 25, 25),
                font_path: str = r'C:\Users\System-Pc\Desktop\arial.ttf') -> Image:
    """
    Take a log dataframe and overlay its text on a blank image.

    Args:
        df: dataframe with 'username' and 'message' columns.
        image_size: (width, height) of the generated image in pixels.
        text_position: top-left corner where the text starts.
        font_size: point size used when the TrueType font is available.
        text_color: RGB tuple for the chat text.
        bg_color: RGB tuple for the background.
        font_path: TrueType font file to use.  The default keeps the old
            hard-coded Windows path for compatibility, but loading now
            falls back to PIL's built-in font when the file is missing, so
            the function works on machines without that path.
    """
    image = Image.new('RGB', image_size, color=bg_color)
    message_text = df['message'].values.tolist()
    username_text = df['username'].values.tolist()
    # Get each line of text as "username: message".
    text_for_image = wrap_messages(username_text, message_text)
    draw = ImageDraw.Draw(image)
    try:
        font = ImageFont.truetype(font=font_path, size=font_size)
    except OSError:
        # Font file not found or unreadable: use the default bitmap font
        # instead of crashing (the old path only existed on one machine).
        font = ImageFont.load_default()
    draw.text(
        xy=text_position,
        text=text_for_image,
        font=font,
        align='left',
        fill=text_color,
    )
    return image
def fill_df(df: pd.DataFrame,
            stored_df: pd.DataFrame,
            target_len: int = 50) -> pd.DataFrame:
    """
    Place rows from the stored dataframe before the target
    dataframe in order to reach the target number of rows.
    """
    deficit = target_len - len(df.index)
    if deficit > 0:
        # Top up with the most recent stored rows, keeping df's rows last.
        return pd.concat([stored_df.tail(deficit), df])
    # df alone already has enough rows; keep only the newest target_len.
    return df.tail(target_len)
def df_to_video(df: pd.DataFrame,
                video_name: str,
                video_size: Tuple[int, int] = (400, 1080),
                max_messages: int = 50):
    """
    Write a video of chat based on an Overrustle log .txt file

    One frame is written per second of chat between the first and last
    timestamp; each frame shows the most recent max_messages chat lines.
    """
    frames_per_second = 1
    # Initialize an OpenCV VideoWriter to create an mp4
    video_writer = cv2.VideoWriter(
        video_name,
        cv2.VideoWriter_fourcc(*'mp4v'),
        frames_per_second,
        video_size,
    )
    # Make a dataframe that's empty to use to fill in for
    # a dataframe that doesn't have enough rows
    df_for_image = pd.DataFrame([{
        'date': '',
        'username': '',
        'message': '',
    }]*max_messages)
    # Get a list of dates for every second
    # between the first and last date
    min_date, max_date = df['date'].min(), df['date'].max()
    duration = int((max_date - min_date).total_seconds())
    dates = [
        min_date + pd.Timedelta(seconds=second)
        for second in range(duration)
    ]
    # Loop through each second
    for date in tqdm(dates):
        # NOTE(review): pd.Timestamp values are always truthy, so this guard
        # never skips anything — confirm whether NaT handling was intended.
        if not date:
            continue
        date_df = df[df['date'] == date]
        # Carry the previous frame's rows forward so every frame has exactly
        # max_messages lines (produces the scrolling-chat effect).
        df_for_image = fill_df(
            df=date_df,
            stored_df=df_for_image,
            target_len=max_messages,
        ).copy()
        image = df_to_image(
            df=df_for_image,
            image_size=video_size,
            text_position=(5, 5),
        )
        # Convert image to an OpenCV acceptable numpy array and write to video
        video_writer.write(np.array(image))
    video_writer.release()
def main():
    """
    Write a video based on an Overrustle chat log.
    """
    parser = argparse.ArgumentParser(
        description='Script to convert an Overrustle log to a chat video.')
    parser.add_argument('-i', '--input', type=str,
                        default='OverRustleLogsDownloader/logs/Jerma985 chatlog/December 2019/2019-12-29.txt',
                        help='Filepath of an overrustle log file (.txt)')
    parser.add_argument('-o', '--output', default='output.mp4',
                        help='Output video name, ')
    args = parser.parse_args()
    # Parse the log into a dataframe, then render it frame by frame.
    chat_df = logs_to_df(args.input)
    df_to_video(df=chat_df, video_name=args.output)


if __name__ == '__main__':
    main()
|
from select import select
from socket import socket
from typing import IO, Callable
class IOWrapper:
    """Wraps a binary stream with big-endian integer/string helpers.

    When a socket and a timeout callback are registered, the next read
    first waits for the socket to become readable, polling in one-second
    slices and invoking the callback on every slice that times out.
    """

    def __init__(self, io_stream: IO, socket_: socket = None):
        self.io_stream = io_stream      # underlying binary stream
        self.socket = socket_           # optional socket backing the stream
        self.next_timeout_cb = None     # one-shot callback used while waiting

    def set_next_timeout_cb(self, cb: Callable):
        """Register a callback fired once per second while waiting for data."""
        self.next_timeout_cb = cb

    def read_or_eof(self, size, field):
        """Read exactly `size` bytes or raise EOFError(field).

        BUGFIX: a single stream read may legally return fewer bytes than
        requested (short reads on sockets and pipes), which the original
        code reported as EOF.  Now reads are accumulated until `size`
        bytes arrive or the stream is genuinely exhausted.
        """
        if self.next_timeout_cb and self.socket:
            # Poll in 1-second slices so the callback can run while waiting.
            timeout = self.socket.gettimeout()
            self.socket.setblocking(False)
            while True:
                ready_to_read, _, _ = select([self.socket], [], [], 1)
                if ready_to_read:
                    break
                else:
                    self.next_timeout_cb()
            # One-shot: clear the callback and restore the socket state.
            self.next_timeout_cb = None
            self.socket.setblocking(True)
            self.socket.settimeout(timeout)
        chunks = []
        remaining = size
        while remaining > 0:
            chunk = self.io_stream.read(remaining)
            if not chunk:
                break  # EOF before the requested amount arrived
            chunks.append(chunk)
            remaining -= len(chunk)
        buf = b"".join(chunks)
        if len(buf) != size:
            raise EOFError(field)
        return buf

    def read_short(self, field: str) -> int:
        """Read an unsigned big-endian 16-bit integer."""
        return int.from_bytes(self.read_or_eof(2, field), byteorder='big', signed=False)

    def read_long(self, field: str) -> int:
        """Read an unsigned big-endian 32-bit integer."""
        return int.from_bytes(self.read_or_eof(4, field), byteorder='big', signed=False)

    def read_byte(self, field: str) -> int:
        """Read a single unsigned byte."""
        return int.from_bytes(self.read_or_eof(1, field), byteorder='big', signed=False)

    def read_bytes(self, n: int, field: str) -> bytes:
        """Read exactly n raw bytes."""
        buf: bytes = self.read_or_eof(n, field)
        return buf

    def read_string(self, n: int, field: str) -> str:
        """Read exactly n bytes and decode them as UTF-8."""
        buf: bytes = self.read_or_eof(n, field)
        return str(buf, "utf-8")

    def write_short(self, v: int):
        """Write an unsigned big-endian 16-bit integer."""
        self.io_stream.write(v.to_bytes(2, byteorder='big', signed=False))

    def write_long(self, v: int):
        """Write an unsigned big-endian 32-bit integer."""
        self.io_stream.write(v.to_bytes(4, byteorder='big', signed=False))

    def write_byte(self, v: int):
        """Write a single unsigned byte."""
        self.io_stream.write(v.to_bytes(1, byteorder='big', signed=False))

    def write_bytes(self, b: bytes):
        """Write raw bytes unchanged."""
        self.io_stream.write(b)

    def write_string(self, s: str):
        """Write a string encoded as UTF-8 (no length prefix)."""
        self.io_stream.write(bytes(s, "utf-8"))

    def flush(self):
        self.io_stream.flush()

    def close(self):
        self.io_stream.close()
|
'''
Dictionaries are collections of data whose structure holds a set of
key/value pairs, in which each individual key has an associated value.
This object represents the idea of a map, which we understand as an
unordered associative collection.  The association in dictionaries is
made through a key that refers to a value.
'''
dados_cliente = {
    'Nome': 'Renan',
    'Endereco': 'Rua Cruzeiro do Sul',
    'Telefone': '982503645'
}
print(dados_cliente['Nome'])  # Renan
dados_cliente['Idade'] = 40  # creates a new key/value pair in the dados_cliente dict
dados_cliente.pop('Telefone',None)  # removes a key (the None default avoids KeyError if absent)
del dados_cliente['Idade']  # removes the key from the dict (raises KeyError if absent)
print(dados_cliente)
import FWCore.ParameterSet.Config as cms
# Set variables from the os environment
globalTag = 'MC_3XY_V26'
# Load Standard CMSSW process initial configurations
process = cms.Process("DHCand")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.GlobalTag.globaltag = cms.string('%s::All' % globalTag)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(),
secondaryFileNames = cms.untracked.vstring()
)
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
# Load modules
process.load("HiggsAnalysis.DoublyChargedHiggs.genEventFilters_cff")
process.load("HiggsAnalysis.DoublyChargedHiggs.compositeCandProducers_cff")
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('DHCand.root'),
SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring('p') ),
outputCommands = cms.untracked.vstring('keep *')
)
process.outpath = cms.EndPath(process.out)
## Set decay mode
#analysisMode = 'MMMM'
#analysisMode = 'EEEE'
analysisMode = 'EMEM'
process.dhEventFilter_step = cms.Sequence(getattr(process, 'dhGenEventFilterTo'+analysisMode))
candProducerModules = None
for i in set( (analysisMode[0:2], analysisMode[2:4]) ):
if candProducerModules == None:
candProducerModules = getattr(process, 'dhCandTo'+i)
else:
candProducerModules += getattr(process, 'dhCandTo'+i)
process.dhCand_step = cms.Sequence(candProducerModules)
process.p = cms.Path(
process.dhEventFilter_step *
process.dhCand_step *
process.atLeastOneDHCandFilter
)
|
import io
import base64
from repository.in_memory_data import in_memory_photos
# --- Redirect behaviour of the root and posts listing routes ---
# configured_app / unconfigured_app are pytest fixtures (defined in
# conftest) providing Flask test clients with and without a stored setup.

def test_root_redirects(configured_app):
    # Root always answers with a redirect, even when setup is present.
    response = configured_app.get('/').status_code
    assert response == 302

def test_root_unconfigured_redirects(unconfigured_app):
    response = unconfigured_app.get('/').status_code
    assert response == 302

def test_start_page_does_not_redirect_if_setup_present(configured_app):
    # Following the redirect lands on the main blog page.
    response = configured_app.get('/', follow_redirects=True).data
    assert b'Blog App' in response

def test_start_page_redirects_to_setup_if_setup_not_present(unconfigured_app):
    # Without setup, following the redirect lands on the setup page.
    response = unconfigured_app.get('/', follow_redirects=True).data
    assert b'Connection Setup:' in response

def test_loading_contains_static_value(configured_app):
    response = configured_app.get('/posts/').data
    assert b'Blog App' in response

def test_browsing_to_posts_route_redirects_to_setup_automatically(unconfigured_app):
    response = unconfigured_app.get('/posts/', follow_redirects=True).data
    assert b'Connection Setup:' in response
def test_loading_contains_dynamic_value(configured_app):
response = configured_app.get('/posts/?Page=1').data
assert b'FirstTitle' in response
def test_loading_returns_false_for_random_text(configured_app):
response = configured_app.get('/posts/').data
assert b'RandomText' not in response
def test_loading_cotains_static_value_post_page(configured_app):
    """The API post endpoint contains the first post's title."""
    body = configured_app.get('/api/posts/f9c3a576-28bc-4b63-931d-04d6488d2f0d').data
    assert b'FirstTitle' in body

def test_post_page_redirects_to_setup_page(unconfigured_app):
    """An unconfigured app redirects a post page to the setup screen."""
    body = unconfigured_app.get('/posts/f9c3a576-28bc-4b63-931d-04d6488d2f0d',
                                follow_redirects=True).data
    assert b'Connection Setup:' in body

def test_loading_contains_dynamic_value_post_page(configured_app):
    """The API post endpoint contains the post's own content."""
    body = configured_app.get('/api/posts/f9c3a576-28bc-4b63-931d-04d6488d2f0d').data
    assert b'Specific content first post' in body
def test_logged_user_can_edit_own_post(configured_app):
    """FirstAuthor sees the edit form (Submit button) for their own post."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    body = configured_app.get('/posts/update/f9c3a576-28bc-4b63-931d-04d6488d2f0d',
                              follow_redirects=True).data
    assert b'Submit' in body

def test_logged_user_can_remove_own_post(configured_app):
    """FirstAuthor sees the Remove control on their own post page."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    body = configured_app.get('/posts/f9c3a576-28bc-4b63-931d-04d6488d2f0d',
                              follow_redirects=True).data
    assert b'Remove' in body
def test_logged_user_cannot_edit_other_post(configured_app):
    """Editing another author's post must yield HTTP 403."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    status = configured_app.get('/posts/update/daca57d1-c180-4e0a-8394-f5c95a5d5f23',
                                follow_redirects=True).status_code
    assert status == 403

def test_logged_user_cannot_remove_other_post(configured_app):
    """Removing another author's post must yield HTTP 403."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    status = configured_app.get('/posts/remove/daca57d1-c180-4e0a-8394-f5c95a5d5f23',
                                follow_redirects=True).status_code
    assert status == 403
def _admin_login(client):
    """Authenticate *client* as the admin user."""
    client.post('/authentication/login',
                data={"NameInput": "admin", "PasswordInput": "adminpass"},
                follow_redirects=True)

def _fetch_post(client, post_id):
    """Return the rendered post page body for *post_id*."""
    return client.get('/posts/' + post_id, follow_redirects=True).data

def test_logged_admin_can_edit_other_post(configured_app):
    """Admin sees the Edit control on another author's post."""
    _admin_login(configured_app)
    assert b'Edit' in _fetch_post(configured_app, 'daca57d1-c180-4e0a-8394-f5c95a5d5f23')

def test_logged_admin_can_edit_other_post_other_author_first_author(configured_app):
    """Admin sees the Edit control on FirstAuthor's post."""
    _admin_login(configured_app)
    assert b'Edit' in _fetch_post(configured_app, 'f9c3a576-28bc-4b63-931d-04d6488d2f0d')

def test_logged_admin_can_remove_post(configured_app):
    """Admin sees the Remove control on another author's post."""
    _admin_login(configured_app)
    assert b'Remove' in _fetch_post(configured_app, 'daca57d1-c180-4e0a-8394-f5c95a5d5f23')

def test_logged_admin_can_edit_other_post_other_author(configured_app):
    """Admin can edit posts regardless of author (FirstAuthor's post)."""
    _admin_login(configured_app)
    assert b'Edit' in _fetch_post(configured_app, 'f9c3a576-28bc-4b63-931d-04d6488d2f0d')

def test_logged_admin_can_remove_other_post_other_author(configured_app):
    """Admin can remove posts regardless of author (FirstAuthor's post)."""
    _admin_login(configured_app)
    assert b'Remove' in _fetch_post(configured_app, 'f9c3a576-28bc-4b63-931d-04d6488d2f0d')

def test_logged_admin_can_edit_other_post_other_author_third(configured_app):
    """Admin sees the Edit control on the third author's post."""
    _admin_login(configured_app)
    assert b'Edit' in _fetch_post(configured_app, 'a656f973-5b82-462d-aff7-8d2c6c3e4fa2')

def test_logged_admin_can_remove_other_post_other_author_third(configured_app):
    """Admin sees the Remove control on the third author's post."""
    _admin_login(configured_app)
    assert b'Remove' in _fetch_post(configured_app, 'a656f973-5b82-462d-aff7-8d2c6c3e4fa2')
def test_pagination_for_added_post(configured_app):
    """A freshly added post appears on the first listing page."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    new_post = {"NameInput": "TestTitle",
                "ContentInput": "TestContent",
                'Image-File': (io.BytesIO(b'test_image_content'), "test.jpg")}
    configured_app.post('/posts/add',
                        content_type='multipart/form-data',
                        data=new_post,
                        follow_redirects=True)
    listing = configured_app.get('/posts/?Page=0', follow_redirects=True).data
    assert b'TestTitle' in listing

def test_pagination_for_filter_applied(configured_app):
    """Filtering by the logged-in author still shows the added post."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    listing = configured_app.get('/posts/?Users=FirstAuthor&Page=0', follow_redirects=True).data
    assert b'TestTitle' in listing

def test_pagination_moves_post_to_next_page_if_number_exceeds(configured_app):
    """Older posts roll over onto a later page when the count exceeds the page size."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    listing = configured_app.get('/posts/?Page=2', follow_redirects=True).data
    assert b'FirstTitle' in listing
def test_filtering_shows_no_posts_for_author_with_no_posts(configured_app):
    """Filtering by an author without posts must not show other authors' titles.

    Bug fix: the original ``assert b'FirstTitle' or b'SecondTitle' or
    b'ThirdTitle' in post_page`` was always true, because a non-empty bytes
    literal is truthy and the ``or`` chain short-circuits before the ``in``
    test. The test could therefore never fail. Rewritten to assert what the
    test name describes.
    """
    login_data = {"NameInput": "FirstAuthor", "PasswordInput": "fpass"}
    configured_app.post('/authentication/login', data=login_data, follow_redirects=True)
    post_page = configured_app.get('/posts/?Users=admin&Page=0', follow_redirects=True).data
    assert b'FirstTitle' not in post_page
    assert b'SecondTitle' not in post_page
    assert b'ThirdTitle' not in post_page
def test_filtering_no_parameter_provided_in_url_provides_none_as_parameter_users(configured_app):
    """An empty ``Users`` filter should fall back to the unfiltered listing.

    Bug fix: the original ``assert b'FirstTitle' or ... in post_page`` was
    always true (truthy bytes literal short-circuits the ``or``). With no
    user filter applied, the listing should contain at least one known title.
    """
    post_page = configured_app.get('/posts/?Users=&Page=0', follow_redirects=True).data
    assert (b'FirstTitle' in post_page or b'SecondTitle' in post_page
            or b'ThirdTitle' in post_page)
def test_filtering_no_parameter_provided_in_url_provides_none_as_parameter_page(configured_app):
    """An empty ``Page`` parameter should fall back to a valid listing page.

    Bug fix: the original ``assert b'FirstTitle' or ... in post_page`` was
    always true (truthy bytes literal short-circuits the ``or``). The listing
    should contain at least one known title.
    """
    post_page = configured_app.get('/posts/?Users=&Page=', follow_redirects=True).data
    assert (b'FirstTitle' in post_page or b'SecondTitle' in post_page
            or b'ThirdTitle' in post_page)
def test_pagination_gets_next_if_there_are_more_existing_posts(configured_app):
    """First page with more content ahead: 'Next' shown, 'Previous' hidden."""
    body = configured_app.get('/posts', follow_redirects=True).data
    assert b'Next' in body
    assert b'Previous' not in body

def test_pagination_gets_previous_if_there_are_no_more_posts_forward_only_backward(configured_app):
    """Last page: 'Previous' shown, 'Next' hidden."""
    body = configured_app.get('/posts/?Users=&Page=2', follow_redirects=True).data
    assert b'Previous' in body
    assert b'Next' not in body

def test_pagination_gets_both_buttons_for_previous_and_next_pages(configured_app):
    """A middle page shows both navigation buttons."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "admin", "PasswordInput": "adminpass"},
                        follow_redirects=True)
    body = configured_app.get('/posts/?Page=1', follow_redirects=True).data
    assert b'Previous' in body
    assert b'Next' in body

def test_pagination_dissapears_if_no_further_posts_are_available(configured_app):
    """With a single-page result set neither navigation button is rendered."""
    body = configured_app.get('/posts/?Users=FirstAuthor', follow_redirects=True).data
    assert b'Next' not in body
    assert b'Previous' not in body
def test_post_addition_adds_no_image_if_none_is_selected(configured_app):
    # NOTE(review): the multipart field is sent as 'Image-File' while the
    # update endpoint test below uses lowercase 'image-file' (see
    # test_post_changes_image_when_new_image_is_loaded). Presumably the add
    # handler never reads this capitalised key, so the post is stored without
    # an image and the default photo is served -- confirm against /posts/add.
    login_data = {"NameInput": "FirstAuthor", "PasswordInput": "fpass"}
    configured_app.post('/authentication/login',
                        data=login_data,
                        follow_redirects=True)
    added_post = {"NameInput":"TestTitle",
                  "ContentInput":"TestContent",
                  'Image-File':(io.BytesIO(b'test_image_content'), "test.jpg")}
    response_data = configured_app.post('/posts/add',
                                        content_type='multipart/form-data',
                                        data=added_post,
                                        follow_redirects=True).data
    # The response must embed the default placeholder photo.
    assert in_memory_photos['default'].encode() in response_data
def test_post_loads_image_if_image_exists(configured_app):
    """The API embeds the stored photo of a post that has one."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    body = configured_app.get('/api/posts/a656f973-5b82-462d-aff7-8d2c6c3e4fa2',
                              follow_redirects=True).data
    assert in_memory_photos['local3'].encode('utf-8') in body
def test_post_loads_default_image_if_image_is_removed(configured_app):
    """Removing a post's image should make the API serve the fallback photo.

    Fix: the update request was wrapped in a leftover debugging ``print``
    that dumped the whole response body to stdout on every run; the request
    is now issued without the print.
    """
    login_data = {"NameInput": "SecondAuthor", "PasswordInput": "spass"}
    configured_app.post('/authentication/login', data=login_data, follow_redirects=True)
    configured_app.post('/posts/update/daca57d1-c180-4e0a-8394-f5c95a5d5f23',
                        data={'remove-image': ''},
                        follow_redirects=True)
    after_edit = configured_app.get('/api/posts/daca57d1-c180-4e0a-8394-f5c95a5d5f23').data
    assert in_memory_photos['local1'].encode('utf-8') in after_edit
def test_post_changes_image_when_new_image_is_loaded(configured_app):
    """Updating with an empty image field falls back to the default image."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "ThirdAuthor", "PasswordInput": "tpass"},
                        follow_redirects=True)
    body = configured_app.post('/posts/update/be3e1383-d296-4956-85d2-d0da74c78531',
                               data={'update-image': '', 'image-file': ''},
                               follow_redirects=True).data
    assert b'/images/default.png' in body
def test_post_default_image_is_replaced_if_new_image_is_loaded(configured_app):
    """Uploading a file on update embeds its base64 content in the page."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "FirstAuthor", "PasswordInput": "fpass"},
                        follow_redirects=True)
    payload = {'NameInput': 'Test',
               'ContentInput': 'Test',
               'Image-File': (io.BytesIO(b'test_image_content'), "test.jpg")}
    body = configured_app.post('/posts/update/3cb862a3-3bf7-44a2-83d8-7b7440588b68',
                               data=payload,
                               follow_redirects=True).data
    assert base64.b64encode(b'test_image_content') in body
def test_post_flash_message_for_no_selected_image(configured_app):
    """Submitting an empty file input flashes a 'no image' warning."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "admin", "PasswordInput": "adminpass"},
                        follow_redirects=True)
    payload = {'NameInput': 'Test',
               'ContentInput': 'Test',
               'Image-File': (io.BytesIO(b''), "")}
    body = configured_app.post('/posts/update/0d816f70-0ed1-4cee-b156-112462e6ea52',
                               data=payload,
                               follow_redirects=True).data
    assert b'No image found. Provide a valid file' in body

def test_post_flash_message_for_illegal_format_selected_as_image(configured_app):
    """Submitting a disallowed file extension flashes a format warning."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "admin", "PasswordInput": "adminpass"},
                        follow_redirects=True)
    payload = {'NameInput': 'TestName',
               'ContentInput': 'TestContent',
               'Image-File': (io.BytesIO(b'test_illegal_content'), 'illegal.exe')}
    body = configured_app.post('/posts/update/0d816f70-0ed1-4cee-b156-112462e6ea52',
                               data=payload,
                               follow_redirects=True).data
    assert b'Invalid file type! Make sure a valid file format is selected' in body

def test_post_flash_message_for_default_image_to_remove(configured_app):
    """Removing the image of a post that only has the default flashes a warning."""
    configured_app.post('/authentication/login',
                        data={"NameInput": "admin", "PasswordInput": "adminpass"},
                        follow_redirects=True)
    body = configured_app.post('/posts/update/0d816f70-0ed1-4cee-b156-112462e6ea52',
                               data={'remove-image': ''},
                               follow_redirects=True).data
    assert b'No image present. Nothing to remove' in body
|
"""
Ресурс, предоставляющий доступ к данным о студенческих отказах о мероприятиях.
"""
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
import models.db_session as db_session
from models.all_models import student_declines, Student
# Shared request parser: both POST and DELETE expect an integer `contest_id`.
parser = RequestParser()
parser.add_argument('contest_id', type=int)
class StudentDeclinesResource(Resource):
    """REST resource recording which contests a student has declined."""

    def post(self, vk_id: str):
        """Record that the student identified by ``vk_id`` declined ``contest_id``."""
        args = parser.parse_args()
        if args['contest_id'] is None:
            # NOTE(review): a bare int is sent as the response *body* with
            # HTTP 200 by flask_restful; returning ('', 409) would set the
            # status code instead -- kept as-is to preserve the current API.
            return 409
        session = db_session.create_session()
        student = session.query(Student).filter(Student.vk_id == vk_id).first()
        if student is None:
            # Robustness fix: .first() returns None for an unknown vk_id;
            # previously this crashed with AttributeError (HTTP 500).
            return 404
        inserting = student_declines.insert().values(student_id=student.id,
                                                     contest_id=args['contest_id'])
        db_session.engine.execute(inserting)
        return 200

    def delete(self, vk_id: str):
        """Remove the decline record for ``vk_id`` and ``contest_id``."""
        args = parser.parse_args()
        if args['contest_id'] is None:
            return 409
        session = db_session.create_session()
        student = session.query(Student).filter(Student.vk_id == vk_id).first()
        if student is None:
            return 404
        # Bug fix: Delete.where() takes SQL column expressions, not keyword
        # arguments -- the original raised TypeError at request time.
        deleting = student_declines.delete().where(
            (student_declines.c.student_id == student.id) &
            (student_declines.c.contest_id == args['contest_id']))
        db_session.engine.execute(deleting)
        return 200
|
# -*- coding: utf-8 -*-
"""Convenience wrapper function to simplify the interface to launch a :class:`aiida_shell.ShellJob` job."""
from __future__ import annotations
import logging
import pathlib
import shlex
import tempfile
import typing as t
from aiida.common import exceptions, lang
from aiida.engine import Process, WorkChain, launch
from aiida.orm import AbstractCode, Computer, Data, ProcessNode, SinglefileData, load_code, load_computer
from aiida.parsers import Parser
from aiida_shell import ShellCode, ShellJob
__all__ = ('launch_shell_job',)
LOGGER = logging.getLogger('aiida_shell')
def launch_shell_job(  # pylint: disable=too-many-arguments
    command: str | AbstractCode,
    nodes: t.Mapping[str, str | pathlib.Path | Data] | None = None,
    filenames: dict[str, str] | None = None,
    arguments: list[str] | str | None = None,
    outputs: list[str] | None = None,
    parser: t.Callable[[Parser, pathlib.Path], dict[str, Data]] | None = None,
    metadata: dict[str, t.Any] | None = None,
    submit: bool = False,
) -> tuple[dict[str, Data], ProcessNode]:
    """Launch a :class:`aiida_shell.ShellJob` job for the given command.

    :param command: The shell command to run. Should be the relative command name, e.g., ``date``. An ``AbstractCode``
        instance will be automatically created for this command if it doesn't already exist. Alternatively, a pre-
        configured ``AbstractCode`` instance can be passed directly.
    :param nodes: A dictionary of ``Data`` nodes whose content is to replace placeholders in the ``arguments`` list.
    :param filenames: Optional dictionary of explicit filenames to use for the ``nodes`` to be written to ``dirpath``.
    :param arguments: Optional list of command line arguments optionally containing placeholders for input nodes. The
        arguments can also be specified as a single string. In this case, it will be split into separate parameters
        using ``shlex.split``.
    :param outputs: Optional list of relative filenames that should be captured as outputs.
    :param parser: Optional callable that can implement custom parsing logic of produced output files.
    :param metadata: Optional dictionary of metadata inputs to be passed to the ``ShellJob``.
    :param submit: Boolean, if ``True`` will submit the job to the daemon instead of running in current interpreter.
    :raises TypeError: If the value specified for ``metadata.options.computer`` is not a ``Computer``.
    :raises ValueError: If the absolute path of the command on the computer could not be determined.
    :returns: The tuple of results dictionary and ``ProcessNode``. When ``submit=True`` the results dictionary is
        always empty, since the job has only been submitted and has not yet terminated. The results dictionary
        intentionally doesn't include the ``retrieved`` and ``remote_folder`` outputs as they are generated for each
        ``CalcJob`` and typically are not of interest to a user running ``launch_shell_job``. In order to not confuse
        them, these nodes are omitted, but they can always be accessed through the node.
    """
    # NOTE(review): this pops `computer` out of the caller-supplied `metadata`
    # dict, mutating it in place -- confirm callers do not reuse the same
    # metadata dict across invocations.
    computer = (metadata or {}).get('options', {}).pop('computer', None)

    # A string command means "create/load a code for it"; otherwise a
    # pre-configured code instance is used as-is.
    if isinstance(command, str):
        code = prepare_code(command, computer)
    else:
        lang.type_check(command, AbstractCode)
        code = command

    # A single string of arguments is split into a list via shlex.
    if isinstance(arguments, str):
        arguments = shlex.split(arguments)
    else:
        lang.type_check(arguments, list, allow_none=True)

    inputs = {
        'code': code,
        'nodes': convert_nodes_single_file_data(nodes or {}),
        'filenames': filenames,
        'arguments': arguments,
        'outputs': outputs,
        'parser': parser,
        'metadata': metadata or {},
    }

    if submit:
        current_process = Process.current()
        # When called from inside a work chain, submit through the work chain
        # itself so the provenance is linked to it.
        if current_process is not None and isinstance(current_process, WorkChain):
            return {}, current_process.submit(ShellJob, **inputs)
        return {}, launch.submit(ShellJob, **inputs)

    results, node = launch.run_get_node(ShellJob, **inputs)

    # Strip the generic CalcJob outputs; they remain reachable via the node.
    return {label: node for label, node in results.items() if label not in ('retrieved', 'remote_folder')}, node
def prepare_code(command: str, computer: Computer | None = None) -> AbstractCode:
    """Prepare a code for the given command and computer.

    This will automatically prepare (create and configure) the computer if needed, and create a ``ShellCode`` for the
    command if one does not exist yet for that computer.

    :param command: The command that the code should represent. Can be the relative executable name or absolute path.
    :param computer: The computer on which the command should be run. If not defined the localhost will be used.
    :return: A :class:`aiida.orm.nodes.code.abstract.AbstractCode` instance.
    """
    computer = prepare_computer(computer)
    # Codes are labeled `<command>@<computer>` so the same command on a
    # different computer gets its own code node.
    code_label = f'{command}@{computer.label}'

    try:
        code: AbstractCode = load_code(code_label)
    except exceptions.NotExistent as exception:
        LOGGER.info('No code exists yet for `%s`, creating it now.', code_label)
        # Resolve the absolute path of the executable on the target computer
        # by running `which` over the computer's transport.
        with computer.get_transport() as transport:
            status, stdout, stderr = transport.exec_command_wait(f'which {command}')
            executable = stdout.strip()

            if status != 0:
                raise ValueError(
                    f'failed to determine the absolute path of the command on the computer: {stderr}'
                ) from exception

        code = ShellCode(  # type: ignore[assignment]
            label=command, computer=computer, filepath_executable=executable, default_calc_job_plugin='core.shell'
        ).store()

    return code
def prepare_computer(computer: Computer | None = None) -> Computer:
    """Prepare and return a configured computer.

    If no computer is defined, the computer labeled ``localhost`` will be loaded. If that doesn't exist, it will be
    created, using ``core.local`` and ``core.direct`` as the entry points for the transport and scheduler type,
    respectively. In that case, the safe transport interval and the minimum job poll interval will both be set to 0
    seconds in order to guarantee a throughput that is as fast as possible.

    :param computer: The computer to prepare.
    :return: A configured computer.
    :raises TypeError: If the provided computer is not an instance of :class:`aiida.orm.Computer`.
    """
    if computer is not None and not isinstance(computer, Computer):
        raise TypeError(f'`metadata.options.computer` should be instance of `Computer` but got: {type(computer)}.')

    if computer is None:
        LOGGER.info('No computer specified, assuming `localhost`.')
        try:
            computer = load_computer('localhost')
        except exceptions.NotExistent:
            LOGGER.info('No `localhost` computer exists yet: creating and configuring the `localhost` computer.')
            computer = Computer(
                label='localhost',
                hostname='localhost',
                description='Localhost automatically created by `aiida.engine.launch_shell_job`',
                transport_type='core.local',
                scheduler_type='core.direct',
                workdir=tempfile.gettempdir(),
            ).store()
            # Zero intervals maximise throughput for the auto-created localhost.
            computer.configure(safe_interval=0.)
            computer.set_minimum_job_poll_interval(0.)
            computer.set_default_mpiprocs_per_machine(1)

    # Make sure the computer is configured for the default user, otherwise
    # jobs cannot be run on it.
    default_user = computer.backend.default_user

    if default_user and not computer.is_user_configured(default_user):
        computer.configure(default_user)

    return computer
def convert_nodes_single_file_data(nodes: t.Mapping[str, str | pathlib.Path | Data]) -> t.MutableMapping[str, Data]:
    """Convert ``str`` and ``pathlib.Path`` instances to ``SinglefileData`` nodes.

    :param nodes: Dictionary of ``Data``, ``str``, or ``pathlib.Path``.
    :raises TypeError: If a value in the mapping is of invalid type.
    :raises FileNotFoundError: If a filepath ``str`` or ``pathlib.Path`` does not correspond to existing file.
    :returns: Dictionary of filenames onto ``SinglefileData`` nodes.
    """
    processed_nodes: t.MutableMapping[str, Data] = {}

    for key, value in nodes.items():

        # Existing Data nodes are passed through untouched.
        if isinstance(value, Data):
            processed_nodes[key] = value
            continue

        if isinstance(value, str):
            filepath = pathlib.Path(value)
        else:
            filepath = value

        if not isinstance(filepath, pathlib.Path):
            raise TypeError(
                f'received type {type(filepath)} for `{key}` in `nodes`. Should be `Data`, `str`, or `Path`.'
            )

        # Bug fix: ``Path.resolve()`` returns a new path and does not mutate
        # in place -- the original discarded the result, leaving relative
        # paths and symlinks unresolved.
        filepath = filepath.resolve()

        if not filepath.exists():
            raise FileNotFoundError(f'the path `{filepath}` specified in `nodes` does not exist.')

        with filepath.open('rb') as handle:
            processed_nodes[key] = SinglefileData(handle, filename=str(filepath.name))

    return processed_nodes
|
# Script to split a 'fasta' file so as to balance processing across
# CPU cores (maximum of 20).
# Name of the file to be processed, bound to this variable (no extension).
proteins_fasta = 'GCF_000347755.3_Ccap_2.1_protein'
# Summary report of the partitions is written to '<name>.txt'.
new_file = open(proteins_fasta + '.txt', 'w')
# Number of partitions the protein file is split into.
num_divisions = 7
# Open the FASTA input file, given its base name (without extension).
def read_file(filename):
    """Open ``<filename>.faa`` for reading and return the file object."""
    return open(filename + '.faa')
# Parse an open FASTA file and return a flat list alternating headers and
# protein sequences: ['>header_1', 'SEQ1', '>header_2', 'SEQ2', ...].
def save_splitted(fasta_file):
    """Return headers and concatenated sequences interleaved in one list.

    A sequence may span several lines in the file; its lines are joined
    until the next header ('>') is reached.
    """
    entries = []
    current = ""
    for raw_line in fasta_file.readlines():
        if raw_line[0] == ">":
            # New header: flush the sequence accumulated so far, then record
            # the header itself, and restart the accumulator.
            entries.append(current)
            entries.append(raw_line.strip())
            current = ""
        else:
            current += raw_line.strip()
    # Flush the final sequence; drop the leading empty string that was
    # appended before the very first header.
    entries.append(current)
    entries.pop(0)
    return entries
# Extract only the protein sequences (without their headers) from the
# interleaved list; used to compute the total amino-acid count.
# The slice [1::2] selects the odd indices, where the sequences live.
def proteins(complete_list):
    """Return every other element starting at index 1 (the sequences)."""
    return complete_list[1::2]
# Given a list of protein sequences, return the total amino-acid count.
def total_size(proteins_list):
    """Return the sum of the lengths of all sequences in *proteins_list*."""
    return sum(len(sequence) for sequence in proteins_list)
# The division function receives the list of protein sequences and the total
# amino-acid count of the file. It also uses the module-level variable
# num_divisions, the number of partitions the file must be split into, and
# writes a per-partition summary to the module-level report file new_file.
def division(proteins_list, size):
    checkpoints = []
    # partition_size is the average number of amino acids each partition
    # should hold; a partition will not exceed this target.
    partition_size = int(size / num_divisions)
    # partial_size is the running total compared against the current target
    # (starts at 0 and grows by the length of each protein visited).
    partial_size = 0
    # count is the ordinal position of the partition, counted like a list
    # index (0 is the first), used as a multiplier of the partition size.
    count = 0
    # partition is the current cut-point target; each time a cut point is
    # fixed, the target advances by one partition size.
    partition = partition_size
    total_aa = 0
    # Walk the whole protein chain, checking for each protein whether the
    # target size has been reached.
    for element in proteins_list:
        # Has the running amino-acid total passed the current target?
        if(partial_size > partition):
            # Distance from the cut point back to the start of this protein.
            position = partial_size - partition
            # Decide whether this protein stays in the current partition,
            # i.e. whether the cut point falls before or after its midpoint.
            # NOTE(review): proteins_list.index(element) returns the FIRST
            # occurrence; duplicate sequences would yield a wrong checkpoint
            # -- confirm the input sequences are unique in practice.
            if(position <= (int(len(element)/2))):
                checkpoints.append(proteins_list.index(element) - 1)
                new_file.write('Partition number ')
                new_file.write(str(count+1))
                new_file.write(' has ')
                partition_aa = partial_size - total_aa
                total_aa += partition_aa
                new_file.write(str(partition_aa))
                new_file.write(' aminoacids')
                new_file.write('\n')
            else:
                checkpoints.append(proteins_list.index(element))
                partition_aa = partial_size - total_aa
                total_aa += partition_aa
                new_file.write('Partition number ')
                new_file.write(str(count+1))
                new_file.write(' has ')
                new_file.write(str(partition_aa))
                new_file.write(' aminoacids')
                new_file.write('\n')
            count += 1
            partition = partition_size * (count+1)
            # The last partition takes everything that remains.
            if (count == num_divisions-1):
                break
        partial_size += len(element)
    # Report the final partition (whatever amino acids are left over).
    partition_aa = size - total_aa
    new_file.write('Partition number ')
    new_file.write(str(count+1))
    new_file.write(' has ')
    new_file.write(str(partition_aa))
    new_file.write(' aminoacids')
    new_file.close()
    return checkpoints
def create_files(checkpoints, fasta_file, num_divisions):
    """Write the partitioned FASTA files.

    Re-reads *fasta_file* line by line and starts a new output file
    ``<k>-<num_divisions>_<proteins_fasta>.faa`` every time the running
    header count passes the next checkpoint index.
    """
    gt_counter = 0   # number of '>' headers seen so far
    cp_counter = 0   # index of the next checkpoint to reach
    lines = fasta_file.readlines()
    num_divisions = str(num_divisions)
    original_name = str('-' + num_divisions + '_' + proteins_fasta + '.faa')
    now_file = open('1' + original_name, 'w')
    for line in lines:
        if (line[0] == ">"):
            gt_counter += 1
            # Fix: the original bare `except: pass` swallowed every error;
            # only IndexError ("no more checkpoints, remaining lines belong
            # to the final file") is expected here.
            try:
                if (gt_counter > checkpoints[cp_counter]):
                    cp_counter += 1
                    now_file.close()
                    now_file = open(str(cp_counter+1) + original_name, 'w')
            except IndexError:
                pass
        now_file.write(line)
    now_file.close()
# fasta_file is a file object reading the file to be processed.
fasta_file = read_file(proteins_fasta)
print (fasta_file)
# splitted_elements is the list holding headers and proteins interleaved.
splitted_elements = save_splitted(fasta_file)
proteins_list = proteins(splitted_elements)
# amino_acids is the total amino-acid count of the file.
amino_acids = total_size(proteins_list)
new_file.write('Number of aminoacids = ')
new_file.write(str(amino_acids))
new_file.write('\n\n')
checkpoints = division(proteins_list, amino_acids)
# Re-open the FASTA file: the previous handle was consumed by save_splitted.
fasta_file = read_file(proteins_fasta)
create_files(checkpoints, fasta_file, num_divisions)
fasta_file.close()
|
import unittest
from functions.funtionLibrary import *
class testEditSQLString(unittest.TestCase):
    def testEditSQLString(self):
        """This checks to see if the EDIT sQL string is built"""
        expected = "UPDATE bookStore SET BookTitle = 'New Book Title' WHERE bookID ='2'"
        self.assertEqual(editSQLStr("BookTitle", "New Book Title", 2), expected)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# abs(): absolute value.
print(abs(-1))
# max(): largest of the given arguments.
print(max(1, -2, 3))
# Type conversions (results deliberately discarded; shown for illustration).
int('123')
float('12.34')
str(1.23)
bool(1)
bool('')
# Functions are first-class objects: a built-in can be bound to a new name.
a = abs
print(a(-1))
# hex(): integer to hexadecimal string.
print(hex(255))
print(hex(1000))
|
from django.db import models
class ProductCategory(models.Model):
    """A category grouping related products."""

    title = models.CharField(max_length=1000, verbose_name='عنوان')
    description = models.TextField(null=True, blank=True, verbose_name='توضیحات')

    class Meta:
        verbose_name = 'دستهی محصول'
        verbose_name_plural = 'دستههای محصول'

    def __str__(self):
        return self.title
class Product(models.Model):
    """A sellable product belonging to a :class:`ProductCategory`."""

    title = models.CharField(max_length=2000, verbose_name='عنوان')
    description = models.TextField(verbose_name='توضیحات')
    image = models.ImageField(verbose_name='تصویر')
    price = models.PositiveIntegerField(verbose_name='قیمت (تومان)')
    # Fix: ForeignKey requires an explicit on_delete argument since
    # Django 2.0; CASCADE matches the implicit default of older versions,
    # so existing behavior is preserved.
    category = models.ForeignKey(ProductCategory, on_delete=models.CASCADE,
                                 related_name='products', verbose_name='دسته')

    def __str__(self):
        return self.title

    class Meta:
        verbose_name_plural = 'محصولات'
        verbose_name = 'محصول'
|
def merge(arr, l, mid, r):
    """Merge the two sorted runs arr[l..mid] and arr[mid+1..r] in place.

    Bug fix: the original sliced the WHOLE array (``arr[:mid]`` / ``arr[mid:]``)
    and wrote results starting at index 0, ignoring ``l`` and ``r``. That
    corrupts the array whenever the merged sub-range does not start at 0
    (e.g. merge_sort's right-half recursion); even [2, 3, 1] was mis-sorted.
    """
    left = arr[l:mid + 1]
    right = arr[mid + 1:r + 1]
    i = j = 0
    k = l  # write position starts at the left edge of the sub-range
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:  # '<=' keeps the sort stable
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    # Copy whichever run still has elements left.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1
def merge_sort(arr, l, r):
    """Recursively sort arr[l..r] (inclusive bounds) in place."""
    if l >= r:
        return  # zero or one element: nothing to sort
    mid = (l + r) // 2
    merge_sort(arr, l, mid)
    merge_sort(arr, mid + 1, r)
    merge(arr, l, mid, r)
# Demo: sort a sample list in place and show the result.
a = [4, 5, 7, 1, 2, 3, 6, 88, 4, 11, 22, 4, 5]
merge_sort(a, 0, len(a) - 1)
print(a)
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_thumbs.db.models import ImageWithThumbsField
# Create your models here.
# class User(AbstractUser):
# avatar = ImageWithThumbsField(upload_to='avatar/%Y-%m-%d/', blank=True, null=True, sizes=((136, 136),))
# position = models.CharField(_('position'), max_length=64, blank=True, null=True)
class Timestamps(models.Model):
    """Abstract base model adding creation/modification timestamps."""

    created_at = models.DateTimeField(_('date of creation'), auto_now_add=True)
    updated_at = models.DateTimeField(_('last modification date'), auto_now=True)

    class Meta:
        abstract = True
class Reviews(models.Model):
    """A customer review shown on the site (name, text, optional photo)."""
    # NOTE(review): unlike the Timestamps base, created_at here has neither
    # auto_now_add nor a default, so it must be supplied explicitly on save
    # -- confirm this is intentional (e.g. for manually dated reviews).
    created_at = models.DateTimeField(_('date of creation'), )
    updated_at = models.DateTimeField(_('last modification date'), auto_now=True)
    first_name = models.CharField(_('first name'), max_length=30)
    last_name = models.CharField(_('last name'), max_length=30)
    review = models.TextField(_('review'))
    photo = ImageWithThumbsField(_('photo'), upload_to='review/%Y-%m-%d/', sizes=((128, 128),), default='default-avatar.128x128.jpg')
    class Meta:
        verbose_name = _('review')
        verbose_name_plural = _('reviews')
    def __unicode__(self):
        # Human-readable label: "<first_name> <last_name>".
        return _("%(first_name)s %(last_name)s") % {
            'first_name': self.first_name,
            'last_name': self.last_name,
        }
    def admin_thumbnail(self):
        # Round 64px preview for the admin changelist; falls back to a
        # placeholder <div> when no photo is set.
        if self.photo:
            return "<img src='%s' height='64px' style='border-radius: 50%%;' />" % (self.photo.thumbnail(128, 128))
        return "<div style='width: 64px; height: 64px; background: aliceblue; text-align: center; display: table-cell; vertical-align: middle; border-radius: 50%;'>No photo</div>"
    admin_thumbnail.short_description = _('photo')
    admin_thumbnail.allow_tags = True
class Banner(Timestamps):
    """A front-page banner with title, text, link URL and a 195x64 image."""
    title = models.CharField(_('title'), max_length=64)
    text = models.TextField(_('text'))
    # Toggles visibility without deleting the record.
    show = models.BooleanField(_('is show'), default=True)
    url = models.CharField(_('url'), max_length=128, default="/")
    image = ImageWithThumbsField(upload_to='banner/%Y-%m-%d/', sizes=((195, 64),))

    class Meta:
        verbose_name = _('banner')
        verbose_name_plural = _('banners')

    def admin_thumbnail(self):
        # Unescaped HTML preview for the admin list (see allow_tags).
        if self.image:
            return "<img src='%s' height='64px' />" % (self.image.thumbnail(195, 64))
        return "<div style='width: 195px; height: 64px; background: aliceblue; text-align: center; display: table-cell; vertical-align: middle;'>No photo</div>"
    admin_thumbnail.short_description = _('banner')
    admin_thumbnail.allow_tags = True
class OurAdvantages(Timestamps):
    """A single "our advantages" item: title, description and a 64x64 icon."""
    title = models.CharField(_('title'), max_length=64)
    description = models.TextField(_('description'))
    # Toggles visibility without deleting the record.
    show = models.BooleanField(_('is show'), default=True)
    image = ImageWithThumbsField(_('image'), upload_to='advantages/%Y-%m-%d/', sizes=((64, 64),))

    class Meta:
        verbose_name = _('our advantages')
        verbose_name_plural = _('our advantages')

    def admin_thumbnail(self):
        # Unescaped HTML preview for the admin list (see allow_tags).
        if self.image:
            return "<img src='%s' height='64px' />" % (self.image.thumbnail(64, 64))
        return "<div style='width: 64px; height: 64px; background: aliceblue; text-align: center; display: table-cell; vertical-align: middle;'>No photo</div>"
    admin_thumbnail.short_description = _('image')
    admin_thumbnail.allow_tags = True
class News(Timestamps):
    """A news item with title, body text and an optional 298x176 miniature."""
    title = models.CharField(_('title'), max_length=64)
    text = models.TextField(_('text'))
    miniature = ImageWithThumbsField(_('miniature'), upload_to='news/%Y-%m-%d/', blank=True, null=True, sizes=((298, 176),))

    class Meta:
        verbose_name = _('news')
        verbose_name_plural = _('news')

    def __unicode__(self):
        # Python-2-style string representation (the title).
        return _("%(title)s") % {'title': self.title}
class Logo(Timestamps):
    """A partner/company logo with a display name and 109x62 image."""
    name = models.CharField(_('name'), max_length=64)
    image = ImageWithThumbsField(_('image'), upload_to='core/logo/%Y-%m-%d/', sizes=((109, 62),))

    class Meta:
        verbose_name = _('logo')
        verbose_name_plural = _('logos')

    def __unicode__(self):
        # Python-2-style string representation (the name).
        return _("%(name)s") % {'name': self.name}

    def admin_thumbnail(self):
        # Unescaped HTML preview for the admin list (see allow_tags).
        if self.image:
            return "<img src='%s' height='62px' />" % (self.image.thumbnail(109, 62))
        return "<div style='width: 109px; height: 62px; background: aliceblue; text-align: center; display: table-cell; vertical-align: middle;'>No photo</div>"
    admin_thumbnail.short_description = _('image')
    admin_thumbnail.allow_tags = True
|
import subprocess as sp
import mdlPil, mdlGFX, ThunderBorg
def PreFlight():
    """Compose the pre-flight status image, push it to the display, return it."""
    image = TBCheck(ControllerCheck(mdlPil.creatImage()))
    mdlGFX.gfxDisplay(image)
    return image
def ControllerCheck(image):
    """Overlay the Bluetooth controller connection status onto `image`.

    The controller is considered connected when its MAC address appears in
    the output of `hcitool con`.
    """
    connected = "00:06:F7:13:66:8F" in sp.getoutput("hcitool con").split()
    return mdlPil.Controller(connected, image)
def TBCheck(image):
    """Probe both ThunderBorg boards (I2C addresses 10 and 11) and overlay
    each board's detection status onto `image`."""
    TB1 = ThunderBorg.ThunderBorg()
    TB1.i2cAddress = 10
    TB1.Init()
    image = mdlPil.TB(TB1.foundChip, image, 1)
    TB2 = ThunderBorg.ThunderBorg()
    TB2.i2cAddress = 11
    TB2.Init()
    # BUG FIX: the second overlay previously re-used TB1.foundChip, so board
    # 2's real status was never displayed.
    image = mdlPil.TB(TB2.foundChip, image, 2)
    return image
|
import json
import requests
# A simple class to store attributes of a single track.
class Track(object):
    """Container for one track's metadata, lyrics and final mood label."""

    def __init__(self, name, track_id, artist):
        self.name = name        # display name for the front end
        self.id = track_id      # Musixmatch track id, used to fetch lyrics
        self.artist = artist    # performing artist's name

    def addLyrics(self, lyrics):
        """Attach the lyrics text fetched for this track."""
        self.lyrics = lyrics

    def label(self, mood):
        """Record the classification result (e.g. Happy/Sad)."""
        self.mood = mood
class Musix(object):
    """Minimal Musixmatch REST client: country charts and per-track lyrics."""

    def __init__(self, country="us", apikey="acf266ecb81687ee6f567e6fe9d0ca06"):
        # Country used when fetching the charts.
        self.country = country
        # NOTE(review): a live API key is hard-coded as the default — it should
        # be moved to configuration/environment; anyone with this source can
        # consume its quota.
        self.apikey = apikey

    def change_country(self, country):
        """Switch the country used for subsequent chart queries."""
        self.country = country

    def get_top_lyrics(self, k):
        """Fetch chart tracks and attach lyrics to the first `k` of them.

        Returns the full track list; tracks beyond the first `k` are returned
        without a `lyrics` attribute.
        """
        tracks = self.get_top_songs()
        for track in tracks[:k]:
            track.addLyrics(self.get_lyrics(track.id))
        return tracks;

    def get_top_songs(self):
        """Return Track objects for the top-100 chart of `self.country`."""
        url = "https://api.musixmatch.com/ws/1.1/chart.tracks.get"
        querystring = {"format": "json", "callback": "callback",
                       "country": self.country, "apikey": self.apikey, "page_size":100,}
        headers = {'Accept': "text/plain", }
        # Perform the request and decode the JSON response.
        response = requests.request("GET", url, headers=headers, params=querystring).json()
        songs = []
        for track in response['message']['body']['track_list']:
            name = track['track']['track_name']
            track_id = track['track']['track_id']
            artist = track['track']['artist_name']
            trackObj = Track(name, track_id, artist)
            songs.append(trackObj)
        return songs

    def get_lyrics(self, track_id):
        """Return the lyrics body for `track_id`.

        Musixmatch appends a promotional banner to every lyric; it is stripped
        here so it does not pollute downstream classification.
        """
        url = "https://api.musixmatch.com/ws/1.1/track.lyrics.get"
        querystring = {"format": "json", "callback": "callback",
                       "track_id": track_id, "apikey": self.apikey}
        headers = {'Accept': "application/json", }
        response = requests.request("GET", url, headers=headers, params=querystring).json()
        return response['message']['body']['lyrics']['lyrics_body'].replace("******* This Lyrics is NOT for Commercial use *******", "")
|
from res_util import *
from bird_co import *
# from other_util import *
# conda install pandas
# conda install librosa
import os
import sys
import gc
import time
import math
import shutil
import random
import warnings
import typing as tp
from pathlib import Path
from contextlib import contextmanager
# import yaml
from joblib import delayed, Parallel
import cv2
import librosa
import audioread
import soundfile as sf
import numpy as np
import pandas as pd
# from fastprogress import progress_bar
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d, Module, Linear, BatchNorm2d, ReLU
from torch.nn.modules.utils import _pair
import torch.utils.data as data
# Show wide/long DataFrames in full when inspecting interactively.
pd.options.display.max_rows = 500
pd.options.display.max_columns = 500

# Sample rate used for all audio loading/resampling.
TARGET_SR = 32000

# Model choice and the checkpoint restored by get_model().
model_config = {
    "base_model_name": "RESNET",
    "pretrained": True,
    "num_classes": 264,
    "trained_weights": "trained_model_4153237.pth"
}

# Keyword arguments passed to librosa.feature.melspectrogram.
melspectrogram_parameters = {
    "n_mels": 128,
    "fmin": 20,
    "fmax": 16000
}

# Local dataset layout (Windows drive M:). NOTE: the read_csv below runs at
# import time and requires the drive to be mounted.
INPUT_ROOT = Path("M:/")
RAW_DATA = INPUT_ROOT / "birdsong-recognition"
TRAIN_AUDIO_DIR = RAW_DATA / "train_audio"
TEST_AUDIO_DIR = RAW_DATA / "test_audio"
train_csv = pd.read_csv(RAW_DATA / "train.csv")
# TEST AUDIO STUFF _ NOT NEEDED NOW
# if not TEST_AUDIO_DIR.exists():
# TEST_AUDIO_DIR = INPUT_ROOT / "birdcall-check" / "test_audio"
# test = pd.read_csv(INPUT_ROOT / "birdcall-check" / "test.csv")
# else:
# test = pd.read_csv(RAW_DATA / "test.csv")
# sub = pd.read_csv("../input/birdsong-recognition/sample_submission.csv")
# print(sub)
# print("_____sub")
# sub.to_csv("submission.csv", index=False) # this will be overwritten if everything goes well
# print(sub)
def set_seed(seed: int = 42):
    """Make torch's CPU and CUDA RNGs deterministic for this seed.

    Note: python's `random` and numpy are intentionally left unseeded here,
    matching the original behavior.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # deferred no-op until CUDA is initialised


# Fix the global seed once at import time.
set_seed(1213)
def mono_to_color(X: np.ndarray,
                  mean=None,
                  std=None,
                  norm_max=None,
                  norm_min=None,
                  eps=1e-6):
    """Convert a single-channel spectrogram into a 3-channel uint8 image.

    Stacks X into three identical channels, standardizes with (mean, std),
    then min-max normalizes into [0, 255]. If the dynamic range after
    standardization is below `eps`, an all-zero image is returned.

    Adapted from https://www.kaggle.com/daisukelab/creating-fat2019-preprocessed-data

    Args:
        X: 2-D array (e.g. a mel spectrogram in dB).
        mean, std: standardization statistics; computed from X when None.
        norm_max, norm_min: clipping bounds; computed from the data when None.
        eps: numerical floor guarding against division by zero.
    Returns:
        uint8 array of shape X.shape + (3,).
    """
    # Stack X as [X, X, X]
    X = np.stack([X, X, X], axis=-1)
    # BUG FIX: the original used `mean or X.mean()` (etc.), which silently
    # ignored a legitimate zero passed for mean/std/norm_min. Use explicit
    # None-checks instead; behavior is unchanged for default callers.
    if mean is None:
        mean = X.mean()
    X = X - mean
    if std is None:
        std = X.std()
    Xstd = X / (std + eps)
    _min, _max = Xstd.min(), Xstd.max()
    if norm_max is None:
        norm_max = _max
    if norm_min is None:
        norm_min = _min
    if (_max - _min) > eps:
        # Clip to [norm_min, norm_max] and rescale to [0, 255].
        V = Xstd
        V[V < norm_min] = norm_min
        V[V > norm_max] = norm_max
        V = 255 * (V - norm_min) / (norm_max - norm_min)
        V = V.astype(np.uint8)
    else:
        # Degenerate (flat) input: return all zeros.
        V = np.zeros_like(Xstd, dtype=np.uint8)
    return V
def get_model(args: tp.Dict):
    """Build the ResNeSt-style classifier and restore trained weights.

    `args` must provide "num_classes" and "trained_weights" (a checkpoint
    path loadable by torch.load). NOTE(review): `ResNet` and `Bottleneck`
    are not imported in this block — presumably they come from the
    `res_util`/`bird_co` star imports at the top of the file; confirm.
    """
    model = ResNet(
        Bottleneck, [3, 4, 6, 3],
        radix=1, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=32, avg_down=True,
        avd=True, avd_first=True)
    # Replace the stock classification head with the baseline notebook's MLP.
    del model.fc
    model.fc = nn.Sequential(
        nn.Linear(2048, 1024), nn.ReLU(), nn.Dropout(p=0.2),
        nn.Linear(1024, 1024), nn.ReLU(), nn.Dropout(p=0.2),
        nn.Linear(1024, args["num_classes"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    # map_location keeps CPU-only machines working with GPU-trained weights.
    state_dict = torch.load(args["trained_weights"], map_location=device)
    model.load_state_dict(state_dict)
    return model
# SAVE ALL BIRDS WAV IMAGE TO FILE
# a.tofile('test2.dat')
# c = np.fromfile('test2.dat', dtype=int)
import os
def pad(arr, lenz):
    """Right-pad 1-D array `arr` with zeros to length `lenz`.

    Returns `arr` unchanged (same object, no truncation) when it is already
    at least `lenz` long.
    """
    if arr.shape[0] >= lenz:
        return arr
    # Preserve the input dtype; the original always allocated float64, which
    # silently widened float32 audio buffers.
    zz = np.zeros(lenz, dtype=arr.dtype)
    zz[:len(arr)] = arr
    return zz
def save_dataset_to_file(df, sta, leng, img_size, melspectrogram_parameters, bird_num_to_start):
    """Precompute mel-spectrogram images for every clip listed in `df` and
    save each as '<row index>_<ebird_code>.npy' under M:/birdsong-recognition/np/.

    Args:
        df: train metadata with `ebird_code` and `filename` columns.
        sta: start offset inside each clip, in seconds.
        leng: window length, in seconds.
        img_size: output image height in pixels (width scales proportionally).
        melspectrogram_parameters: kwargs for librosa.feature.melspectrogram.
        bird_num_to_start: first row index to process (only used by the
            commented-out resume logic below).
    """
    if not os.path.exists('M:/birdsong-recognition/np/'):
        os.makedirs('M:/birdsong-recognition/np/')
    for index, row in df.iterrows():
        # To resume a partially completed run, re-enable this skip:
        # if index < bird_num_to_start:
        #     continue
        try:
            print(str(row.ebird_code) + "/" + row.filename)
            print(f"index: {index}")
            SR = 32000
            print(f"in datagen: {index}")
            clip, _ = librosa.load(os.path.join("M:/birdsong-recognition/train_audio/" + str(row.ebird_code) + "/" + str(row.filename)),
                                   sr=SR,
                                   mono=True,
                                   res_type="kaiser_fast")
            # Cut a [sta, sta + leng) second window, zero-padding short clips.
            start_seconds = sta
            length_in_sec = leng
            start_index = start_seconds * SR
            end_index = start_index + (length_in_sec * SR)
            temp_y = clip[start_index:end_index].astype(np.float32)
            y = pad(temp_y, (end_index - start_index))
            melspec = librosa.feature.melspectrogram(y, sr=SR, **melspectrogram_parameters)
            melspec = librosa.power_to_db(melspec).astype(np.float32)
            # 3-channel uint8 image -> resize -> CHW float32 in [0, 1].
            image = mono_to_color(melspec)
            height, width, _ = image.shape
            image = cv2.resize(image, (int(width * img_size / height), img_size))
            image = np.moveaxis(image, 2, 0)
            image = (image / 255.0).astype(np.float32)
            np.save('M:/birdsong-recognition/np/' + str(index) + '_' + row.ebird_code, image)
        except Exception as e:
            # BUG FIX: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid the failure cause.
            print("failed for some reason, index below")
            print(index)
            print(repr(e))
# VERSION 3.0 - FROM FILE - FullDataset
import glob
import re
def pad(arr, lenz):
    """Right-pad 1-D array `arr` with zeros to length `lenz`.

    Duplicate of the earlier `pad` definition (this later one wins at import
    time). Returns `arr` unchanged (no truncation) when already long enough.
    """
    if arr.shape[0] >= lenz:
        return arr
    # Preserve the input dtype; the original always allocated float64, which
    # silently widened float32 audio buffers.
    zz = np.zeros(lenz, dtype=arr.dtype)
    zz[:len(arr)] = arr
    return zz
class FullDataset_new_from_file(data.Dataset):
    """Dataset reading precomputed spectrogram .npy files (see
    save_dataset_to_file) from M:/birdsong-recognition/np/.

    When `indy` >= 0 the dataset exposes exactly one item: row `indy`.
    """

    def __init__(self, df: pd.DataFrame, indy=-1):
        self.df = df      # train metadata (only its length is used here)
        self.indy = indy  # fixed row index, or -1 for the whole frame

    def __len__(self):
        if self.indy > -1:
            return 1
        else:
            return len(self.df)

    def __getitem__(self, index: int):
        SR = 32000
        if self.indy > -1:
            index = self.indy
        # Files are named '<index>_<bird name>.npy'; the bird name between the
        # underscore and the extension is recovered with a regex.
        filez = glob.glob('M:/birdsong-recognition/np/'+str(index)+'_*.npy')
        bird_name = re.search(r"(?<=_).*(?=\.)", filez[0]).group(0)
        # Saved images are CHW with 3 channels and height 224; width varies.
        image = np.load(filez[0]).reshape(3, 224, -1)
        # NOTE(review): BIRD_CODE is not defined in this block — presumably it
        # comes from the `bird_co` star import; confirm.
        return image, BIRD_CODE.get(bird_name)
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
# All model inputs are cast to this CUDA tensor type. Referencing the type is
# harmless on CPU-only machines, but actually using it requires a GPU.
dtype = torch.cuda.FloatTensor
# Module-level handles: populated by train(), read by check_guess()/validate().
model_train = None
valid_sampler = None
def train(train_df: pd.DataFrame,
          test_audio: Path,
          model_config: dict,
          mel_params: dict,
          target_sr: int,
          threshold=0.5,
          how_far_to_go=25,
          batches=100,
          epochs=100
          ):
    """Fine-tune the pretrained classifier on precomputed spectrogram files.

    Uses the first `how_far_to_go` rows of `train_df`, a 95/5 random split
    (seeded), SGD + cross-entropy, and requires CUDA (`device` is hard-coded
    to "cuda"). Stores the model and the validation sampler in the module
    globals `model_train` / `valid_sampler` and returns the trained model.

    NOTE(review): `test_audio`, `mel_params`, `target_sr` and `threshold`
    are accepted but never used in this body — confirm they can be dropped.
    """
    global model_train
    global valid_sampler
    device = torch.device("cuda")
    model_train = get_model(model_config)
    model_train = model_train.cuda()
    warnings.filterwarnings("ignore")
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = optim.SGD(model_train.parameters(), lr=0.01, momentum=0.5)
    # Dataset items are loaded from the precomputed .npy files on disk.
    dataset = FullDataset_new_from_file(df=train_df[:how_far_to_go])
    validation_split = .05
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    # Deterministic shuffle so the train/validation split is reproducible.
    np.random.seed(42)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    training_generator = torch.utils.data.DataLoader(dataset, batch_size=batches,
                                                     sampler=train_sampler)
    max_epochs = epochs
    count = 0  # NOTE(review): never used — candidate for removal
    all_losses = []
    for epoch in range(max_epochs):
        print(f"epoch {epoch}")
        batch_losses = []
        # Training loop pattern after
        # https://gist.github.com/conormm/5b26a08029b900520bcd6fcd1f5712a0
        for ix, (local_batch, local_labels) in enumerate(training_generator):
            if torch.cuda.is_available():
                _X, _y = local_batch.type(dtype).cuda(), local_labels.long().cuda()
            # ========== forward pass ===============
            preds = model_train(_X)
            loss = criterion(preds, _y)
            # ========== backward pass ==============
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch_losses.append(loss.item())
            all_losses.append(loss.item())
            torch.cuda.empty_cache()
            # Mean of sqrt(batch losses), recomputed each batch but only
            # printed every 100 batches.
            mbl = np.mean(np.sqrt(batch_losses)).round(3)
            if ix % 100 == 0:
                print("index {}, Batch loss: {}".format(ix, mbl))
        print("Epoch [{}/{}], Batch loss: {}".format(epoch, max_epochs, np.mean(all_losses).round(3)))
    print("returning model ")
    return model_train
# import re
# import glob
# filez = glob.glob('./np/'+str(2)+'_*.npy')
# print(filez)
# image = np.fromfile(filez[0], dtype=float)
# image = np.load(filez[0])
# image.reshape(3,224,-1).shape
# a
def check_guess(numba):
    """Return True when the trained model's top-1 prediction for training row
    `numba` matches that row's true label.

    Relies on the module globals `model_train` (set by train()), `train_csv`
    and `dtype`; requires CUDA. The loader yields exactly one batch, so the
    return inside the loop fires on the first iteration.
    """
    datasetz = FullDataset_new_from_file(df=train_csv, indy=numba)
    loader = data.DataLoader(datasetz, batch_size=1, shuffle=False)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_train.eval()
    for ix, (img, lab) in enumerate(loader):
        with torch.no_grad():
            # Raw logits (no sigmoid/softmax applied).
            prediction_1 = model_train(img.type(dtype).cuda())
        proba = (prediction_1.detach().cpu().numpy().reshape(-1))
        events = proba >= .8  # NOTE(review): computed but unused
        # Class indices sorted by descending score; labels[0] is the top guess.
        labels = np.argsort(-proba).tolist()
        return (INV_BIRD_CODE[lab.item()] == INV_BIRD_CODE[labels[0]])
def validate():
    """Print top-1 accuracy on 20 random training rows and on the held-out
    validation sampler.

    Reads the module globals `valid_sampler` (set by train()) and
    `how_far_to_go` (defined later at module level — valid at call time).
    """
    print(f"validatins indices: {list(valid_sampler)}")
    correct_arr = []
    for i in range(20):
        wasCorrect = check_guess(random.randint(0, how_far_to_go))
        correct_arr.append(wasCorrect)
    print("{0:.0%} Correct - FROM RANDO TRAIN SET".format(np.asarray(correct_arr).mean()))
    correct_arr = []
    for i in list(valid_sampler):
        wasCorrect = check_guess(i)
        correct_arr.append(wasCorrect)
    print("{0:.0%} Correct - FROM VALID SAMPLER".format(np.asarray(correct_arr).mean()))
# Number of training rows (precomputed .npy files) available/used.
how_far_to_go = 21373
print(how_far_to_go)
# BIG STEP - long process: precompute the spectrogram .npy files once.
# save_dataset_to_file(train_csv,5,5,224,melspectrogram_parameters,12327)
### TRAIN AND GUESS
# train(train_df=train_csv[:],
#       test_audio=TRAIN_AUDIO_DIR,
#       model_config=model_config,
#       mel_params=melspectrogram_parameters,
#       target_sr=TARGET_SR,
#       threshold=0.5,
#       how_far_to_go = how_far_to_go,
#       batches = 22, #was 100
#       epochs = 18
#       )
# torch.save(model_train.state_dict(), './trained_model_'+str(random.randint(1,10000000))+'.pth')
##### CHECK TRAINING
# validate()
# TEST STUFF
# _________________________
# Mock test set; the real competition paths are kept in the inline comments.
TEST_FOLDER = 'M:/birdsong-recognition/example_test_audio/' #'M:/birdsong-recognition/test_audio/'
test_df = pd.read_csv('M:/birdsong-recognition/mock_example_test_audio_summary_2.csv') #('M:/birdsong-recognition/test.csv')
def prediction(test_df: pd.DataFrame,
               test_audio: Path,
               model_config: dict,
               mel_params: dict,
               target_sr=32000,
               threshold=0.5):
    """Predict bird labels for every row of `test_df` and return a submission
    DataFrame with columns row_id / birds.

    Sites 1/2 rows carry a `seconds` column and are scored on the 5 s window
    ending at that time; site_3 rows have no timestamps, so up to 10 random
    5 s windows are scored and the union of detected birds is reported.
    "nocall" is emitted when nothing passes the cutoff.

    NOTE(review): `mel_params`, `target_sr` and `threshold` are accepted but
    unused — the global `melspectrogram_parameters` and a hard-coded cutoff
    of 3 are used instead; confirm before relying on these parameters.
    Requires CUDA and the global TEST_FOLDER / INV_BIRD_CODE.
    """
    model_pred = get_model(model_config)
    warnings.filterwarnings("ignore")
    prediction_dfs = []
    leng = 5
    SR = 32000
    img_size = 224
    for index, row in test_df.iterrows():
        if row.site != "site_3":
            SR = 32000
            print(f"in datagen: {index}")
            clip, _ = librosa.load(os.path.join(TEST_FOLDER + row.audio_id + '.mp3'),
                                   sr=SR,
                                   mono=True,
                                   res_type="kaiser_fast")
            # Score the 5 s window ending at row.seconds.
            start_seconds = int(row.seconds)-5
            length_in_sec = leng
            start_index = start_seconds * SR
            end_index = start_index + (length_in_sec * SR)
            temp_y = clip[start_index:end_index].astype(np.float32)
            y = pad(temp_y, (end_index-start_index))
            melspec = librosa.feature.melspectrogram(y, sr=SR, **melspectrogram_parameters)
            melspec = librosa.power_to_db(melspec).astype(np.float32)
            # Same preprocessing as training: 3-channel uint8 -> resize ->
            # CHW float32 in [0, 1].
            image = mono_to_color(melspec)
            height, width, _ = image.shape
            image = cv2.resize(image, (int(width * img_size / height), img_size))
            image = np.moveaxis(image, 2, 0)
            image = (image / 255.0).astype(np.float32)
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            model_pred.eval()
            with torch.no_grad():
                prediction_1 = model_pred(torch.from_numpy(np.expand_dims(image, axis=0)).type(dtype = torch.cuda.FloatTensor).cuda())
            # Negate logits so that np.argsort yields descending-score order.
            proba = -prediction_1.detach().cpu().numpy().reshape(-1)
            my_cutoff = 3
            proba_filter = [proba < my_cutoff]
            print(np.sum(proba_filter))
            labels = np.argsort(proba).tolist()
            print(labels)
            bz = ""
            # Space-separated list of every bird whose (negated) score clears
            # the cutoff.
            for boid in labels[:np.sum(proba_filter)]:
                bz = bz + str(INV_BIRD_CODE[boid]) + " "
            print(f"these boidz: {bz}")
            if len(bz) < 1:
                bz = "nocall"
            prediction_df = pd.DataFrame({
                "row_id": [str(row.row_id)],
                "birds": [bz]
                })
            prediction_dfs.append(prediction_df)
        else:
            # site_3: no per-row timestamp, so sample random 5 s windows.
            print(row.site)
            SR = 32000
            print(f"in datagen: {index}")
            clip, _ = librosa.load(os.path.join(TEST_FOLDER + row.audio_id + '.mp3'),
                                   sr=SR,
                                   mono=True,
                                   res_type="kaiser_fast")
            leng_np = clip.shape[0]//32000
            print(leng_np)
            num_five_intervals = leng_np//5
            print(num_five_intervals)
            # Shuffle the candidate 5 s window indices and take up to 10.
            c = np.arange(num_five_intervals-2)
            np.random.shuffle(c)
            how_many_random_samples = 10
            endz = len(c) if len(c) < how_many_random_samples else how_many_random_samples
            bz = []
            for time_index in c[:endz]:
                print(time_index*5)
                start_seconds = time_index*5
                length_in_sec = leng
                start_index = start_seconds * SR
                end_index = start_index + (length_in_sec * SR)
                temp_y = clip[start_index:end_index].astype(np.float32)
                y = pad(temp_y, (end_index-start_index))
                melspec = librosa.feature.melspectrogram(y, sr=SR, **melspectrogram_parameters)
                melspec = librosa.power_to_db(melspec).astype(np.float32)
                image = mono_to_color(melspec)
                height, width, _ = image.shape
                image = cv2.resize(image, (int(width * img_size / height), img_size))
                image = np.moveaxis(image, 2, 0)
                image = (image / 255.0).astype(np.float32)
                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
                model_pred.eval()
                with torch.no_grad():
                    prediction_1 = model_pred(torch.from_numpy(np.expand_dims(image, axis=0)).type(dtype = torch.cuda.FloatTensor).cuda())
                proba = -prediction_1.detach().cpu().numpy().reshape(-1)
                my_cutoff = 3
                proba_filter = [proba < my_cutoff]
                print(np.sum(proba_filter))
                labels = np.argsort(proba).tolist()
                print(labels)
                # Accumulate every detected class index across windows.
                for boid in labels[:np.sum(proba_filter)]:
                    bz.append(boid)
                print(f"these boidz: {bz}")
                print(len(bz))
            # De-duplicate across windows and render the final label string.
            b_string = ""
            if len(bz) == 0:
                b_string = "nocall"
            else:
                for boid in list(set(bz)):
                    b_string = b_string + str(INV_BIRD_CODE[boid]) + " "
            print(f"total boidz: {b_string}")
            prediction_df = pd.DataFrame({
                "row_id": [str(row.row_id)],
                "birds": [b_string]
                })
            prediction_dfs.append(prediction_df)
    prediction_df = pd.concat(prediction_dfs, axis=0, sort=False).reset_index(drop=True)
    return prediction_df
def do_sub():
    """Run prediction over the mock test set and write submission.csv."""
    submission = prediction(
        test_df=test_df,
        test_audio=TEST_FOLDER,
        model_config=model_config,
        mel_params=melspectrogram_parameters,
        target_sr=32000,
        threshold=0.56)
    print(submission)
    submission.to_csv("submission.csv", index=False)
# RUN TEST FILE AND CREATE SUBMISSION
# NOTE(review): executes at import time — merely importing this module
# triggers a full prediction run.
do_sub()
# TODO
# 1. change filename_seconds to row_id
# ----make sure prediction works with test.csv
# 2. make sure site stuff works
import random

# NOTE(review): Python 2 code (print statements, dict.iteritems, tuple
# parameters in lambdas, implicit-list map) — it will not run under Python 3.
# It appears to simulate 3 rounds of an 18-hole, 4-player golf-style game:
# `p` reads like per-hole par values and `b` maps a score of par-1/par/par+1
# to 'b' / 0 / 'B' — TODO confirm intent.
p = [4, 3, 4, 4, 5, 3, 5, 4, 4, 5, 4, 4, 3, 4, 5, 4, 3, 4]
b = ['b', 0, 'B']
# f[round][player] -> [total score, per-hole marks]
f = [{i: [0, 0] for i in range(4)} for z in range(3)]
w = None  # previous round's winner index; None in round 1
for r in range(3):
    c = True
    a = [0, 1, 2, 3]
    m = None
    while c:
        # Each of the 4 players scores par +/- 1 on every hole.
        t = [map(lambda x: random.randint(x-1, x+1), p) for i in range(4)]
        s = [sum(i) for i in t]
        # Replace each hole score with its 'b'/'B' mark unless it equals par.
        g = [[l if b[l-p[i]+1] == 0 else b[l-p[i]+1] for i, l in enumerate(l)] for l in t]
        m = min(s)
        # Re-roll the whole round until the lowest total is unique.
        if s.count(m) == 1:
            c = False
    if w is not None:
        # The previous winner is assigned this round's worst (max) total.
        l = max(s)
        i = s.index(l)
        f[r][w] = [l, g[i]]
        del s[i]
        del g[i]
        a.remove(w)
    for i in range(len(a)):
        f[r][a[i]] = [s[i], g[i]]
    # NOTE(review): after the deletions above, this is an index into the
    # shortened `s`, not a player id — it likely should be a[s.index(min(s))].
    # Confirm before trusting the winner carried into the next round.
    w = s.index(min(s))
for r in f:
    print "Round %d" % (f.index(r)+1)
    # Sort players by total score (ascending) for the round report.
    for p, q in sorted(r.iteritems(), key=lambda (x, y): y[0]):
        print "Player %d: %s - %d" % ((p+1), reduce(lambda x, y: '{} {}'.format(x, y), q[1]), q[0])
|
# file_write.py
# Demonstrates writing text to a file. Open modes: 'w' truncates/creates,
# 'x' fails if the file already exists, 'a' appends.
# BUG FIX: the original opened the file three times in a row ('w', then 'x',
# then 'a'); the 'x' open always raised FileExistsError right after 'w'
# created the file, so nothing was ever written and the first handle leaked.
# A single context-managed 'w' open performs all the writes and guarantees
# the file is closed; utf-8 is fixed explicitly so the Chinese text is not
# at the mercy of the platform's default encoding.
try:
    with open("mynote.txt", 'w', encoding="utf-8") as fw:
        print("打开文件成功!")
        fw.write("你好!")
        print("写入文件成功能")
        fw.write("ABC")
        # writelines() adds no separators between the list items.
        fw.writelines(["这是第一个字符串", '这是第二个字符串'])
        fw.write("1234\n")
        fw.write("这是第二行!")
    print("关闭文件成功")
except OSError:
    print("打开写文件失败!")
# Python Crash Course chapter 2 string exercises (tasks 2.3 - 2.7).

# task 2.3 - personalized greeting
name = "Eric"
message = f"Hello {name}, would you like to learn some Python today?"
print(message)

# task 2.4 - name case transformations
for variant in (name.upper(), name.lower(), name.title()):
    print(variant)

# task 2.5 - famous quote
message = 'Albert Einstein once said, "A person who never made a mistake never tried anything new."'
print(message)

# task 2.6 - the same quote, assembled from variables
famous_name = "Albert Einstein"
quote = "A person who never made a mistake never tried anything new."
message = f'{famous_name} once said, "{quote}"'
print(message)

# task 2.7 - surrounding whitespace and the strip() family
for famous_name in ("\t Albert Einstein", "\n Albert Einstein", "\n \t Albert Einstein \t"):
    message = f'{famous_name} once said, "{quote}"'
    print(message)
for trimmed in (famous_name.lstrip(), famous_name.rstrip(), famous_name.strip()):
    message = f'{trimmed} once said, "{quote}"'
    print(message)
def divides(n, p):
    """Return True when p evenly divides n."""
    return not n % p
def divides_list(n, l):
    """Return True when any element of l divides n."""
    return any(divides(n, p) for p in l)
def next_prime(l):
    """Given the list of all primes up to max(l), return the next prime.

    Scans upward from max(l) + 1 until a candidate has no divisor in l.
    """
    candidate = max(l) + 1
    while divides_list(candidate, l):
        candidate += 1
    return candidate
# Grow the prime list to 10002 entries by repeated trial division,
# printing each newly found prime (the seed value 2 is not printed).
prime_list = [2]
for i in range(1, 10002):
    n = next_prime(prime_list)
    prime_list.append(n)
    print(n)
|
from django.urls import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test import Client
from tests.factories.gbe_factories import (
ActFactory,
BioFactory,
ConferenceFactory,
ProfileFactory,
)
from tests.functions.gbe_functions import (
grant_privilege,
login_as,
assert_alert_exists,
)
from gbetext import (
default_act_draft_msg,
default_act_submit_msg,
act_coord_instruct,
missing_profile_info,
no_comp_msg,
)
from gbe.models import (
Conference,
UserMessage,
)
from tests.functions.ticketing_functions import setup_fees
from ticketing.models import Transaction
class TestCoordinateAct(TestCase):
'''Tests for create_act view'''
view_name = 'act_coord_create'
def setUp(self):
self.client = Client()
@classmethod
def setUpTestData(cls):
cls.url = reverse(cls.view_name, urlconf='gbe.urls')
Conference.objects.all().delete()
cls.factory = RequestFactory()
cls.performer = BioFactory(
contact__phone="111-222-3333",
contact__user_object__first_name="first",
contact__user_object__last_name="last")
cls.current_conference = ConferenceFactory(accepting_bids=True)
UserMessage.objects.all().delete()
cls.privileged_user = ProfileFactory.create().user_object
grant_privilege(cls.privileged_user,
'Act Coordinator',
'assign_act')
grant_privilege(cls.privileged_user,
'Act Reviewers')
cls.url = reverse(cls.view_name, urlconf='gbe.urls')
def get_act_form(self, persona=False, valid=True):
if not persona:
persona = self.performer
form_dict = {'theact-b_title': 'An act',
'theact-track_title': 'a track',
'theact-track_artist': 'an artist',
'theact-b_description': 'a description',
'theact-bio': persona.pk,
'theact-act_duration': '1:00',
'theact-b_conference': self.current_conference.pk,
'submit': 1,
}
if not valid:
del(form_dict['theact-b_description'])
return form_dict
def post_act_submission(self, next_page=None, persona=False):
act_form = self.get_act_form(persona)
url = self.url
if next_page is not None:
url = "%s?next=%s" % (url, next_page)
login_as(self.privileged_user, self)
response = self.client.post(url, data=act_form, follow=True)
return response, act_form
def test_bid_act_get_form(self):
login_as(self.privileged_user, self)
response = self.client.get(self.url)
self.assertContains(response, "Create Act for Coordinator")
self.assertContains(response, act_coord_instruct)
self.assertContains(response, "Submit & Review")
def test_act_bid_post_form_not_valid(self):
login_as(self.privileged_user, self)
data = self.get_act_form(valid=False)
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Create Act for Coordinator")
self.assertContains(response, "This field is required.")
def test_act_submit_act_succeed(self):
tickets = setup_fees(self.current_conference, is_act=True)
response, data = self.post_act_submission()
just_made = self.performer.acts.all().first()
self.assertRedirects(response, reverse('act_review',
urlconf="gbe.urls",
args=[just_made.id]))
self.assertContains(response, just_made.b_title)
assert_alert_exists(
response, 'success', 'Success', default_act_submit_msg)
self.assertTrue(Transaction.objects.filter(
purchaser__matched_to_user=just_made.performer.contact.user_object,
ticket_item__ticketing_event__act_submission_event=True,
ticket_item__ticketing_event__conference=self.current_conference
).exists())
def test_act_submit_draft(self):
tickets = setup_fees(self.current_conference, is_act=True)
act_form = self.get_act_form()
del(act_form['submit'])
act_form['draft'] = 1
login_as(self.privileged_user, self)
response = self.client.post(self.url, data=act_form, follow=True)
just_made = self.performer.acts.all().first()
self.assertRedirects(response, reverse('act_review_list',
urlconf="gbe.urls"))
assert_alert_exists(
response, 'success', 'Success', default_act_draft_msg)
self.assertTrue(Transaction.objects.filter(
purchaser__matched_to_user=just_made.performer.contact.user_object,
ticket_item__ticketing_event__act_submission_event=True,
ticket_item__ticketing_event__conference=self.current_conference
).exists())
def test_act_submit_act_incomplete_profile(self):
incomplete = BioFactory()
tickets = setup_fees(self.current_conference, is_act=True)
response, data = self.post_act_submission(persona=incomplete)
just_made = incomplete.acts.all().first()
data['theact-bio'] = incomplete.pk
self.assertRedirects(response, "%s?next=%s" % (
reverse('admin_profile',
urlconf="gbe.urls",
args=[incomplete.contact.pk]),
reverse('act_review', urlconf="gbe.urls", args=[just_made.id])))
assert_alert_exists(
response, 'warning', 'Warning', missing_profile_info)
self.assertTrue(Transaction.objects.filter(
purchaser__matched_to_user=incomplete.contact.user_object,
ticket_item__ticketing_event__act_submission_event=True,
ticket_item__ticketing_event__conference=self.current_conference
).exists())
def test_act_submit_act_succeed_w_redirect(self):
tickets = setup_fees(self.current_conference, is_act=True)
response, data = self.post_act_submission(next_page="/theredirect")
just_made = self.performer.acts.all().first()
self.assertRedirects(response, "%s?next=/theredirect" % (
reverse('act_review', urlconf="gbe.urls", args=[just_made.id])))
self.assertContains(response, just_made.b_title)
self.assertContains(response, "/theredirect")
assert_alert_exists(
response, 'success', 'Success', default_act_submit_msg)
self.assertTrue(Transaction.objects.filter(
purchaser__matched_to_user=just_made.performer.contact.user_object,
ticket_item__ticketing_event__act_submission_event=True,
ticket_item__ticketing_event__conference=self.current_conference
).exists())
def test_act_submit_act_no_viable_ticket(self):
response, data = self.post_act_submission()
just_made = self.performer.acts.all().first()
self.assertRedirects(response, reverse('act_review',
urlconf="gbe.urls",
args=[just_made.id]))
assert_alert_exists(
response, 'danger', 'Error', no_comp_msg)
def test_bad_priv(self):
login_as(self.performer.contact, self)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
    def test_act_title_collision(self):
        # A new act whose title duplicates an existing act for the same bio
        # and conference is rejected with a validation message.
        data = self.get_act_form()
        original = ActFactory(
            b_conference=self.current_conference,
            bio=self.performer)
        data['theact-b_title'] = original.b_title
        login_as(self.privileged_user, self)
        response = self.client.post(self.url, data=data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "The act has the same title")
|
import sys
import os

# Make the sibling ``call_back`` package importable BEFORE importing from it.
# (Previously the star-import ran first, so these path entries never helped
# resolve it; the bare ``os.path.abspath(...)`` expression was a discarded
# no-op and has been removed.)
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "call_back"))
sys.path.append("/home/wittybrains/airflow_learning/call_back")

from call_back import *  # noqa: E402,F403 — must follow the path setup above
|
#!/usr/bin/env python3
import psycopg2
# Running totals for the accounting report (choice 3); mutated via ``global``
# inside main().
if_total = 0
of_total = 0
# Interactive console front-end for the GnG charity database.
# NOTE(review): database credentials are hard-coded in source — move them to
# environment variables or a config file before sharing this code.
def main():
    dbconn = psycopg2.connect(host='studsql.csc.uvic.ca', user='paraguay', password='9MM|QscGV4')
    cursor = dbconn.cursor()
    global if_total
    global of_total
    print("""Please select what would you like to do, press a number between 1-5:
    1: Get some basic information about GnG through 10 queries
    2: Setting up a campaign with volunteers, activities, etc.
    3: Get some accounting information
    4: Membership history
    5: Look at the existing attributs in a specific table
    """)
    # Keep prompting until an integer in [1, 5] is entered.
    while True:
        ch = input("Please enter your choice of selection: ")
        try:
            choice = int(ch)
            if choice < 1 or choice > 5:
                print("Invalid input!")
                print("Please enter a number from 1-5")
                continue
        except ValueError:
            print("Invalid input!")
            print("Please enter a number from 1-5")
            continue
        else:
            break
    # Choice 1: run one of ten canned reports (database views q1..q10).
    if choice == 1:
        print("""Enter a query number from 1-10:
        1. List the fund number, name of the funder, and the date of fundraising that
        a funder donated between $100 and $2000 during fundraising.
        2. List the campaign ID, city of campaign, and length of campaign(in weeks) that
        the employee who is in charge of the campaign is a salaried employee.
        3. List the name of the funder and payment method who donated more than $50 and
        use the same payment method as other funders whose name is May and donated less
        than 200 during fundraising.
        4. List all the information on website that will take place in Victoria.
        5. List campaign ID, status of campaign, manager's first name, and manager's last
        name on website for campaigns on the same date, but with different status.
        6. List the cost and length(in weeks) of campaign for campaigns that have the longest
        length.
        7. List the date on website and date for fundraising that the payment method during
        fundraising is Visa.
        8. List date of fundraising, name of funder, amount, payment method when funder name
        is the same as a donor name.
        9. Find how many people donated by cheque during fundraising events.
        10. Find the average cost of all campaigns.
        """)
        while True:
            ans = input("Please enter a query number from 1-10: ")
            try:
                val = int(ans)
                if val < 1 or val > 10:
                    print("Invalid input!")
                    print("Please enter a number from 1-10")
                    continue
            except ValueError:
                print("Invalid input!")
                print("Please enter a number from 1-10")
                continue
            else:
                break
        # Each report is pre-defined as a view named q<N>.
        query1 = "SELECT * FROM q1"
        query2 = "SELECT * FROM q2"
        query3 = "SELECT * FROM q3"
        query4 = "SELECT * FROM q4"
        query5 = "SELECT * FROM q5"
        query6 = "SELECT * FROM q6"
        query7 = "SELECT * FROM q7"
        query8 = "SELECT * FROM q8"
        query9 = "SELECT * FROM q9"
        query10 = "SELECT * FROM q10"
        if val == 1:
            cursor.execute(query1)
        elif val == 2:
            cursor.execute(query2)
        elif val == 3:
            cursor.execute(query3)
        elif val == 4:
            cursor.execute(query4)
        elif val == 5:
            cursor.execute(query5)
        elif val == 6:
            cursor.execute(query6)
        elif val == 7:
            cursor.execute(query7)
        elif val == 8:
            cursor.execute(query8)
        elif val == 9:
            cursor.execute(query9)
        elif val == 10:
            cursor.execute(query10)
        # Print the selected report row by row, space-separated.
        result = cursor.fetchall()
        for row in result:
            for col in row:
                print(col, end=' ')
            print()
    # Choice 2: campaign set-up — add volunteers, schedule events, or check
    # a campaign's status.
    elif choice == 2:
        while True:
            print("1: Add new volunteers to the organization.")
            print("2: Scheduling events.")
            print("3: Look at an campaign's status")
            ph2_1 = input("Press 1, 2, or 3 to modify the campaigns:")
            try:
                ph2_2 = int(ph2_1)
                if ph2_2 !=1 and ph2_2 !=2 and ph2_2 !=3:
                    print("1: Add new volunteers to the organization.")
                    print("2: Scheduling events.")
                    print("3: Look at an campaign's status")
                    print("Press 1, 2, or 3 to modify the campaigns:")
                    continue
            except ValueError:
                print("1: Add new volunteers to the organization.")
                print("2: Scheduling events.")
                print("3: Look at an campaign's status")
                print("Press 1, 2, or 3 to modify the campaigns:")
                continue
            else:
                ph2_2 = int(ph2_1)
                break
        if ph2_2 == 1:
            # New employee ids continue from the current maximum empid.
            get_max_id = cursor.execute("SELECT max(empid) FROM employee")
            max_id = cursor.fetchall()
            for i in max_id:
                init_empID = i[0]
            init_empID += 1
            count_e = 0
            e1 = 1
            # Insert employees until the operator enters 0.
            while e1 != 0:
                lastname = input("Enter employee's last name: ")
                firstname = input("Enter employee's first name: ")
                eType = input("Enter employee's type(must choose one in between: Volunteer2, Volunteer3, Salaried): ")
                eventHrs = input("Enter total hours for a event: ")
                insert_query1 = (
                    "INSERT INTO employee(empID, lastname, firstname, eType, eventHrs)"
                    "VALUES (%s,%s,%s,%s,%s)"
                )
                insert_record1 = (init_empID, lastname, firstname, eType, eventHrs)
                cursor.execute(insert_query1, insert_record1)
                init_empID += 1
                dbconn.commit()
                count_e += 1
                e2 = input("Press 0 to exit, press anything to continue: ")
                try:
                    e1 = int(e2)
                    if e1 == 0:
                        break
                    else:
                        continue
                except ValueError:
                    continue
            print(count_e, "employee(s) have added")
        elif ph2_2 == 2:
            # NOTE(review): uses min(campid) as the seed for new campaign ids
            # (unlike the max used for employees) — confirm this is intended.
            get_min_cid = cursor.execute("SELECT min(campid) FROM website")
            min_cid = cursor.fetchall()
            for j in min_cid:
                init_campID = j[0]
            init_campID += 1
            init_status = 'phrase1'
            count_c = 0
            c1 = 1
            # Insert campaigns until the operator enters 0.
            while c1 != 0:
                c_date = input("Enter a date of a campaign(ex. YYYYMMDD): ")
                m_first = input("Enter manager's first name who is in charge of the campaign: ")
                m_last = input("Enter manager's last name who is in charge of the campaign: ")
                insert_query2 = (
                    "INSERT INTO website(campid, date, status, manager_firstname, manager_lastname)"
                    "VALUES (%s,%s,%s,%s,%s)"
                )
                insert_record2 = (init_campID, c_date, init_status, m_first, m_last)
                cursor.execute(insert_query2, insert_record2)
                init_campID += 1
                dbconn.commit()
                count_c += 1
                c2 = input("Press 0 to exit, press anything to continue: ")
                try:
                    c1 = int(c2)
                    if c1 == 0:
                        break
                    else:
                        continue
                except ValueError:
                    continue
            print(count_c, "campaign(s) have added")
        elif ph2_2 == 3:
            # Look up a single campaign's status by campid.
            while True:
                c_id = input("Enter an campID of an campaign: ")
                try:
                    c_id2 = int(c_id)
                except ValueError:
                    print("Please enter an campID!")
                    continue
                else:
                    c_id2 = int(c_id)
                    break
            get_status = "SELECT status from website where campid=%s"
            c_id3 = (c_id2,)
            cursor.execute(get_status, c_id3)
            get_status2 = cursor.fetchall()
            if not get_status2:
                print("The campid is not exist.")
            else:
                print(get_status2[0][0])
    # Choice 3: accounting — list and total inflows (fundraising) and
    # outflows (campaign costs), then draw a bar made of '|' per $1000.
    elif choice == 3:
        in_f = cursor.execute("SELECT date, amount from fundraising")
        inf_table = cursor.fetchall()
        print("  date  |  amount")
        print("----------------")
        for row in inf_table:
            for col in row:
                print(col, end=' ')
            print()
        ift = cursor.execute("SELECT amount FROM fundraising")
        ift2 = cursor.fetchall()
        for row in ift2:
            for col in row:
                if_total += col
        print("Total inflows of all fundraising: " + str(if_total))
        print()
        of = cursor.execute("SELECT website.date, campaign.cost FROM website JOIN campaign on website.campid=campaign.campid")
        of_table = cursor.fetchall()
        print("  date  |  cost")
        print("----------------")
        for row in of_table:
            for col in row:
                print(col, end=' ')
            print()
        oft = cursor.execute("SELECT cost FROM campaign")
        oft2 = cursor.fetchall()
        for row in oft2:
            for col in row:
                of_total += col
        print("Total outflows of all campaigns: " + str(of_total))
        print()
        print("Total inflows vs. outflows")
        # One '|' per $1000 gives a crude ASCII bar chart.
        if_total_b = int(if_total/1000)
        of_total_b = int(of_total/1000)
        print("inflows:  " + if_total_b * '|' + '  ' + str(if_total))
        print("outflows: " + of_total_b * '|' + '  ' + str(of_total))
    # Choice 4: membership history, or schema modification of campaign.
    elif choice == 4:
        while True:
            print("1: View membership history")
            print("2: Add attributes to campaign")
            ph4_1 = input("Press 1 or 2 to modify:")
            try:
                ph4_2 = int(ph4_1)
                if ph4_2 !=1 and ph4_2 !=2:
                    print("1: View membership history")
                    print("2: Add attributes to campaign")
                    print("Press 1 or 2 to modify")
                    continue
            except ValueError:
                print("1: View membership history")
                print("2: Add attributes to campaign")
                print("Press 1 or 2 to modify:")
                continue
            else:
                ph4_2 = int(ph4_1)
                break
        if ph4_2 == 1:
            emp1 = 1
            while True:
                emp_id = input("Enter an empid of an employee (greater than 0): ")
                try:
                    emp_id2 = int(emp_id)
                    if emp_id2 < 1:
                        print("Invalid input!")
                        continue
                    else:
                        emp_id2 = int(emp_id)
                        break
                except ValueError:
                    continue
            # q16 is a pre-defined view joining employees to their campaigns.
            his_tb = "SELECT * FROM q16 where empid=%s"
            emp_id3 = (emp_id2,)
            cursor.execute(his_tb, emp_id3)
            his_tb2 = cursor.fetchall()
            if not his_tb2:
                print("This empid is not exist.")
            else:
                print("campid   empid  firstname  lastname  city  events  lengweeks  date")
                print("--------------------------------------------------------------------")
                for row in his_tb2:
                    for col in row:
                        print(col, end=' | ')
                    print()
        elif ph4_2 == 2:
            al1 = 1
            count_c = 0
            while al1 != 0:
                tb_cname = input("Enter a column name that you want to add: ")
                # NOTE(review): the column name is interpolated directly into
                # DDL — SQL injection risk. Identifiers cannot be server-side
                # parameters; use psycopg2.sql.Identifier to quote them.
                tb_al = "ALTER TABLE campaign ADD %s VARCHAR(255)" % (tb_cname)
                cursor.execute(tb_al)
                dbconn.commit()
                count_c += 1
                al2 = input("Press 0 to exit. Press anything else to continue: ")
                try:
                    al1 = int(al2)
                    if al1 == 0:
                        break
                    else:
                        continue
                except ValueError:
                    continue
            print(str(count_c) + " attributes have been added successfully")
    # Choice 5: list the column names of a user-specified table.
    elif choice == 5:
        tb = input("Enter a table name: ")
        # NOTE(review): table name interpolated into SQL — injection risk;
        # use psycopg2.sql.Identifier for safe identifier quoting.
        q1 = "SELECT * FROM %s" % (tb)
        cursor.execute(q1)
        col_name = [desc[0] for desc in cursor.description]
        print("The existing attributes from this table are: ")
        for i in range(len(col_name)):
            print(col_name[i])
    cursor.close()
    dbconn.close()
if __name__ == "__main__": main()
|
from core.permissions import BasePermission
from.models import RolePermission
# Thin permission classes mapping CRUD actions on Employee to RolePermission
# flags; BasePermission performs the actual check via ``permission_name``.
class CanAddEmployee(BasePermission):
    permission_name = RolePermission.EMPLOYEE_CREATE
class CanEditEmployee(BasePermission):
    permission_name = RolePermission.EMPLOYEE_EDIT
class CanViewEmployee(BasePermission):
    permission_name = RolePermission.EMPLOYEE_READ
class CanDeleteEmployee(BasePermission):
    permission_name = RolePermission.EMPLOYEE_DELETE
|
#!/usr/bin/python3
"""Launch prep_hyperp_imp_v2.py under the Anaconda interpreter and redirect
its output to a trace file."""
import os, subprocess, platform, argparse

_IS_WINDOWS = platform.system() == "Windows"
# default paths to anaconda and data roots
# (expanduser: a literal "~" is not resolved by subprocess/open on POSIX)
ANACONDA_DIR = (os.path.join("C:\\", "Apps", "Anaconda3") if _IS_WINDOWS
                else os.path.expanduser(os.path.join("~", "anaconda3")))
DEFAULT_COUNT, DEFAULT_NAME, DEFAULT_CONFIG = 1, "DemoImpModel", "ConfigImp.yaml"
# parse args for path replacements & args for the job start command execution
parser = argparse.ArgumentParser()
parser.add_argument("--anaconda_dir", type=str, help="path to the root directory of your Anaconda installation")
parser.add_argument("--count", type=int, help="count variable for IMP")
parser.add_argument("--name", type=str, help="Name of job")
parser.add_argument("--config", type=str, help="config file name (within imp_model directory)")
parser.add_argument("--output_file", type=str, help="file to redirect imp script execution into, rather than stdout")
args = parser.parse_args()
if args.anaconda_dir is not None:
    ANACONDA_DIR = args.anaconda_dir
outfile = args.output_file if args.output_file is not None else "prep_hyperp_imp_v2_trace.txt"
# The interpreter binary is "python.exe" only on Windows.
python_bin = os.path.join(ANACONDA_DIR, "python.exe" if _IS_WINDOWS else "python")
with open(outfile, 'w') as f:
    # Pass the command as an argument list: a single command string without
    # shell=True is treated as the program name on POSIX and fails; a list is
    # portable and immune to shell-quoting issues.
    subprocess.run(
        [
            python_bin,
            "prep_hyperp_imp_v2.py",
            "--count={}".format(args.count if args.count is not None else DEFAULT_COUNT),
            "--name={}".format(args.name if args.name is not None else DEFAULT_NAME),
            "--config={}".format(args.config if args.config is not None else DEFAULT_CONFIG),
        ],
        stdout=f,
        stderr=subprocess.STDOUT,
    )
import matplotlib.pyplot as plt
import sys

# x: 1-based index of each value; y: the integers read from read.txt.
x = []
y = []
# Use a context manager so the file is closed even if parsing fails; split()
# with no argument tolerates repeated whitespace and a trailing newline,
# where split(" ") would yield empty tokens and crash int().
with open("read.txt", "r") as f:
    for idx, token in enumerate(f.read().split(), start=1):
        y.append(int(token))
        x.append(idx)
plt.plot(x, y)
plt.plot(x, y, "ro")  # overlay a red dot on each data point
plt.xticks(x)
plt.yticks(y)
plt.xlabel('Number')
plt.ylabel('Square')
plt.grid()
plt.title("GCI-MakeFile-Demo(Square Numbers)")
plt.show()
|
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA512
import requests
import datetime
import Crypto
import json
import threading
import balance
# Load the miner's RSA key; its modulus doubles as the reward address.
f = open('key.pem')
key = RSA.import_key(f.read())
f.close()
# NOTE(review): despite the name, ``.n`` is the RSA *modulus*, not the public
# exponent — the variable name is misleading; confirm before renaming.
publicExponent = key.publickey().n
'''
res = requests.get('https://gw.kaist.ac.kr/broadcast/get?reverse=1&&limit=100')
res = res.json()
nonce = 0
diff = int(json.loads(res[0]['block'])['difficulty'])
block = {'type': 'block', 'transactions': [], 'timestamp': str(datetime.datetime.utcnow()), 'reward': hex(key.publickey().n)[2:], 'difficulty':'9', 'nonce':'', 'parent': res[0]['hash']}
'''
def find_hash_index(hash_, blocks):
    """Return the index of the first *valid* block in ``blocks`` whose hash
    equals ``hash_``, or -1 when no such block exists."""
    for idx, candidate in enumerate(blocks):
        if balance.is_valid_block(candidate) and hash_ == candidate['hash']:
            return idx
    return -1
def find_chain_length(hash_, blocks):
    """Count how many ancestors of ``hash_`` (itself included) are present
    and valid in ``blocks``, following each block's 'parent' pointer."""
    length = 0
    idx = find_hash_index(hash_, blocks)
    while idx > -1:
        length += 1
        parent_hash = json.loads(blocks[idx]['block'])['parent']
        idx = find_hash_index(parent_hash, blocks)
    return length
# returns the end of longest chain
def build_chain():
    """Fetch recent broadcasts and pick a tip whose chain is longer than 50
    blocks; prepare a fresh block template pointing at that tip.

    Returns (block, difficulty).
    NOTE(review): implicitly returns None when no chain exceeds 50 blocks,
    which crashes the caller's tuple unpack — confirm intended.
    """
    global block
    res = requests.get('https://gw.kaist.ac.kr/broadcast/get?reverse=1&&limit=200')
    res = res.json()
    for i in range(len(res)):
        if not balance.is_valid_block(res[i]):
            continue
        a = find_chain_length(res[i]['hash'], res)
        if a > 50:
            block = {'type': 'block', 'transactions': [], 'timestamp': str(datetime.datetime.utcnow()), 'reward': hex(key.publickey().n)[2:], 'difficulty':'9', 'nonce':'', 'parent': res[i]['hash']}
            return (block, json.loads(res[i]['block'])['difficulty'])
# Candidate characters tried pairwise for the nonce by findnonce().
haha = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l']
def findnonce(block, diff):
    """Brute-force a nonce so SHA-512(serialized block) falls below the
    difficulty target; posts the winning block to the broadcast endpoint.

    Returns the (unmodified placeholder) nonce string on success, 0 on failure.
    """
    # 64-character all-zero placeholder nonce.
    nonce = '00000000000000000000000000000000'*2
    block['difficulty'] = str(diff)
    timer = str(datetime.datetime.utcnow()).split(' ')
    block['timestamp'] = timer[0]+'T'+timer[1]
    block['nonce'] = nonce
    J = json.dumps((block))
    for i in haha:
        for k in haha:
            if i == k:
                continue
            for j in range(64):
                # Splice the two candidate characters directly into the JSON
                # string at byte offsets 635..700.
                # NOTE(review): these offsets assume one exact serialized dict
                # layout (key order, timestamp length) — extremely fragile;
                # confirm they still land inside the nonce field.
                J = J[:635 + j] + i + k + J[637 + j:]
                # print(J[635:701])
                hash512 = SHA512.new(str.encode(J))
                # print('\r' + hash512.hexdigest()[:10],'\t\t' , nonce, end='')
                if int(hash512.hexdigest(), 16) < 2 ** (492 - 4):
                    print('\r',hash512.hexdigest())
                # Accept when below the real target (difficulty parsed as hex).
                if int(hash512.hexdigest(), 16) < 2 ** (492 - int(diff, 16)):
                    print(hash512.hexdigest())
                    my_block = {'hash': hash512.hexdigest(), 'type': 'block_hash', 'block': J}
                    r = requests.post('https://gw.kaist.ac.kr/broadcast/post', json = my_block)
                    print('done!!\n\n')
                    return nonce
    return 0
def findnonce_np(block, init, diff, dd):
    """Alternate miner: iterate integer nonces starting at ``init`` with
    stride ``diff``, hashing the re-serialized block each time.

    NOTE(review): ``str(hex(nonce))[:2]`` always evaluates to '0x', so the
    stored nonce never changes — looks like a truncation bug ([:2] vs [2:]).
    Also the timestamp is stored as a *list* (split result), unlike
    findnonce(); confirm both before relying on this path.
    """
    nonce = init
    while(True):
        hash512 = SHA512.new()
        block['nonce'] = str(hex(nonce))[:2]
        block['timestamp'] = str(datetime.datetime.utcnow()).split(' ')
        block['difficulty'] = dd
        hash512.update(str.encode(json.dumps(block)))
        # print('\r' + hash512.hexdigest()[:10],'\t\t' , nonce, end='')
        if int(hash512.hexdigest(), 16) < 2 ** (492):
            print('\r',hash512.hexdigest())
        if int(hash512.hexdigest(), 16) < 2 ** (492 - dd):
            # print(hash512.hexdigest())
            my_block = {'hash': hash512.hexdigest(), 'type': 'block_hash', 'block': json.dumps(block)}
            r = requests.post('https://gw.kaist.ac.kr/broadcast/post', json = my_block)
            print('done!!\n\n')
            return True
        nonce += diff
# Default difficulty and starting nonce for the mining loop.
diff = 9
nonce = 0
if __name__ == '__main__':
    #if balance.sys.argv[1] == '-h':
    #    while(True):
    #        block = {'type': 'block', 'transactions': [], 'timestamp': str(datetime.datetime.utcnow()), 'reward': hex(key.publickey().n)[2:], 'difficulty':'9', 'nonce':'', 'parent': balance.sys.argv[2]}
    #        findnonce(block, diff)
    # Endless mining loop: pick the longest chain's tip, then grind a nonce.
    while(True):
        block, diff = build_chain()
        print('\r', "now hash: ", block['parent'][:12], '\t', diff)
        findnonce(block, diff)
|
"""
This is a script which trains the BalancedBagginClassifier on top of the best
word2vec model, on the train + validation dataset.
"""
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.externals import joblib
import pandas as pd
if __name__ == "__main__":
print("Loading data")
data = pd.concat([
pd.read_csv("/data/SO_data/downvoter/wv_train_processed_data.csv"),
pd.read_csv("/data/SO_data/downvoter/wv_val_processed_data.csv")
])
body_data = joblib.load("./final/vectorized_data/body_data.pkl")
title_data = joblib.load("./final/vectorized_data/title_data.pkl")
body_model = BalancedBaggingClassifier(n_estimators=100,
n_jobs=-1,
ratio="not minority")
title_model = BalancedBaggingClassifier(n_estimators=100,
n_jobs=-1,
ratio="not minority")
labels = data.score < 0
print("Fitting body model")
body_model.fit(body_data, labels)
print("Fitting title model")
title_model.fit(title_data, labels)
joblib.dump(body_model, "./final/body_model.pkl")
joblib.dump(title_model, "./final/title_model.pkl")
print("All done!")
|
import os
from numpy import *
from timeit import *
import random
def main():
    """Compare the exact brute-force TSP tour against the greedy heuristic on
    a random 10-vertex graph: print both tours, their lengths and the ratio
    heuristic/optimal."""
    #file = openF('graph.txt')
    #graph = loadGraph(file)
    graph = randomGraph(10)
    tour = TSP(graph)
    #print("TSP tour = ")
    print()
    print(tour)
    a=tourLength(tour, graph)
    print(a)
    tourZgadn = Zgadn(graph)
    #print("Zhadn tour = ")
    print(tourZgadn)
    b=tourLength(tourZgadn, graph)
    print(b)
    # Ratio heuristic/optimal (original comment was Russian for "divergence").
    #print("Divergence:")
    print(b/a)
    #tour = Gran(graph)
    #print("Grand tour = ")
    #print(tour)
    #print(graph)
    #print(tourLength(tour, graph))
    #fib(55)
    #file.close()
def openF(filename):
    """Open ``filename`` (resolved relative to this script's directory) for
    reading and return the file object; the caller owns/closes it."""
    base_dir = os.path.dirname(__file__)
    return open(os.path.join(base_dir, filename), 'r')
def loadGraph(file):
    """Parse a whitespace-separated adjacency matrix from ``file``; the token
    'inf' marks a missing edge (infinite cost)."""
    matrix = array([[]])
    for line in file:
        row = array([])
        for token in line.split():
            value = float("infinity") if token == 'inf' else float(token)
            row = append(row, [value])
        matrix = addLineInMatrix(matrix, row)
    return matrix
def addLineInMatrix(matrix, line):
    """Append ``line`` as a new row of ``matrix``.

    The builders start from the placeholder ``array([[]])``; the first real
    row replaces the placeholder (returned as ``[line]``), later rows are
    stacked with ``numpy.append``.
    """
    # Explicit emptiness test: the original used ``matrix != [[]]``, an
    # elementwise broadcast comparison whose truth value is ambiguous and
    # NumPy-version-dependent — it only worked by accident.
    if len(matrix) > 0 and len(matrix[0]) > 0:
        return append(matrix, [line], 0)
    return [line]
def isValid(graph, i, j, usedVer):
    """A move i -> j is valid unless it is a self-loop or ``j`` was already
    visited. (``graph`` is unused; kept for interface compatibility.)"""
    return i != j and j not in usedVer
def TSP(graph, start=0, used_ver=None):
    """Exact brute-force TSP: return the cheapest tour (list of vertex
    indices) that starts at ``start`` and visits every vertex once.

    ``used_ver`` is the recursion's shared visited-stack; external callers
    should not pass it.
    """
    # A mutable default ([]) is shared across top-level calls — the classic
    # Python pitfall; allocate a fresh list per outermost call instead.
    if used_ver is None:
        used_ver = []
    used_ver.append(start)
    min_length = float('infinity')
    min_sub_tour = []
    if (len(used_ver) == len(graph)):
        # All vertices placed: this branch is a complete candidate tour.
        min_sub_tour = list(used_ver)
    else:
        # Try every unvisited vertex as the next stop and keep the cheapest.
        for i in range(len(graph)):
            if (not(i in used_ver)):
                sub_tour = TSP(graph, i, used_ver)
                sub_tour_length = tourLength(sub_tour, graph)
                if (sub_tour_length < min_length):
                    min_sub_tour = sub_tour
                    min_length = sub_tour_length
    used_ver.pop()  # backtrack so the caller's stack is unchanged
    return min_sub_tour
def Zgadn(graph):
    """Greedy nearest-neighbour tour: start at the row containing the globally
    cheapest edge, then repeatedly hop to the nearest unvisited vertex."""
    n = len(graph)
    # Locate the row of the globally smallest entry.
    best = float('infinity')
    current = 0
    for i in range(n):
        for j in range(n):
            if graph[i][j] < best:
                best = graph[i][j]
                current = i
    visited = [False] * n
    tour = [current]
    visited[current] = True
    nxt = 0
    # Extend the tour one nearest unvisited neighbour at a time.
    for _ in range(n - 1):
        shortest = float('infinity')
        for j in range(n):
            if not visited[j] and current != j and graph[current][j] < shortest:
                shortest = graph[current][j]
                nxt = j
        current = nxt
        tour.append(nxt)
        visited[nxt] = True
    return tour
def reGran(graph):
    """Recursive edge-selection step — appears to follow a branch-and-bound
    style matrix reduction (confirm intended algorithm): reduce the matrix,
    pick the zero with the highest opportunity cost, recurse on the shrunken
    matrix, and return the chosen (row, col) edges re-offset into the
    original matrix's coordinates."""
    if(graph == []):
        return []
    graph = reducuctGraph(graph)
    marks = costsFromGraph(graph)
    m = arrayMax(marks)
    # Forbid the reverse edge before deleting row m[1] / column m[0].
    graph[m[1]][m[0]] = float('infinity')
    subGraph = deleteFromGraph(graph, m)
    sub_m_l = reGran(subGraph)
    # Shift sub-problem coordinates back past the deleted row/column.
    for i in range(len(sub_m_l)):
        if(sub_m_l[i][0]>=m[0]):
            sub_m_l[i]=(sub_m_l[i][0]+1,sub_m_l[i][1])
        if(sub_m_l[i][1]>=m[1]):
            sub_m_l[i]=(sub_m_l[i][0],sub_m_l[i][1]+1)
    return [m] + sub_m_l
def Gran(graph, start=0, user_ver=[]):
    """Assemble a tour from the edge list produced by reGran.

    NOTE(review): ``start``/``user_ver`` are unused (and the mutable default
    is a known pitfall); removing elements from ``rebs`` while iterating it
    can skip entries and the outer ``while`` may spin forever on a broken
    edge set. Only referenced from commented-out code — review before use.
    """
    rebs = reGran(array(graph))
    tour = [rebs[0][1]]
    rebs.remove(rebs[0])
    #print(rebs)
    while len(rebs)!=0:
        for reb in rebs:
            if(reb[0] == tour[len(tour)-1]):
                tour.append(reb[1])
                rebs.remove(reb)
    return tour
def reducuctGraph(graph):
    """Reduce the cost matrix in place: subtract each row's minimum from its
    row, then each column's minimum from its column. Returns ``graph``."""
    n = len(graph)
    # Row reduction.
    for row in graph:
        smallest = min(row)
        for k in range(n):
            row[k] -= smallest
    # Column reduction.
    for col in range(n):
        smallest = graph[0][col]
        for r in range(1, n):
            if graph[r][col] < smallest:
                smallest = graph[r][col]
        for r in range(n):
            graph[r][col] -= smallest
    return graph
def deleteFromGraph(graph, mark_coord):
    """Return a copy of ``graph`` without row ``mark_coord[0]`` and column
    ``mark_coord[1]`` (plain nested lists)."""
    row_skip, col_skip = mark_coord[0], mark_coord[1]
    n = len(graph)
    return [[graph[i][j] for j in range(n) if j != col_skip]
            for i in range(n) if i != row_skip]
def costsFromGraph(graph):
    """For every zero entry of the (reduced) matrix, compute its opportunity
    cost: the smallest other entry in its column plus the smallest other
    entry in its row. Non-zero cells keep the sentinel -1."""
    count = len(graph)
    arrWithCosts = array(graph)
    arrayZero(arrWithCosts)  # fills with -1 sentinels (see arrayZero)
    for i in range(count):
        for j in range(count):
            if (graph[i][j] == 0):
                arrWithCosts[i][j] = minInCol(graph, i, j) + minInRow(graph, i, j)
    return arrWithCosts
def minInCol(graph, row, column):
    """Smallest entry of ``column``, ignoring the cell in ``row``."""
    best = float('infinity')
    for r in range(len(graph)):
        if r != row and graph[r][column] < best:
            best = graph[r][column]
    return best
def minInRow(graph, row, column):
    """Smallest entry of ``row``, ignoring the cell in ``column``."""
    best = float('infinity')
    for c in range(len(graph)):
        if c != column and graph[row][c] < best:
            best = graph[row][c]
    return best
def arrayZero(array):
    """Fill the square array with -1 in place. (Despite the name, the
    sentinel used by costsFromGraph is -1, not 0.)"""
    n = len(array)
    for r in range(n):
        for c in range(n):
            array[r][c] = -1
def arrayMax(array):
    """Return the (row, col) coordinates of the maximum entry. The scan is
    column-major with a strict '>' test, so on ties the earliest column wins
    — matching the original traversal order."""
    n = len(array)
    best = -1
    best_row, best_col = 0, 0
    for c in range(n):
        for r in range(n):
            if array[r][c] > best:
                best = array[r][c]
                best_row, best_col = r, c
    return (best_row, best_col)
def tourLength(tour, graph):
    """Total cost of the *closed* tour (the edge back to the start vertex is
    included); tours shorter than 2 vertices cost 0."""
    if len(tour) < 2:
        return 0
    total = 0
    for a, b in zip(tour, tour[1:]):
        total += graph[a][b]
    # Close the loop back to the starting vertex.
    return total + graph[tour[-1]][tour[0]]
def randomGraph(n):
    """Build a random symmetric n x n distance matrix with infinite diagonal
    (no self-loops); off-diagonal weights are uniform in [1, 20)."""
    matrix = array([[]])
    for i in range(n):
        m_line = array([])
        for j in range(0,n):
            if(j < i):
                # Mirror the already-generated upper triangle for symmetry.
                m_line = append(m_line, matrix[j][i])
            elif (i == j):
                m_line = append(m_line, [float("infinity")])
            else:
                m_line = append(m_line, [float(random.uniform(1, 20))])
        matrix = addLineInMatrix(matrix, m_line)
    return matrix
def fib(n):
    """Fibonacci terms below ``n`` (by the original stopping rule a+b < n).

    Returns 1 when n <= 2; otherwise prints the sequence and returns it as a
    list starting [1, 1, ...].
    """
    if n <= 2:
        return 1
    seq = [1, 1]
    a, b = 1, 1
    while a + b < n:
        a, b = b, a + b
        seq.append(b)
    print(seq)
    return seq
#for i in range(50):
#    main()
# Ad-hoc timing comparison of the exact solver vs. the greedy heuristic on a
# single random 10-vertex graph.
# NOTE(review): ``global`` at module scope is a no-op statement; kept as-is.
global tgraph
tgraph = randomGraph(10)
t=timeit('TSP(graph)', setup='from __main__ import tgraph; from __main__ import TSP; graph = tgraph', number=1)
print("TSP time = ", t)
t=timeit('Zgadn(graph)', setup='from __main__ import tgraph; from __main__ import Zgadn; graph = tgraph', number=1)
print("Zgadn time = ", t)
#t=timeit('Gran(graph)', setup='from __main__ import tgraph; from __main__ import Gran; graph = tgraph', number=0)
#print("Gran time = ", t)
#code for basic geocoding search
# Forward-geocode a free-text query via the LocationIQ API and extract the
# first match's latitude, longitude and display name.
import requests
import sys
import json
url = "https://us1.locationiq.com/v1/search.php"
#change this to the input
query = sys.argv[1]
# NOTE(review): the API key is committed in source — rotate it and read it
# from an environment variable instead.
data = {
    'key': '8d6879e1df3d64',
    'q': query,
    'format': 'json'
}
response = requests.get(url, params=data)
rjson = response.json()
#latitude
lat = rjson[0]['lat']
#longitude
lon = rjson[0]['lon']
#display name
display = rjson[0]['display_name']
#print(lat, lon, display)
xa=float(input("x de a:"))
ya=float(input("y de a:"))
xb=float(input("x de b:"))
yb=float(input("y de b:"))
xm=(xa+xb)/2
ym=(ya+yb)/2
print(float(round(xm,1)))
print(float(round(ym,1))) |
import audioop
import numpy as np
import pyaudio
import wave
CHUNK_SIZE = 1024
class Music:
    """Play a stereo WAV file on a 6-channel output device, routing the left
    and right source channels to configurable output speakers."""

    def __init__(self, path = "./stereo.wav"):
        self.path = path
        self.wf = wave.open(path, 'rb')
        self.width = self.wf.getsampwidth()
        self.pa = pyaudio.PyAudio()
        self.stream = self.pa.open(format = self.pa.get_format_from_width(self.width),
                                   channels = 6,
                                   rate = self.wf.getframerate(),
                                   output = True)

    def stop(self):
        """Release the audio stream and terminate the PortAudio session."""
        self.stream.stop_stream()
        self.stream.close()
        self.pa.terminate()

    def replay(self):
        """Stream the file to the 6-channel device until EOF, then stop.

        Bug fixes vs. the original:
        * ``readframes`` returns *bytes*; comparing against the str ``''``
          never matched in Python 3, so the loop never terminated at EOF.
        * the chunk read before the loop was immediately overwritten, so the
          start of the file was skipped.
        """
        # Source byte layout: [L0, R0, L1, R1, L2, R2, ...]
        src_frames = self.wf.readframes(CHUNK_SIZE)
        while src_frames:
            # Split the interleaved stereo data into mono L / R streams.
            l_frames = audioop.tomono(src_frames, self.width, 1, 0)
            r_frames = audioop.tomono(src_frames, self.width, 0, 1)
            # Per-output-channel gains, order: L, FL, FR, R, B
            # (row 0 applies to the left source, row 1 to the right).
            volumes = [[1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
            six_ch_frames = self.set_6ch_audio(l_frames, r_frames, volumes)
            # Interleaved 6ch output: [FL0, FR0, CT0, BA0, RL0, RR0, ...]
            self.stream.write(six_ch_frames)
            src_frames = self.wf.readframes(CHUNK_SIZE)
        self.stop()

    def set_6ch_audio(self, l_frames, r_frames, volumes):
        """Mix the two mono byte strings into interleaved 6-channel bytes.

        For each of 5 logical channels, scale the left stream by its gain if
        non-zero, otherwise fall back to the right stream's gain.
        """
        # frombuffer/tobytes replace the deprecated fromstring/tostring with
        # identical behavior for byte input.
        all_frame = [ [np.frombuffer(audioop.mul(l_frames, self.width, volumes[0][i]), dtype=np.int16)]
                      if volumes[0][i] != 0
                      else [np.frombuffer(audioop.mul(r_frames, self.width, volumes[1][i]), dtype=np.int16)]
                      for i in range(5)]
        audio = np.concatenate(all_frame, axis=0)
        # The second argument of ``insert`` is the output channel to mute.
        audio = np.insert(audio, 4, 0, axis=0).T.astype(np.int16).tobytes()
        return audio
if __name__ == '__main__':
    # Play ./stereo.wav through the 6-channel output device.
    music = Music()
    music.replay()
|
from django.urls import path
from .views import *
# URL routing for the blog app; all targets are class-based views from
# .views, namespaced under 'blog'.
app_name = 'blog'
urlpatterns = [
    # Example: /
    path('', PostLV.as_view(), name='index'),
    # Example: /post/ (same as /)
    path('post/', PostLV.as_view(), name='post_list'),
    # Example: /post/django-example/
    path('post/<slug:slug>/', PostDV.as_view(), name='post_detail'),
    # Example: /archive/
    path('archive/', PostAV.as_view(), name='post_archive'),
    # Example: /2012/
    path('<int:year>/', PostYAV.as_view(), name='post_year_archive'),
    # Example: /2012/nov/
    path('<int:year>/<str:month>/', PostMAV.as_view(), name='post_month_archive'),
    # Example: /2012/nov/10/
    path('<int:year>/<str:month>/<int:day>/', PostDAV.as_view(), name='post_day_archive'),
    # Example: /today/
    path('today/', PostTAV.as_view(), name='post_today_archive'),
    # Example: /tag/
    path('tag/', TagTV.as_view(), name='tag_cloud'),
    # Example: /tag/tagname/
    path('tag/<str:tag>/', PostTOL.as_view(), name='tagged_object_list'),
    # Example: /search/
    path('search/', SearchFormView.as_view(), name='search'),
]
|
"""
伪代码:
去除平均值
计算协方差矩阵
计算协方差矩阵的特征值和特征向量
将特征值从大到小排序
保留最上面的N个特征向量
将数据转换到上述N个特征向量构建的新空间中
"""
import numpy as np
def loadDataSet(filename, delim="\t"):
f = open(filename)
strArr = [line.strip().split(delim) for line in f.readlines()]
arr = [list(map(float, line)) for line in strArr]
return np.mat(arr)
def pca(dataMat, topNfeat=9999999):
    """Project ``dataMat`` onto its ``topNfeat`` principal components.

    Returns (lowDimData, reconMat): the projected data and its
    reconstruction back in the original space.
    """
    # Center the data.
    means = np.mean(dataMat, axis=0)
    centered = dataMat - means
    # Eigen-decompose the covariance matrix.
    eigVals, eigVecs = np.linalg.eig(np.mat(np.cov(centered, rowvar=False)))
    # Indices of the topNfeat largest eigenvalues, in descending order.
    order = np.argsort(eigVals)[:-(topNfeat + 1):-1]
    basis = eigVecs[:, order]
    # Project into the reduced space, then map back to reconstruct.
    lowDimData = centered * basis
    reconMat = (lowDimData * basis.T) + means
    return lowDimData, reconMat
data = loadDataSet("../dataset/testSet.txt")
lowdimdata, reconMat = pca(dataMat=data, topNfeat=1)
|
# Python 2 script: for each line of the input file, decide whether the first
# whitespace-separated field is a narcissistic (Armstrong) number — the sum
# of its digits, each raised to the digit count, equals the number itself.
import sys
zeefile = open(sys.argv[1])
# NOTE(review): the file handle is never closed; acceptable for a one-shot
# script, but use a with-block if this grows.
for x in zeefile:
    holder = x.strip().split()
    indienums = holder[0]
    totalnums = []
    # The loop name ``x`` is reused for each digit character here.
    for x in indienums:
        x = int(x)
        totalnums.append(x ** len(indienums))
    if str(sum(totalnums)) == indienums:
        print "True"
    else:
        print "False"
|
#Problem ID: COVIDLQ
#Problem Name: COVID Pandemic and Long Queue
# For each test case, the queue is a 0/1 string: every occupied spot ('1')
# must be at least 5 positions after the previous occupied spot.
for _ in range(int(input())):
    n = int(input())
    s = list(input().split())
    # z counts empty spots since the last person; starting at 5 lets the
    # first person stand anywhere.
    z = 5
    i = 0
    r = True
    while i < n:
        if(s[i] == '1'):
            if(z<5):
                # Two people closer than 5 spots: distancing violated.
                r = False
                break
            else:
                z = 0
        else:
            z+=1
        i += 1
    if(r):
        print('YES')
    else:
        print('NO')
|
import logging
import pytest
from ocs_ci.framework.pytest_customization.marks import (
system_test,
ignore_leftovers,
polarion_id,
skipif_ocs_version,
)
from ocs_ci.framework.testlib import E2ETest
log = logging.getLogger(__name__)
@system_test
@ignore_leftovers
@polarion_id("OCS-2716")
@skipif_ocs_version("<4.9")
class TestMCGRecovery(E2ETest):
    """
    Test MCG system recovery
    """
    @pytest.mark.parametrize(
        argnames=["bucket_amount", "object_amount"],
        argvalues=[pytest.param(2, 15)],
    )
    def test_mcg_db_backup_recovery(
        self,
        setup_mcg_system,
        bucket_amount,
        object_amount,
        verify_mcg_system_recovery,
        snapshot_factory,
        noobaa_db_backup_and_recovery,
    ):
        # Populate MCG with buckets/objects, back up and restore the NooBaa
        # DB via a snapshot, then verify the populated state survived.
        mcg_sys_dict = setup_mcg_system(bucket_amount, object_amount)
        noobaa_db_backup_and_recovery(snapshot_factory=snapshot_factory)
        verify_mcg_system_recovery(mcg_sys_dict)
|
# With list comprehension
# (loop variable renamed: the original used ``list``, shadowing the builtin)
vector = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
L = [number for sublist in vector for number in sublist]
print(L)
# Prints [1, 2, 3, 4, 5, 6, 7, 8, 9]

# equivalent to the following plain, old nested loop:
vector = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
L = []
for sublist in vector:
    for number in sublist:
        L.append(number)
print(L)
# Prints [1, 2, 3, 4, 5, 6, 7, 8, 9]
from typing import List
class Solution:
    def subarraySum(self, nums: List[int], k: int) -> int:
        """
        Brute Force: count the contiguous subarrays of nums summing to k.
        Time Complexity: O(n^3)
        Space Complexity: O(n)
        """
        count = 0
        n = len(nums)
        # Enumerate every (start, end] window and sum it directly.
        for lo in range(n):
            for hi in range(lo + 1, n + 1):
                if sum(nums[lo:hi]) == k:
                    count += 1
        return count
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Delete indicators and groups which have been created for the tests."""
import hashlib
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".")))
import validator
def _create_xid(type_, name):
# if given a file indicator, make sure the name is based on the first hash
if type_ == 'file':
name = name.split(' : ')[0]
xid_string = '{}-{}'.format(type_, name)
hash_object = hashlib.sha256(xid_string.encode('utf-8'))
return hash_object.hexdigest()
def _delete_groups(tcex):
    """Batch-delete every group returned by the validator, keyed by the same
    deterministic xid used when the test data was created."""
    batch = tcex.batch(tcex.args.api_default_org, action='Delete')
    groups = validator.get_groups(tcex)
    for group in groups:
        group['xid'] = _create_xid(group['type'], group['name'])
        batch.add_group(group)
    batch.submit_all()
def _delete_indicators(tcex):
    """Batch-delete every indicator returned by the validator, keyed by the
    deterministic xid derived from its type and summary."""
    # TODO: implement
    batch = tcex.batch(tcex.args.api_default_org, action='Delete')
    indicators = validator.get_indicators(tcex)
    for indicator in indicators:
        indicator['xid'] = _create_xid(indicator['type'], indicator['summary'])
        batch.add_indicator(indicator)
    batch.submit_all()
def clean(tcex):
    """Delete all indicators and groups in the source.

    Args:
        tcex: an authenticated TcEx instance used for the batch API calls.
    """
    _delete_groups(tcex)
    _delete_indicators(tcex)
|
class UserModel:
    """Simple in-memory record of a user.

    Note: the constructor's ``name`` argument is stored as ``user_name``.
    """
    def __init__(self,id,name,password):
        self.id = id
        self.user_name=name
        self.password=password
|
# Recursive binary search that explores both halves around every match.
# Typical time O(log n) (degrades toward O(n) with many duplicates);
# call-stack depth matches the recursion.
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Return [first, last] index of ``target`` in sorted ``nums``,
        or [-1, -1] when it is absent."""
        self.min_idx, self.max_idx = -1, -1

        def search(lo, hi):
            if lo > hi:
                return
            mid = (lo + hi) // 2
            if nums[mid] == target:
                if self.min_idx == -1:
                    self.min_idx = self.max_idx = mid
                else:
                    self.min_idx = min(self.min_idx, mid)
                    self.max_idx = max(self.max_idx, mid)
            # Keep searching both sides that could still hold the target.
            if target <= nums[mid]:
                search(lo, mid - 1)
            if target >= nums[mid]:
                search(mid + 1, hi)

        search(0, len(nums) - 1)
        return [self.min_idx, self.max_idx]
from tkinter import Toplevel, BOTH
from tkinter.ttk import *
class CustomDialog(Toplevel):
"""
Class to open dialogs which uses ttk instead of tkinter for style
consistency. This class is intended as a base class for custom dialogs
Parameters
----------
parent : `enrich2.gui.configurator.Configurator`
A parent window (the application window)
title : `str`
The dialog window title
body_frame_text: `str`, default: ''
Specify text to house all elements in a `LabelFrame`
Methods
-------
body
Create dialog body, overrides TopLevel.
destroy
Handles the destory window event.
buttonbox
Adds a standard button box to the dialog.
ok
Handles the 'ok' button press event.
cancel
Handles the 'cancel' button press event.
validate
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
apply
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
See Also
--------
:py:class:`~Toplevel`
"""
def __init__(self, parent, title=None, body_frame_text=""):
"""
Initialize a dialog.
"""
Toplevel.__init__(self, parent)
self.withdraw() # remain invisible for now
# If the master is not viewable, don't
# make the child transient, or else it
# would be opened withdrawn
if parent.winfo_viewable():
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
self.row = 0
self.column = 0
self.overall_frame = Frame(self)
if body_frame_text:
self.body_frame = LabelFrame(self.overall_frame, text=body_frame_text)
else:
self.body_frame = Frame(self.overall_frame)
self.initial_focus = self.body(self.body_frame)
self.body_frame.grid(
column=self.column, row=self.row, sticky="nsew", padx=5, pady=5
)
self.rowconfigure(self.row, weight=1)
self.columnconfigure(self.column, weight=1)
self.row += 1
self.button_box = Frame(self.overall_frame)
self.buttonbox()
self.button_box.grid(
column=self.column, row=self.row, sticky="e", padx=5, pady=5
)
self.rowconfigure(self.row, weight=1)
self.columnconfigure(self.column, weight=1)
self.row += 1
self.column += 1
self.overall_frame.pack(fill=BOTH, expand=True)
self.resizable(False, False)
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
if self.parent is not None:
self.geometry(
"+%d+%d" % (parent.winfo_rootx() + 50, parent.winfo_rooty() + 50)
)
self.deiconify() # become visible now
self.initial_focus.focus_set()
# wait for window to appear on screen before calling grab_set
self.wait_visibility()
self.grab_set()
self.wait_window(self)
def destroy(self):
"""
Destroy the window
"""
self.initial_focus = None
Toplevel.destroy(self)
def body(self, master):
"""
Create dialog body.
Return widget that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
"""
pass
def buttonbox(self):
"""
Add standard button box.
Override if you do not want the standard buttons
"""
w = Button(self.button_box, text="OK", width=10, command=self.ok)
w.grid(column=0, row=0, padx=5, pady=5)
w = Button(self.button_box, text="Cancel", width=10, command=self.cancel)
w.grid(column=1, row=0, padx=5, pady=5)
self.button_box.rowconfigure(0, weight=1)
self.button_box.columnconfigure(0, weight=1)
self.button_box.columnconfigure(1, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
    def ok(self, event=None):
        """
        Handles the 'ok' button click (and the <Return> key).

        Validates first; on failure the dialog stays open and focus is
        restored.  On success the window is hidden, the data is applied
        and the parent's treeview refreshed, then the dialog is closed.
        """
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        self.withdraw()
        self.update_idletasks()
        try:
            self.apply()
            self.parent.refresh_treeview()
        finally:
            # Always close, even if apply()/refresh raised.
            self.cancel()
def cancel(self, event=None):
"""
Handles the 'cancel' button click event.
"""
# put focus back to the parent window
if self.parent is not None:
self.parent.focus_set()
self.destroy()
def validate(self):
"""
Validate the data
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
"""
return 1
def apply(self):
"""
Process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
"""
pass
|
import math
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import StringProperty,NumericProperty,ListProperty,DictProperty
from kivy.lang import Builder
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.app import App
Builder.load_string('''
#:import math math
<CircularMenu>:
mbok:mbok
len_wg:180
FloatLayout:
id:mbok
size_hint: None, None
pos_hint: {"center_x":0.5, "center_y":0.5}
size: 1*min(root.size), 1*min(root.size)
Label:
pos_hint:{"center_x": 0.5+0.42*math.sin(math.pi/root.len_wg*(360-root.angle)), "center_y": 0.5+0.42*math.cos(math.pi/root.len_wg*(360-root.angle))}
text:"1"
''')
class BtnMenu(Button):
    """Menu-item button; ``data`` carries the payload printed on press."""
    data=StringProperty("")
class CircularMenu(FloatLayout):
    """A ring of buttons that is spun by dragging; pressing a button
    prints its ``data`` payload."""
    # Current rotation of the ring, in degrees (updated while dragging).
    angle = NumericProperty(10)
    listsource=ListProperty(["","","","","","","","","","","",""])
    # NOTE(review): "f" appears twice in both lists below; the second is
    # presumably a typo for "h" -- confirm with the author.
    listdata=ListProperty(["a","b","c","d","e","f","g","f","i","j","k","l"])
    listtext=ListProperty(["a","b","c","d","e","f","g","f","i","j","k","l"])
    # Angular scale shared with the kv rule: positions use pi/len_wg.
    len_wg=NumericProperty(180)
    count=NumericProperty(0)
    def __init__(self, *args, **kwargs):
        # No extra initialisation beyond the FloatLayout defaults.
        super(CircularMenu, self).__init__(*args,**kwargs)
    def on_touch_down(self, touch):
        # Record the angle where the drag started so on_touch_move can
        # rotate relative to it.
        if self.collide_point(*touch.pos):
            y = (touch.y - self.center[1])
            x = (touch.x - self.center[0])
            calc = math.degrees(math.atan2(y, x))
            self.prev_angle = calc if calc > 0 else 360 + calc
            self.tmp = self.angle
        return super(CircularMenu, self).on_touch_down(touch)
    def on_touch_move(self, touch):
        # NOTE(review): the ring is cleared even when the touch is outside
        # the widget, leaving it empty until the next in-bounds move --
        # confirm whether that is intended.
        self.ids["mbok"].clear_widgets()
        if self.collide_point(*touch.pos):
            y = (touch.y - self.center[1])
            x = (touch.x - self.center[0])
            calc = math.degrees(math.atan2(y, x))
            new_angle = calc if calc > 0 else 360 + calc
            self.angle = self.tmp + (new_angle - self.prev_angle) % 360
            # Rebuild the 12 buttons, each 30 degrees apart on the ring.
            for i in range(len(self.listsource)):
                self.ids["mbok"].add_widget(BtnMenu(text=self.listtext[i],
                                            data=self.listdata[i],
                                            on_press=self.on_menu_press,
                                            size_hint=(.1,.1),
                                            pos_hint={"center_x": 0.5+0.42*math.sin(math.pi/self.len_wg*(360-i*30-self.angle)),
                                                      "center_y": 0.5+0.42*math.cos(math.pi/self.len_wg*(360-i*30-self.angle))}))
    def on_angle(self, a, b):
        # Kivy property observer for ``angle``; intentionally a no-op.
        pass
    def on_menu_press(self,a):
        # ``a`` is the pressed BtnMenu instance.
        print(a.data)
class MyApp(App):
    """Demo application whose root widget is a single CircularMenu."""
    def build(self):
        return CircularMenu()
# Run the demo when executed directly.
if __name__=="__main__":
    MyApp().run()
|
#!/usr/bin/env python3
from sys import stderr, exit, argv
import re
import random
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from increasing_subsequence_lib import *
# METADATA OF THIS TAL_SERVICE:
problem="increasing_subseq"
service="min_k_col"
args_list = [
    ('coloring',str),
    ('lang',str),
]
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
# NOTE(review): eval() on an f-string built from message templates will
# execute arbitrary code embedded in those templates; acceptable only if
# the language files are trusted.
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
#if not ENV['silent']:
#    TAc.print(LANG.opening_msg, "green")
TAc.print("\nYou will be given a sequence of numbers, and you have to enter a minimum coloring, where all elements of the same color represent a non-increasing sequence.", "green")
# Random challenge sequence (presumably 10 values up to 100 -- see
# increasing_subsequence_lib.generate_random_seq).
T = generate_random_seq(10,100)
string_T = list_to_string(T)
TAc.print("T: "+string_T+"\n\n","green")
# Reference solution: minimum partition of T into non-increasing chains.
mdc = min_decreasing_col(T)
n_col = n_coloring(mdc)
TAc.print("Insert your coloring (example: 1 3 1 ... where 1 = color_1):\n","green")
k = input()
# Accepts exactly 10 space-separated positive integers, matching len(T).
ok = bool(re.match(r"^((([1-9][0-9]{0,9} *){9}[1-9][0-9]{0,9}))$", k))
if ok:
    color = parse_input(k)
else:
    TAc.print("\n\nWRONG INPUT FORMAT: this is not a sequence of color (example: 1 3 1 ... where 1 = color_1).\n", "red")
    exit(0)
# Only the NUMBER of colors is compared; the coloring itself is not
# checked for validity here.
n_col_user = n_coloring(color)
if n_col == n_col_user:
    TAc.print("\n\nYES, it's the minimum coloring of T\n", "red")
else:
    TAc.print("\n\nNO, it isn't the minimum coloring of T\n", "red")
|
import threading
from time import sleep
from model.experiment import Experiment
# Run a scan on a worker thread while the main thread polls progress.
experiment = Experiment()
experiment.load_config('experiment.yml')
experiment.initialize()
t = threading.Thread(target=experiment.start_scan)
t.start()
# Poll once per second until the scan thread finishes.
# NOTE(review): experiment.i is presumably a progress counter -- confirm.
while t.is_alive():
    print(experiment.i)
    sleep(1)
# Signal the experiment loop to stop, then persist the results.
experiment.keep_running = False
experiment.save_data()
import unittest
from pylatexenc import _util
class TestLineNumbersCalculator(unittest.TestCase):
    """Tests for _util.LineNumbersCalculator's pos -> (line, column) mapping."""
    def test_simple(self):
        # Lines are 1-based, columns 0-based; position 3 is the newline
        # ending line 1, position 4 starts line 2, etc.
        s = """\
one
two
three
four
five
""".lstrip()
        ln = _util.LineNumbersCalculator(s)
        self.assertEqual( ln.pos_to_lineno_colno(0), (1,0) )
        self.assertEqual( ln.pos_to_lineno_colno(1), (1,1) )
        self.assertEqual( ln.pos_to_lineno_colno(2), (1,2) )
        self.assertEqual( ln.pos_to_lineno_colno(3), (1,3) )
        self.assertEqual( ln.pos_to_lineno_colno(4), (2,0) )
        self.assertEqual( ln.pos_to_lineno_colno(5), (2,1) )
        self.assertEqual( ln.pos_to_lineno_colno(6), (2,2) )
        self.assertEqual( ln.pos_to_lineno_colno(7), (2,3) )
        self.assertEqual( ln.pos_to_lineno_colno(8), (3,0) )
        self.assertEqual( ln.pos_to_lineno_colno(9), (3,1) )
        self.assertEqual( ln.pos_to_lineno_colno(23), (5,4) )
    def test_as_dict(self):
        # With as_dict=True the same mapping is returned as a dict.
        s = """\
one
two
three
four
five
""".lstrip()
        ln = _util.LineNumbersCalculator(s)
        self.assertEqual( ln.pos_to_lineno_colno(9, as_dict=True),
                          { 'lineno': 3,
                            'colno': 1 } )
|
#!/usr/bin/env python2.7
import dicom, cv2, re
import os, fnmatch, sys
from keras.callbacks import *
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
from itertools import zip_longest
from scipy.misc import imsave
from helpers import center_crop_3d, center_crop, lr_poly_decay, get_SAX_SERIES
import pylab
import matplotlib.pyplot as plt
from CardiacImageDataGenerator import CardiacImageDataGenerator, CardiacTimeSeriesDataGenerator
from unet_model_time import unet_res_model_time
from unet_res_model_Inv import unet_res_model_Inv
from DataIOProc import DataIOProc
# Fixed RNG seed for reproducible shuffling/augmentation.
seed = 1234
# NOTE(review): np is not imported explicitly in this file; it presumably
# arrives via one of the star imports above (keras.callbacks) -- confirm.
np.random.seed(seed)
SAX_SERIES = get_SAX_SERIES()
# NOTE(review): non-raw Windows path; '\c' and '\S' are not escape
# sequences so this happens to work, but a raw string would be safer.
SUNNYBROOK_ROOT_PATH = 'D:\cardiac_data\Sunnybrook'
# Directory layout of the Sunnybrook Cardiac MR dataset on disk.
TEMP_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database Temp',
                   'Temp')
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database ContoursPart3',
                   'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database DICOMPart3',
                   'TrainingDataDICOM')
TRAIN_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database OverlayPart3',
                   'TrainingOverlayImage')
TRAIN_AUG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database Augmentation')
DEBUG_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database ContoursPart3',
                   'Debug')
DEBUG_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database DICOMPart3',
                   'Debug')
DEBUG_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database OverlayPart3',
                   'Debug')
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database ContoursPart2',
                   'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database DICOMPart2',
                   'ValidationDataDICOM')
VAL_OVERLAY_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                   'Sunnybrook Cardiac MR Database OverlayPart2',
                   'ValidationDataOverlay')
class Contour(object):
    """One annotated image: the paths of its endo/epi/p1-p3 contour files.

    The case name and image number are parsed from the endocardial
    contour path, which is always present.
    """
    def __init__(self, ctr_endo_path, ctr_epi_path, ctr_p1_path, ctr_p2_path, ctr_p3_path):
        self.ctr_endo_path = ctr_endo_path
        self.ctr_epi_path = ctr_epi_path
        self.ctr_p1_path = ctr_p1_path
        self.ctr_p2_path = ctr_p2_path
        self.ctr_p3_path = ctr_p3_path
        # Windows-style path: \<case>\contours-manual\IRCCI-expert\IM-0001-<nnnn>-...
        m = re.search(r'\\([^\\]*)\\contours-manual\\IRCCI-expert\\IM-0001-(\d{4})-.*', ctr_endo_path)
        self.case = m.group(1)
        self.img_no = int(m.group(2))
    def __str__(self):
        return '<Contour for case %s, image %d>' % (self.case, self.img_no)
    __repr__ = __str__
def find_neighbor_images(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation):
    """Load *num_phases* temporally neighbouring DICOM frames around a contour.

    Frames are sampled every *phase_dilation* images, centred on
    ``contour.img_no``.  Indices outside the cycle are wrapped by
    +/- *num_phases_in_cycle*; a candidate frame is accepted only if its
    slice-position tag (0x20, 0x1041) matches the centre frame's, so all
    returned frames belong to the same slice.

    Returns an int array of shape (num_phases, h, w).
    Raises AssertionError when no same-slice neighbour can be found.
    """
    center_index = contour.img_no
    center_file = 'IM-0001-%04d.dcm' % (contour.img_no)
    center_file_path = os.path.join(data_path, contour.case, 'DICOM', center_file) #modified by C.Cong
    center = dicom.read_file(center_file_path)
    # Slice location tag; used to verify neighbours are on the same slice.
    center_slice_pos = center[0x20, 0x1041]
    center_img = center.pixel_array.astype('int')
    h, w = center_img.shape
    img_arr = np.zeros((num_phases, h, w), dtype="int")
    for i in range (num_phases):
        # Offset i-th phase relative to the centre frame.
        idx = int(center_index + (i - int(num_phases/2))*phase_dilation)
        filename = 'IM-0001-%04d.dcm' % (idx)
        full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
        #If
        if os.path.isfile(full_path) == False:
            # Missing file: wrap around by one cardiac cycle.
            if idx < center_index:
                idx = idx + num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
            else:
                idx = idx - num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
        f = dicom.read_file(full_path)
        f_slice_pos = f[0x20, 0x1041]
        if(f_slice_pos.value != center_slice_pos.value):
            # Wrong slice: try one cycle forward, then two cycles back.
            idx = idx + num_phases_in_cycle
            filename = 'IM-0001-%04d.dcm' % (idx)
            full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
            if os.path.isfile(full_path) == True:
                f = dicom.read_file(full_path)
                f_slice_pos = f[0x20, 0x1041]
            if (f_slice_pos.value != center_slice_pos.value):
                idx = idx - num_phases_in_cycle - num_phases_in_cycle
                filename = 'IM-0001-%04d.dcm' % (idx)
                full_path = os.path.join(data_path, contour.case, 'DICOM', filename)
                if os.path.isfile(full_path) == True:
                    f = dicom.read_file(full_path)
                    f_slice_pos = f[0x20, 0x1041]
                    if (f_slice_pos.value != center_slice_pos.value):
                        raise AssertionError('Cannot find neighbor files for: {:s}'.format(center_file_path))
        img_arr[i] = f.pixel_array.astype('int')
    return img_arr
def read_contour(contour, data_path, num_classes, num_phases, num_phases_in_cycle, phase_dilation):
    """Load the image time-series and endocardium mask for one Contour.

    Returns (img_arr, classify) where img_arr has shape
    (num_phases, h, w, 1) and classify (the filled endo contour mask)
    has shape (1, h, w, 1).

    NOTE(review): num_classes is accepted but not used in this body --
    presumably kept for signature compatibility; confirm.
    """
    #filename = 'IM-%s-%04d.dcm' % (SAX_SERIES[contour.case], contour.img_no)
    filename = 'IM-0001-%04d.dcm' % (contour.img_no)
    full_path = os.path.join(data_path, contour.case, 'DICOM', filename) #modified by C.Cong
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype('int')
    mask = np.zeros_like(img, dtype="uint8")
    # Rasterize the endocardial contour polygon into a binary mask.
    coords = np.loadtxt(contour.ctr_endo_path, delimiter=' ').astype('int')
    cv2.fillPoly(mask, [coords], 1)
    classify = mask
    img_arr = find_neighbor_images(contour, data_path, num_phases, num_phases_in_cycle, phase_dilation)
    # Add channel (and sample) axes so both outputs are 4-D.
    if img_arr.ndim < 4:
        img_arr = img_arr[..., np.newaxis]
    if classify.ndim < 4:
        classify = classify[np.newaxis, ..., np.newaxis]
    return img_arr, classify
def map_all_contours(contour_path):
    """Walk *contour_path* and build a lazy map of Contour objects.

    Every endocardial contour file found determines the matching epi and
    p1-p3 file names (which are assumed to sit in the same directory).
    Returns map(Contour, ...); callers typically wrap it in list().
    """
    endo, epi, p1, p2, p3 = [], [], [], [], []
    for dirpath, _dirnames, files in os.walk(contour_path):
        for endo_f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt'):
            endo.append(os.path.join(dirpath, endo_f))
            # The endo file always exists; derive the others from its number.
            imgno = re.search(r'IM-0001-(\d{4})-icontour-manual.txt', endo_f).group(1)
            prefix = 'IM-0001-' + imgno
            epi.append(os.path.join(dirpath, prefix + '-ocontour-manual.txt'))
            p1.append(os.path.join(dirpath, prefix + '-p1-manual.txt'))
            p2.append(os.path.join(dirpath, prefix + '-p2-manual.txt'))
            p3.append(os.path.join(dirpath, prefix + '-p3-manual.txt'))
    print('Number of examples: {:d}'.format(len(endo)))
    return map(Contour, endo, epi, p1, p2, p3)
def map_endo_contours(contour_path):
    """Map contour files under *contour_path* to Contour objects.

    This was a line-for-line duplicate of map_all_contours (same walk,
    same filters, same derived file names); it now simply delegates so
    the logic lives in one place.  Kept for backward compatibility with
    existing callers.
    """
    return map_all_contours(contour_path)
def export_all_contours(contours, data_path, overlay_path, crop_size=100, num_classes=4, num_phases=5, phase_dilation=1):
    """Load and centre-crop images/masks for every contour in *contours*.

    Returns (images, masks) with shapes
    (N, num_phases, crop_size, crop_size, 1) and
    (N, 1, crop_size, crop_size, num_classes).

    NOTE(review): overlay_path is unused here (the draw_contour call is
    commented out), and num_classes == 2 is collapsed to a single output
    channel -- presumably binary segmentation uses one sigmoid channel.
    """
    print('\nProcessing {:d} images and labels ...\n'.format(len(contours)))
    if num_classes == 2:
        num_classes = 1
    images = np.zeros((len(contours), num_phases, crop_size, crop_size, 1))
    masks = np.zeros((len(contours), 1, crop_size, crop_size, num_classes))
    for idx, contour in enumerate(contours):
        # 20 = frames per cardiac cycle used for index wrap-around.
        img, mask = read_contour(contour, data_path, num_classes, num_phases, 20, phase_dilation)
        #draw_contour(contour, data_path, overlay_path)
        img = center_crop_3d(img, crop_size=crop_size)
        mask = center_crop_3d(mask, crop_size=crop_size)
        images[idx] = img
        masks[idx] = mask
    return images, masks
# ###############learning rate scheduler####################
def lr_scheduler(curr_epoch, curr_iter):
    """Polynomial-decay learning-rate schedule.

    NOTE(review): relies on module-level names set in the __main__ block
    (steps_per_epoch, model_s, base_lr, max_iter); only callable after
    those are defined.
    """
    total_iter = curr_epoch*steps_per_epoch + curr_iter
    lrate = lr_poly_decay(model_s, base_lr, total_iter, max_iter, power=0.5)
    print(' - lr: %f' % lrate)
    return lrate
if __name__== '__main__':
    # Stage 1: run the pre-trained single-frame model over every phase and
    # dump its predicted masks to disk as input for a second-stage model.
    contour_type = 'a'
    weight_s = 'model_logs/sunnybrook_i_unetres_inv_drop_acdc.h5'
    shuffle = False
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    crop_size = 128
    num_phases = 5
    save_path = 'model_logs'
    phase_dilation = 4
    data_proc = DataIOProc(TEMP_CONTOUR_PATH, 'p5_a4')
    print('Mapping ground truth contours to images in train...')
    train_ctrs = list(map_all_contours(TRAIN_CONTOUR_PATH))
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(train_ctrs)
    print('Done mapping training set')
    num_classes = 2
    #No dev
    # First 10% of the (unshuffled) contours become the dev split.
    split = int(0.1*len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]
    print('\nBuilding Train dataset ...')
    img_train, mask_train = export_all_contours(train_ctrs,
                                                TRAIN_IMG_PATH,
                                                TRAIN_OVERLAY_PATH,
                                                crop_size=crop_size,
                                                num_classes=num_classes,
                                                num_phases=num_phases,
                                                phase_dilation=phase_dilation)
    print('\nBuilding Dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            TRAIN_IMG_PATH,
                                            TRAIN_OVERLAY_PATH,
                                            crop_size=crop_size,
                                            num_classes=num_classes,
                                            num_phases=num_phases,
                                            phase_dilation=phase_dilation)
    input_shape = (num_phases, crop_size, crop_size, 1)
    input_shape_s = (crop_size, crop_size, 1)
    model_s = unet_res_model_Inv(input_shape_s, num_classes, nb_filters=8, transfer=True, contour_type=contour_type, weights=weight_s)
    # Shared augmentation settings for images and masks (same seed keeps
    # them aligned).
    kwargs = dict(
        rotation_range=90,
        zoom_range=0.1,
        width_shift_range=0.05,
        height_shift_range=0.05,
        horizontal_flip=True,
        vertical_flip=True,
        data_format="channels_last",
    )
    image_datagen = CardiacTimeSeriesDataGenerator(**kwargs)
    mask_datagen = CardiacTimeSeriesDataGenerator(**kwargs)
    aug_img_path = os.path.join(TRAIN_AUG_PATH, "Image")
    aug_mask_path = os.path.join(TRAIN_AUG_PATH, "Mask")
    img_train = image_datagen.fit(img_train, augment=True, seed=seed, rounds=8, toDir=None)
    mask_train = mask_datagen.fit(mask_train, augment=True, seed=seed, rounds=8, toDir=None)
    epochs = 200
    mini_batch_size = 4
    s, p, h, w, d = img_train.shape
    s_val, p_val, h_val, w_val, d_val = img_dev.shape
    max_iter = int(np.ceil(len(img_train) / mini_batch_size)) * epochs
    steps_per_epoch = int(np.ceil(len(img_train) / mini_batch_size))
    curr_iter = 0
    base_lr = K.eval(model_s.optimizer.lr)
    # NOTE(review): lrate is computed for its side effect on model_s's
    # optimizer; the returned value is otherwise unused here.
    lrate = lr_poly_decay(model_s, base_lr, curr_iter, max_iter, power=0.5)
    callbacks = []
    # ####################### tfboard ###########################
    if K.backend() == 'tensorflow':
        tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs_unet_time'), histogram_freq=1, write_graph=False,
                                  write_grads=False, write_images=False)
        callbacks.append(tensorboard)
    # ################### checkpoint saver#######################
    checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'check_point_model.hdf5'),
                                 save_weights_only=False,
                                 save_best_only=False,
                                 period=2)  # .{epoch:d}
    callbacks.append(checkpoint)
    print('\nPredict for 2nd training ...')
    #img_train_s = img_train[:,4,...]
    #mask_train_s = mask_train[:,0,...]
    #result = model_s.evaluate(img_train_s, mask_train_s)
    #result = np.round(result, decimals=10)
    #print('\nDev set result {:s}:\n{:s}'.format(str(model_s.metrics_names), str(result)))
    if not os.path.exists(TEMP_CONTOUR_PATH):
        os.makedirs(TEMP_CONTOUR_PATH)
    # Create training dataset
    temp_image_t = np.reshape(img_train, (s*p, h, w, d))
    temp_mask_t = model_s.predict(temp_image_t, batch_size=32, verbose=1)
    temp_mask_t = np.reshape(temp_mask_t, (s, p, h, w, d))
    data_proc.save_image_4d(temp_mask_t, 'training')
    data_proc.save_image_4d(mask_train, 'training_mask')
    data_proc.save_data_4d(temp_mask_t.astype('float32'), 'training_data.bin')
    data_proc.save_data_4d(mask_train.astype('float32'), 'training_mask.bin')
    # train_mask_p = np.zeros((s, p, w, h, 1), dtype=K.floatx())
    # for idx_s in range(s):
    #     img_train_p = img_train[idx_s,...]
    #     train_mask_p[idx_s] = model_s.predict(img_train_p)
    #
    #     for idx_p in range(p):
    #         mask = train_mask_p[idx_s, idx_p, ...]
    #         img = img_train[idx_s, idx_p, ...]
    #         img = np.squeeze(img*mask)
    #         img_name = '{:d}-{:d}'.format(idx_s, idx_p)
    #         imsave(os.path.join(TEMP_CONTOUR_PATH, img_name + ".png"), img)
    # Create validation dataset
    print('\nTotal sample is {:d} for 2nd training.'.format(s))
    print('\nPredict for 2nd evaluating ...')
    # NOTE(review): w_val/h_val are swapped here relative to the training
    # reshape above; harmless only because crop_size makes them equal.
    temp_image_dev = np.reshape(img_dev, (s_val*p_val, w_val, h_val, d_val))
    temp_mask_dev = model_s.predict(temp_image_dev, batch_size=16, verbose=1)
    temp_mask_dev = np.reshape(temp_mask_dev, (s_val, p_val, w_val, h_val, d_val))
    data_proc.save_image_4d(temp_mask_dev, 'evaluation')
    data_proc.save_image_4d(mask_dev, 'evaluation_mask')
    data_proc.save_data_4d(temp_mask_dev.astype('float32'), 'eval_data.bin')
    data_proc.save_data_4d(mask_dev.astype('float32'), 'eval_mask.bin')
    #print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    # val_mask_p = np.zeros((s_val, p_val, w_val, h_val, 1), dtype=K.floatx())
    # for idx_s in range(s_val):
    #     img_val_p = img_dev[idx_s,...]
    #     val_mask_p[idx_s] = model_s.predict(img_val_p)
    # dev_generator = (temp_mask_dev, mask_dev)
    # print('\nTotal sample is {:d} for 2nd evaluation.'.format(s_val))
    # model_t = unet_res_model_time(input_shape, num_classes, nb_filters=64, n_phases=num_phases, dilation=phase_dilation, transfer=True, weights=None)
    # model_t.fit(temp_mask_t,
    #             mask_train,
    #             epochs=epochs,
    #             batch_size=1,
    #             validation_data=dev_generator,
    #             callbacks=callbacks,
    #             class_weight=None
    #             )
|
"""
A scaled down version of the Brunel model useful for testing (see OMV files: .test.*)
"""
from brunel08 import runBrunelNetwork
from pyNN.utility import get_script_args
# First CLI argument selects the pyNN backend (e.g. nest, neuron).
simulator_name = get_script_args(1)[0]
simtime = 1000
# Scaled-down network size for testing (order controls population sizes).
order = 100
eta = 2.0           # rel rate of external input
g = 5.0             # inhibition/excitation weight ratio
# NOTE(review): N_rec=500 may exceed the population size at order=100 --
# confirm runBrunelNetwork clamps it.
runBrunelNetwork(g=g, eta=eta, simtime = simtime, order = order, save=True, simulator_name=simulator_name,N_rec=500)
|
import numpy as np
def fft(samples: np.ndarray, fft_size: int) -> np.ndarray:
    """Return the centred (DC-in-the-middle) FFT of *samples*.

    Parameters
    ----------
    samples : np.ndarray
        Input signal, real or complex.  (The previous annotation used
        ``np.array``, which is the constructor function, not a type.)
    fft_size : int
        FFT length; the input is truncated or zero-padded to this size.

    Returns
    -------
    np.ndarray
        Complex spectrum with zero frequency shifted to the centre.
    """
    # np.fft.fft truncates/zero-pads to fft_size; fftshift centres DC.
    spectrum = np.fft.fft(samples, fft_size)
    return np.fft.fftshift(spectrum)
def psd(samples, fft_size: int):
    """Power spectral density (in dB) of one *fft_size*-long frame.

    Applies a Hamming window, takes a centred FFT, squares the magnitude
    and converts to decibels; non-finite values from zero-power bins are
    mapped to finite numbers by nan_to_num.
    """
    windowed = np.hamming(fft_size) * samples
    spectrum = np.fft.fftshift(np.fft.fft(windowed, fft_size))
    power = np.abs(spectrum) ** 2
    return np.nan_to_num(10.0 * np.log10(power))
def calc_fft_psd(real, imag, fft_size):
    """Average PSD over consecutive *fft_size*-long frames of an I/Q signal.

    Parameters
    ----------
    real, imag : sequences of equal length
        In-phase and quadrature components of the signal.
    fft_size : int
        Frame length for each PSD computation.

    Returns
    -------
    np.ndarray of length fft_size — the mean PSD across all full frames.

    Raises
    ------
    ValueError
        If there are fewer than fft_size samples (previously this fell
        through to a mean over zero frames, silently returning NaNs).
    """
    # Vectorized complex construction instead of a per-element Python loop.
    samples = np.asarray(real, dtype=float) + 1j * np.asarray(imag, dtype=float)
    n_fft_steps = len(samples) // fft_size
    if n_fft_steps == 0:
        raise ValueError(
            "need at least fft_size (%d) samples, got %d" % (fft_size, len(samples)))
    freq_result = np.empty((n_fft_steps, fft_size))
    for i in range(n_fft_steps):
        freq_result[i] = psd(samples[i * fft_size:(i + 1) * fft_size], fft_size)
    return np.mean(freq_result, axis=0)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 9 09:33:40 2019
@author: amc
"""
# -------------------- script for A.I. -----------------------#
import numpy
import pandas
import re
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem.snowball import SnowballStemmer
# Shared stemmer used by preprocess() and the corpus-building loop below.
ps = SnowballStemmer('english')
def preprocess(text):
    """Normalize *text*: strip non-letters, lowercase, drop English
    stopwords, and stem each remaining word.  Returns a single
    space-joined string of stems.
    """
    # Stem and remove stopwords
    text = re.sub('[^a-zA-Z]', ' ', text)
    text = text.lower()
    text = text.split()
    # Build the stopword set once: the old comprehension re-created
    # set(stopwords.words('english')) for EVERY word.
    stop_words = set(stopwords.words('english'))
    text = [ps.stem(word) for word in text if word not in stop_words]
    return ' '.join(text)
# Load the three interview transcripts and merge them into one frame.
dataset1 = pandas.read_csv('interview1.csv', encoding='ISO-8859-1')
dataset2 = pandas.read_csv('interview2.csv', encoding='ISO-8859-1')
dataset3 = pandas.read_csv('interview3.csv', encoding='ISO-8859-1')
dataset = pandas.concat([dataset1, dataset2, dataset3])
#Reset index otherwise during the loop below we select multiple rows (pandas.concat results in repeated indices)
dataset = dataset.reset_index(drop=True)
# Preprocess every question ('Q' column) into the training corpus.
querycorpus = []
for i in range(0, len(dataset)):
    query = re.sub('[^a-zA-Z]', ' ', dataset['Q'][i])
    query = query.lower()
    query = query.split()
    # NOTE(review): the stopword set is rebuilt for every word here
    # (same pattern preprocess() had); hoisting it would speed this up.
    query = [ps.stem(word) for word in query if not word in set(stopwords.words('english'))]
    query = ' '.join(query)
    querycorpus.append(query)
# Creating the Bag of Words model with TFIDF and calc cosine_similarity
vectorizer = CountVectorizer(decode_error="replace")
vec_train = vectorizer.fit_transform(querycorpus) #this is needed to get the attribute vocabulary_
training_vocabulary = vectorizer.vocabulary_
transformer = TfidfTransformer()
# Vectorizer locked to the training vocabulary, used at query time.
trainingvoc_vectorizer = CountVectorizer(decode_error="replace", vocabulary=training_vocabulary)
tfidf_querycorpus = TfidfVectorizer().fit_transform(querycorpus)
def answer(newquery, k):
    """Return the best-matching answer for *newquery*.

    Returns the string "Not understood" when the query shares no
    vocabulary with the corpus; otherwise a tuple
    (best answer text, DataFrame slice of the selected rows).

    Fix: the diagnostic print statements were placed AFTER the return
    statements and therefore never executed (dead code); they now run
    before returning, and the two identical return branches are merged.
    """
    tfidf_newquery = transformer.fit_transform(trainingvoc_vectorizer.fit_transform(numpy.array([preprocess(newquery)])))
    cosine_similarities = cosine_similarity(tfidf_newquery, tfidf_querycorpus)
    # Indices of corpus questions, most similar first.
    related_docs_indices = (-cosine_similarities[0]).argsort()
    sorted_freq = cosine_similarities[0][related_docs_indices]
    #note for this distance the problem we had befor with inf, we have now with 0. Again we decide
    #to make the prediction a bit random. This could be adjusted to remove any 0 distance and
    #pick the only ones left if any, and if none predict 1.
    if sum(sorted_freq)==0:
        # No vocabulary overlap with the training corpus at all.
        return "Not understood"
    if sorted_freq[k-1]!=sorted_freq[k] or sorted_freq[k-1]==sorted_freq[k]==0:
        selected = related_docs_indices[:k]
    else:
        # Ties at the k-th score: extend the selection past k up to the
        # first index where the score actually changes.
        indeces = numpy.where(numpy.roll(sorted_freq,1)!=sorted_freq)
        selected = related_docs_indices[:indeces[0][indeces[0]>=k][0]]
    print("\n Cosine Similarities:", sorted_freq[:k], "\n")
    return dataset.iloc[selected[0]]['A'], dataset.iloc[selected,:(k-1)]
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import cgi
from caesar import encrypt
# html boilerplate for the top of every page
# NOTE(review): neither template is referenced in the visible handlers --
# presumably used elsewhere or left over; confirm before removing.
page_header = """
<!DOCTYPE html>
<html>
<head>
</head>
<body>
"""
# html boilerplate for the bottom of every page
page_footer = """
</body>
</html>
"""
class MainHandler(webapp2.RequestHandler):
    """Handles requests coming in to '/' (the root of the site)
    """
    def get(self):
        """Render the Caesar-rotation form.

        Fix: the rotation <input> tag was never closed in the original
        string (``name="rotation"`` followed by a bare '.'), producing
        malformed HTML; the tag is now self-closed.
        """
        # a form for rotating letters
        rotate_form = """
        <form action="/" method="post">
            <label>
                I want to rotate
                <input type="text" name="text"/>
                by
                <input type="text" name="rotation"/>
                .
            </label>
            <input type="submit" value="Rotate"/>
        </form>
        """
        self.response.write(rotate_form)
    def post(self):
        """Apply the Caesar rotation and echo the (escaped) result."""
        new_text = str(self.request.get("text"))
        rotation_number = int(self.request.get("rotation"))
        response = encrypt(new_text, rotation_number)
        # Escape before echoing to prevent reflected XSS.
        new_text = cgi.escape(response, quote=True)
        self.response.write(new_text)
# WSGI route table: the site root is handled by MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
|
import cuisine as c
import subprocess
from fog_lib import FogRequester, shutdown
import logging
class SnapinRequester(FogRequester):
    """Fetches snapin (one-shot software install) jobs from a FOG server."""
    def _handler(self, text):
        """Parse the server reply into a dict of snapin attributes.

        The reply's first line is a status code; the remaining lines are
        ``SNAPINKEY=value`` pairs.  Keys are lowercased with the
        'snapin' prefix stripped.  Raises ValueError when the status is
        not FOG_OK (no snapins pending).
        """
        def process(pair):
            key = pair[0].lower().replace('snapin', '')
            value = pair[1]
            return key, value
        lines = text.splitlines()
        status, data = lines[0], lines[1:]
        if status != self.FOG_OK:
            raise ValueError("No snapins pending")
        # Split on the FIRST '=' only: values (e.g. snapin args) may
        # themselves contain '=', which made the old bare split() raise
        # "too many values to unpack".
        keys_values = (element.split("=", 1) for element in data)
        keys_values_processed = (process(x) for x in keys_values)
        snapin_dict = dict(keys_values_processed)
        return snapin_dict
    def get_snapin_data(self):
        """Check in with the server and return the pending snapin's dict."""
        service = "snapins.checkin"
        text = self.get_data(service=service)
        snapin_dict = self._handler(text)
        return snapin_dict
    def download_snapin(self, snapin):
        """Download the binary payload for *snapin*."""
        data = self.get_data(service="snapins.file",
                             binary=True,
                             taskid=snapin.task_id)
        return data
    def confirm_snapin(self, snapin):
        """Report *snapin*'s exit code; True when the server acknowledges."""
        data = self.get_data(service="snapins.checkin",
                             taskid=snapin.task_id,
                             exitcode=snapin.return_code)
        return data == self.FOG_OK
class Snapin(object):
    """A single FOG snapin job: metadata plus download/execute/confirm steps."""
    def __init__(self, snapin_dict, snapin_dir, fog_requester):
        super(Snapin, self).__init__()
        self.snapin_dir = snapin_dir
        self.filename = snapin_dict["filename"]
        self.task_id = snapin_dict["jobtaskid"]
        self.args = snapin_dict["args"]
        self.run_with = snapin_dict["runwith"]
        self.run_with_args = snapin_dict["runwithargs"]
        # Fix: values parsed from the server reply are strings (see
        # SnapinRequester._handler's split on '='), so the old int
        # comparison `== 1` could never be true and reboot was never
        # requested.  Accept both forms.
        self.reboot = snapin_dict["bounce"] in (1, "1")
        self.fog_requester = fog_requester
        self.return_code = 0
    @property
    def complete_filename(self):
        """Full path of the snapin file inside snapin_dir."""
        if self.snapin_dir[-1] != '/':
            dirname_slash = self.snapin_dir + '/'
        else:
            dirname_slash = self.snapin_dir
        return dirname_slash + self.filename
    def _download(self):
        """Fetch the snapin payload and write it to complete_filename."""
        data = self.fog_requester.download_snapin(self)
        with open(self.complete_filename, "wb") as snapin_file:
            snapin_file.write(data)
    def _execute(self):
        """Run the snapin and record its exit status in return_code."""
        with c.mode_local():
            c.file_ensure(self.complete_filename, mode="700")
        line = " ".join([self.run_with, self.run_with_args,
                         self.complete_filename, self.args])
        # NOTE(review): shell=True with server-supplied strings is a
        # command-injection risk if the FOG server is not fully trusted.
        r_code = subprocess.call(line, shell=True)
        self.return_code = r_code
    def _confirm(self):
        """Report the exit code back to the FOG server."""
        self.fog_requester.confirm_snapin(self)
    def install(self):
        """Download, execute and confirm the snapin (with sudo)."""
        with c.mode_sudo():
            self._download()
            self._execute()
            self._confirm()
def client_snapin(fog_host, mac, snapin_dir, allow_reboot=False):
    """Fetch and install one pending snapin for this host.

    Returns (action, reboot): whether a snapin was installed, and whether
    it requested a reboot.  A reboot is only actually triggered when
    *allow_reboot* is True.  Network and "no snapins" errors are logged
    and swallowed.
    """
    requester = SnapinRequester(fog_host=fog_host, mac=mac)
    action = False
    reboot = False
    try:
        snapin = Snapin(requester.get_snapin_data(), snapin_dir, requester)
        snapin.install()
        logging.info("Installed " + snapin.complete_filename +
                     " with returncode " + str(snapin.return_code))
        action, reboot = True, snapin.reboot
        if allow_reboot and reboot:
            shutdown(mode="reboot")
    except (IOError, ValueError) as e:
        # IOError: transport failure; ValueError: no snapins pending.
        logging.info(e)
    return action, reboot
|
from .Cliente import Cliente
from .interfaces.IClienteService import IClienteService
from .interfaces.IClienteRepository import IClienteRepository
from .interfaces.IEmailService import IEmailService
class ClienteService(IClienteService):
    """Application service for client registration: validates, persists,
    and sends a welcome e-mail."""
    def __init__(self, _emailService: IEmailService, _clienteRepository:IClienteRepository):
        self._emailService = _emailService
        self._clienteRepository = _clienteRepository
    def adicionarCliente(self, cliente: Cliente):
        """Validate *cliente*, store it, e-mail a welcome message, and
        return a status string."""
        if not cliente.isValid():
            return 'Dados inválidos'
        repository = self._clienteRepository
        mailer = self._emailService
        repository.adicionarCliente(cliente)
        mailer.enviar('empresa@empresa.com', cliente.nome, cliente.email, 'Bem Vindo', 'Parabéns está Cadastrado')
        return 'Cliente cadastrado com sucesso.'
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
class get_browsers:
    '''
    Detect web browsers installed on a Linux system via ``apropos``.

    work() collects browser names from ``apropos 'web browser'``, writes
    an "Installed Browser.csv" report into the ./output directory, and
    returns {"List of Installed Browsers": [[name], ...]} (with a header
    row prepended).
    '''
    def __init__(self):
        '''
        Initialise the working state used by work().
        '''
        self.command_output = ""            # raw text captured from the apropos command
        self.browsers = []                  # parsed browser names
        self.data = ""                      # CSV payload written to disk
        self.current_path = os.getcwd()     # used to locate the output directory
    def work(self):
        '''
        Collect browser names, write the CSV report, and return the dict.
        '''
        ret_data = {"List of Installed Browsers":[]}
        # apropos lists matching man pages, one entry per line.
        self.command_output = os.popen("apropos 'web browser'").read()
        self.browsers = self.command_output.split('\n')
        self.browsers.pop()  # drop the empty element after the final newline
        # Keep only the command name: everything before " (section)".
        self.browsers = [i[:i.find('(')-1] for i in self.browsers]
        self.data = "S.No,Browser Name\n"
        # Fix: enumerate instead of list.index() -- index() made the loop
        # O(n^2) and mis-numbered duplicate browser names (it always
        # returns the first occurrence).
        for number, name in enumerate(self.browsers, 1):
            self.data += str(number)+","+str(name)+"\n"
        if self.current_path.find("output") == -1:  # already in the output folder?
            self.current_path += "/output/"
        os.chdir(self.current_path)
        with open("Installed Browser.csv","w") as browser:
            browser.write(self.data)
        self.browsers.insert(0,"Installed Browsers")
        for i in self.browsers:
            ret_data["List of Installed Browsers"].append([i])
        return ret_data
import pytest
from werkzeug.exceptions import HTTPException
from flaskapp.db import get_db
from flaskapp.reports import get_report_by_id
def test_index(client):
    """The index page lists reports and shows a state column."""
    response = client.get("/")
    assert b"Reports" in response.data
    assert b"State" in response.data
@pytest.mark.parametrize("id", (25, 16))
def test_block(client, app, id):
    """POST /block/<id> redirects and marks the report BLOCKED in the DB."""
    path = f"/block/{id}"
    response = client.post(path)
    assert response.status == "302 FOUND"
    with app.app_context():
        db = get_db()
        row = db.execute("SELECT * FROM reports WHERE id = ?", (id,)).fetchone()
        assert row["state"] == "BLOCKED"
@pytest.mark.parametrize("id", (25, 16))
def test_resolve(client, app, id):
    """PUT /resolve/<id> succeeds and marks the report RESOLVED."""
    response = client.put(f"/resolve/{id}")
    assert response.status == "200 OK"
    with app.app_context():
        record = get_db().execute(
            "SELECT * FROM reports WHERE id = ?", (id,)
        ).fetchone()
        assert record["state"] == "RESOLVED"
@pytest.mark.parametrize("id", (25, 16))
def test_hidden_resolved(client, app, id):
    """A resolved report no longer appears on the index page."""
    client.put(f"/resolve/{id}")
    index_body = client.get("/").data
    assert f"post-{id}".encode() not in index_body
def test_get_report_by_id(app):
    """get_report_by_id returns the matching row and aborts for unknown ids."""
    with app.app_context():
        report = get_report_by_id(25)
        assert report["id"] == 25
        with pytest.raises(HTTPException):
            get_report_by_id(-1)
|
#!/usr/bin/env python3
# coding: utf-8
__author__ = "Robert Abel"
__copyright__ = "Copyright (c) 2018–2019"
__license__ = "MIT"
import certifi
import os
import sys
import argparse
import hmac
import json
import locale
import pycurl
import random
import re
import yaml
from base64 import b64encode
from datetime import datetime, timezone
from io import BytesIO
from hashlib import sha1
from urllib.parse import urlencode, quote, parse_qs
# WordPress REST API (wp-json) endpoint paths used throughout this script.
posts_ep = '/wp/v2/posts'
categories_ep = '/wp/v2/categories'
tags_ep = '/wp/v2/tags'
comments_ep = '/wp/v2/comments'
media_ep = '/wp/v2/media'
users_ep = '/wp/v2/users'
def generate_nonce(length=8):
    """Return a random numeric nonce string of *length* digits.

    Fixed: OAuth nonces must be unpredictable, so this now uses the
    cryptographically secure ``secrets`` module instead of ``random``.
    """
    import secrets  # local import keeps the module's import block unchanged
    return ''.join(str(secrets.randbelow(10)) for _ in range(length))
class OAuth10aNoTokenException(Exception):
    """Exception type for OAuth 1.0a token errors."""
class OAuth10a:
    """Minimal OAuth 1.0a signer (HMAC-SHA1).

    Holds the consumer credentials and, once obtained, the token
    credentials, and produces ``Authorization: OAuth ...`` header strings
    for signed requests.
    """

    def __init__(self, consumerKey, consumerSecret, oauthToken=None, oauthTokenSecret=None):
        self._consumerKey = consumerKey
        self._consumerSecret = consumerSecret
        self._oauthToken = oauthToken
        self._oauthTokenSecret = oauthTokenSecret

    def updateOAuthToken(self, token, secret):
        """Store token credentials obtained during the OAuth handshake."""
        self._oauthToken = token
        self._oauthTokenSecret = secret

    def _getOAuthParams(self):
        """Return the fresh per-request oauth_* protocol parameters."""
        params = {
            'oauth_consumer_key': self._consumerKey,
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_timestamp': str(int(datetime.now().timestamp())),
            'oauth_nonce': generate_nonce(8),
        }
        if (self._oauthToken is not None):
            params['oauth_token'] = self._oauthToken
        return params

    @staticmethod
    def _sortOAuthParams(oauth_params_list):
        # NOTE(review): sorts by the concatenation key+value, whereas
        # RFC 5849 sec. 3.4.1.3.2 sorts by encoded name, then value. The
        # two orders differ only in corner cases -- confirm against the
        # server before changing, since it would alter signatures.
        return sorted(oauth_params_list, key=lambda x: x[0] + x[1])

    @staticmethod
    def _OAuthParamsToHeader(oauth_params):
        """Serialize oauth_* params into one Authorization header line."""
        parts = []
        parts += ['{0:s}="{1:s}"'.format(quote(k, safe='-._~'), quote(v, safe='-._~')) for k, v in OAuth10a._sortOAuthParams(list(oauth_params.items()))]
        return 'Authorization: OAuth ' + ', '.join(parts)

    def getOAuthHeader(self, method, url, query_post_params=None, additional_oauth_params=None):
        """Build the signed OAuth Authorization header for *method* on *url*.

        query_post_params: request query/POST parameters that take part in
        the signature base string.
        additional_oauth_params: extra oauth_* params (e.g. oauth_callback).
        """
        # Fixed: the previous mutable-dict defaults ({}) are a Python
        # anti-pattern; None sentinels are behaviorally identical here.
        if query_post_params is None:
            query_post_params = {}
        if additional_oauth_params is None:
            additional_oauth_params = {}
        oauth_params = self._getOAuthParams()
        oauth_params.update(additional_oauth_params)
        if ('oauth_signature' in oauth_params):
            del oauth_params['oauth_signature']
        # Signature base string: METHOD & encoded-url & encoded-params.
        terms = list(oauth_params.items()) + list(query_post_params.items())
        terms = OAuth10a._sortOAuthParams(terms)
        text = method + '&' + quote(url, safe='') + '&' + quote('&').join([quote(k) + quote('=') + quote(v) for k, v in terms])
        key = quote(self._consumerSecret) + '&'
        if (self._oauthTokenSecret is not None):
            key += quote(self._oauthTokenSecret)
        hashed = hmac.new(key.encode('utf-8'), text.encode('utf-8'), sha1)
        oauth_params['oauth_signature'] = b64encode(hashed.digest()).decode('utf-8')
        return OAuth10a._OAuthParamsToHeader(oauth_params)
def fn_register(oauth, config):
    """Run the interactive OAuth 1.0a handshake against the site's
    ``/oauth1/*`` endpoints: request token -> manual browser
    authorization -> access token, then print the updated config for
    the user to save.

    Returns 0 on success, a negative error code otherwise.
    """
    # Only the out-of-band ('oob') callback flow is supported here.
    if ('oauthCallback' not in config or config['oauthCallback'] != 'oob'):
        print('oauthCallback missing from config or not \'oob\'...')
        return -1
    site = config['url']
    oauth1_request_url = site + '/oauth1/request'
    oauth1_authorize_url = site + '/oauth1/authorize'
    oauth1_access_url = site + '/oauth1/access'
    c = pycurl.Curl()
    c.setopt(c.CAINFO, certifi.where())
    #c.setopt(c.VERBOSE, True)
    # Step 1: obtain a temporary request token.
    buffer = BytesIO()
    post_params = {}
    c.setopt(c.POST, True)
    c.setopt(c.URL, oauth1_request_url)
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', oauth1_request_url, post_params, {'oauth_callback': config['oauthCallback']})])
    c.setopt(c.POSTFIELDS, '')
    c.perform()
    status = c.getinfo(c.RESPONSE_CODE)
    if (status != 200):
        print('Requesting Authorization failed...')
        return -1
    # The token pair arrives urlencoded in the response body.
    response = buffer.getvalue().decode('utf-8')
    params = parse_qs(response)
    if (not(all(e in params for e in ['oauth_token', 'oauth_token_secret']))):
        print('Authorization Response did not contain required token and secret.')
        return -2
    # Step 2: the user authorizes the request token in a browser and
    # types the resulting verifier back in.
    print('Please visit the following URL:\n'
          '\n' +
          oauth1_authorize_url + '?' + response
    );
    oauthVerifier = input('Please insert verifier (or nothing to cancel):')
    if (oauthVerifier == ''):
        print('Authorization aborted...')
        return -3
    print('Verifier: ' + oauthVerifier)
    oauth.updateOAuthToken(params['oauth_token'][-1], params['oauth_token_secret'][-1])
    # Step 3: exchange request token + verifier for the access token.
    post_params = {}
    add_oauth_param = {
        'oauth_verifier': oauthVerifier,
        'oauth_callback': config['oauthCallback']
    }
    buffer = BytesIO()
    c.setopt(c.POST, True)
    c.setopt(c.URL, oauth1_access_url)
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', oauth1_access_url, post_params, add_oauth_param)])
    c.setopt(c.POSTFIELDS, '')
    c.perform()
    status = c.getinfo(c.RESPONSE_CODE)
    if (status != 200):
        print('Accessing OAuth 1.0a failed...')
        return -4
    response = buffer.getvalue().decode('utf-8')
    params = parse_qs(response)
    if (not(all(e in params for e in ['oauth_token', 'oauth_token_secret']))):
        print('Authorization Response did not contain required token and secret.')
        return -5
    # Persisting is left to the user: dump the updated config to copy back.
    config['oauthToken'] = params['oauth_token'][-1]
    config['oauthTokenSecret'] = params['oauth_token_secret'][-1]
    print('Updated Config:')
    print('-' * 72)
    print(yaml.dump(config, default_flow_style=False))
    print('-' * 72)
    return 0
def fn_test(oauth, config):
    """Smoke-test signed REST access: POST a field update to the
    hard-coded post #483 and print status, timing and the raw body.

    Always returns 0 (the HTTP status is only printed, not propagated).
    """
    site = config['url']
    site_root = site + '/wp-json{0:s}'
    c = pycurl.Curl()
    # Hard-coded test target: post 483.
    url = site_root.format(posts_ep) + '/483'
    json_data = {
        'comment_status': 'open',
    }
    post_params = {}
    buffer = BytesIO()
    c.setopt(c.CAINFO, certifi.where())
    c.setopt(c.URL, url)
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', url), 'Content-Type: application/json; charset=utf-8'])
    c.setopt(c.POSTFIELDS, json.dumps(json_data))
    c.perform()
    # HTTP response code, e.g. 200.
    status = c.getinfo(c.RESPONSE_CODE)
    if (status == 200):
        print('Erfolg!!!')
    print('Status: %d' % c.getinfo(c.RESPONSE_CODE))
    # Elapsed time for the transfer.
    print('Status: %f' % c.getinfo(c.TOTAL_TIME))
    print('-'*72)
    print(buffer.getvalue().decode('UTF-8'))
    print('-'*72)
    c.close()
    return 0
def fn_transfer(oauth, config, args):
    """Migrate YAML-dumped blog posts (with categories, authors and
    comments) into the WordPress instance configured in *config*.

    args.directory holds one ``*.yml`` file per post plus ``authors.yml``;
    args.create_users controls whether missing authors are created.
    Returns 0 on success, a negative error code otherwise.
    """
    blogEntries = []
    directory = args.directory
    createUser = args.create_users
    print('Loading posts...', end='')
    for entry in os.scandir(directory):
        if (not entry.is_file()):
            continue
        if (entry.name == 'authors.yml'):
            continue
        if (entry.name == 'categories.yml'):
            continue
        if (not(entry.name.endswith('.yml'))):
            continue
        with open(entry.path, 'r', encoding='utf-8') as f:
            # Fixed: bare yaml.load() is deprecated/unsafe and requires a
            # Loader in modern PyYAML; these dumps are plain data, so
            # safe_load() is the documented replacement.
            data = yaml.safe_load(f)
        blogEntries.append(data)
    print('Done.')
    print('Extracting post categories...')
    # extract categories
    categories = set()
    for entry in blogEntries:
        categories.update(entry['categories'])
    # extract authors
    with open(directory + '/authors.yml', 'r', encoding='utf-8') as f:
        authorMap = yaml.safe_load(f)  # fixed: safe_load, see above
    authorIds = set()
    print('Extracting post authors...')
    for entry in blogEntries:
        authorIds.add(entry['author_id'])
    # Fixed: the previous check used any([...]) (False for a falsy id such
    # as 0) and printed a format string without .format() arguments.
    unmappedAuthorIds = [e for e in authorIds if e not in authorMap]
    if (unmappedAuthorIds):
        for unmapped_id in unmappedAuthorIds:
            print('Error: Author ID {0:d} unmapped.'.format(unmapped_id))
        return -2
    # trim author map
    # Fixed: deleting from a dict while iterating it raises RuntimeError;
    # iterate over a snapshot of the keys instead.
    for k in list(authorMap):
        if (k not in authorIds):
            del authorMap[k]
    # setup
    site = config['url']
    site_root = site + '/wp-json{0:s}'
    print('Retrieving existing post categories...')
    query_params = {
        'per_page': str(100),
    }
    buffer = BytesIO()
    c = pycurl.Curl()
    url = site_root.format(categories_ep)
    c.setopt(c.URL, url + '?' + urlencode(query_params))
    c.setopt(c.CAINFO, certifi.where())
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('GET', url, query_params)])
    c.perform()
    # HTTP response code, e.g. 200.
    status = c.getinfo(c.RESPONSE_CODE)
    if (status != 200):
        print('Retrieving existing categories failed...')
        return -1
    response = json.loads(buffer.getvalue().decode('UTF-8'))
    blogCategories = {}
    for category in response:
        blogCategories[category['name']] = category['id']
    # Map every needed category name to its blog id, creating the
    # categories that do not exist yet.
    category_map = {k: None for k in categories}
    for k in category_map:
        if (k in blogCategories):
            category_map[k] = blogCategories[k]
    for k, v in category_map.items():
        if (v is None):
            print('Creating category {0:s}...'.format(k))
            json_data = {
                'name': k,
            }
            post_params = {}
            buffer = BytesIO()
            c.setopt(c.URL, url)
            c.setopt(c.WRITEDATA, buffer)
            c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', url), 'Content-Type: application/json; charset=utf-8'])
            c.setopt(c.POSTFIELDS, json.dumps(json_data))
            c.perform()
            status = c.getinfo(c.RESPONSE_CODE)
            if (status != 201):
                print('Creating category failed.')
                print(buffer.getvalue().decode('UTF-8'))
                return -1
            response = json.loads(buffer.getvalue().decode('UTF-8'))
            category_map[k] = response['id']
            print('Category {0:s} is using ID {1:d}'.format(k, response['id']))
        else:
            print('Category {0:s} is using ID {1:d}'.format(k, v))
    print('Retrieving existing users...')
    query_params = {
        'per_page': str(100),
    }
    buffer = BytesIO()
    c = pycurl.Curl()
    url = site_root.format(users_ep)
    c.setopt(c.URL, url + '?' + urlencode(query_params))
    c.setopt(c.CAINFO, certifi.where())
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('GET', url, query_params)])
    c.perform()
    # HTTP response code, e.g. 200.
    status = c.getinfo(c.RESPONSE_CODE)
    if (status != 200):
        print('Retrieving existing users failed...')
        return -1
    response = json.loads(buffer.getvalue().decode('UTF-8'))
    blogUsers = {}
    for user in response:
        blogUsers[user['slug']] = user['id']
    # Either verify that every author already exists on the blog, or
    # create the missing ones (--create-users).
    if (not(createUser)):
        unmappedUsers = [(k, v['slug']) for k, v in authorMap.items() if v['slug'] not in blogUsers]
        if (any(unmappedUsers)):
            for _author_id, slug in unmappedUsers:
                print('Error: user {0:s} does not exist on blog.'.format(slug))
            return -2
    else:
        for k, v in authorMap.items():
            if (v['slug'] in blogUsers):
                continue
            print('Creating user {0:s}...'.format(v['slug']))
            json_data = {
                'name': v['name'],
                'slug': v['slug'],
                'username': v['slug'],
                'email': v['slug'] + '@example.com',
                'password': 'passw9rd!',
            }
            post_params = {}
            buffer = BytesIO()
            c.setopt(c.URL, url)
            c.setopt(c.WRITEDATA, buffer)
            c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', url), 'Content-Type: application/json; charset=utf-8'])
            c.setopt(c.POSTFIELDS, json.dumps(json_data).encode('UTF-8'))
            c.perform()
            status = c.getinfo(c.RESPONSE_CODE)
            if (status != 201):
                print('Creating user failed.')
                print(buffer.getvalue().decode('UTF-8'))
                return -1
            response = json.loads(buffer.getvalue().decode('UTF-8'))
            blogUsers[response['slug']] = response['id']
            print('User {0:s} is using ID {1:d}'.format(response['slug'], response['id']))
    for k, v in authorMap.items():
        print('User {0:s} is using ID {1:d}'.format(v['slug'], blogUsers[v['slug']]))
    # process posts
    url = site_root.format(posts_ep)
    for entry in blogEntries:
        title = entry['title']
        oldAuthorId = entry['author_id']
        categories = entry['categories']
        comments = [] if ('entries' not in entry['comments']) else entry['comments']['entries']
        content = entry['content']
        date = datetime.fromisoformat(entry['date'])
        print('Processing \'{0:s}\' with {1:d} comments...'.format(title, len(comments)))
        categoryIds = [category_map[c] for c in categories]
        authorId = blogUsers[authorMap[oldAuthorId]['slug']]
        # rework content
        #
        # \r\n<br/>\r\n --> \r\n
        # <p>\space*</p> --> kill
        # \space+[word]+\r\n[word]+\space+ --> [word]+ [word+]
        # \space+-\r\n[word]+ --> \space+- [word]+
        content = re.sub(r'\r\n<br/>\r\n', r'\r\n', content)
        content = re.sub(r'<p>\s*</p>', r'', content)
        content = re.sub(r'\xA0', r'', content)  # nbsp;
        content = re.sub(r'(\w+)\r\n<a(.*)</a>\r\n(\w+)', r'\1 <a\2</a> \3', content)
        content = re.sub(r'</a>\r\n([.:,])', r'</a>\1', content)
        content = re.sub(r'(\s+)([,.:\w\-\(\)]+)\s*\r\n(?!\d)([.,:\w\-\(\)]+)(\s)', r'\1\2 \3\4', content)
        content = re.sub(r'(\s+)([,.:\w\-\(\)]+)\r\n\s*(?!\d)([.,:\w\-\(\)]+)(\s)', r'\1\2 \3\4', content)
        content = re.sub(r'(\s+)-\r\n(\s+)([.,\w\-\(\)]+)', r'\1-\2\3', content)
        json_data = {
            'date_gmt': date.astimezone(timezone.utc).isoformat(),
            'status': 'publish',
            'title': title,
            'content': content,
            'author': str(authorId),
            'comment_status': 'closed' if (comments == []) else 'open',
            'ping_status': 'closed',
            'format': 'standard',
            'categories': [str(e) for e in categoryIds]
        }
        post_params = {}
        buffer = BytesIO()
        c.setopt(c.URL, url)
        c.setopt(c.WRITEDATA, buffer)
        c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', url), 'Content-Type: application/json; charset=utf-8'])
        c.setopt(c.POSTFIELDS, json.dumps(json_data))
        c.perform()
        status = c.getinfo(c.RESPONSE_CODE)
        if (status != 201):
            print(' Creating post \'{0:s}\' failed.'.format(title))
            print(buffer.getvalue().decode('UTF-8'))
            return -1
        response = json.loads(buffer.getvalue().decode('UTF-8'))
        post_id = response['id']
        print(' Created post #{0:d}.'.format(post_id))
        if (comments != []):
            # create comments
            comment_url = site_root.format(comments_ep)
            for comment_ix, comment in enumerate(comments):
                comment_author = comment['authorName']
                comment_date = datetime.fromisoformat(comment['date'])
                comment_content = comment['content']
                json_data = {
                    'date_gmt': comment_date.astimezone(timezone.utc).isoformat(),
                    'author_name': comment_author,
                    'author_email': 'sysmail@bingo-ev.de',
                    'content': comment_content,
                    'post': str(post_id),
                    'status': 'approve',
                }
                post_params = {}
                buffer = BytesIO()
                c.setopt(c.URL, comment_url)
                c.setopt(c.WRITEDATA, buffer)
                c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', comment_url), 'Content-Type: application/json; charset=utf-8'])
                c.setopt(c.POSTFIELDS, json.dumps(json_data))
                c.perform()
                status = c.getinfo(c.RESPONSE_CODE)
                if (status != 201):
                    print(' Creating comment {0:d} failed.'.format(comment_ix))
                    print(buffer.getvalue().decode('UTF-8'))
                    return -1
            # Comments were imported historically; close commenting again.
            post_url = '{0:s}/{1:d}'.format(site_root.format(posts_ep), post_id)
            json_data = {
                'comment_status': 'closed',
            }
            post_params = {}
            buffer = BytesIO()
            c.setopt(c.URL, post_url)
            c.setopt(c.WRITEDATA, buffer)
            c.setopt(c.HTTPHEADER, [oauth.getOAuthHeader('POST', post_url), 'Content-Type: application/json; charset=utf-8'])
            c.setopt(c.POSTFIELDS, json.dumps(json_data))
            c.perform()
            status = c.getinfo(c.RESPONSE_CODE)
            if (status != 200):
                print(' Closing comments for post \'{0:s}\' failed.'.format(title))
                print(buffer.getvalue().decode('UTF-8'))
                return -1
    return 0
def checkConfig(config):
    """Return True when *config* contains the minimum required keys."""
    required = ('url', 'consumerKey', 'consumerSecret')
    return all(key in config for key in required)
def main():
    """Command-line entry point: parse arguments, load the YAML config,
    and dispatch to the selected sub-command.

    Returns the sub-command's exit code (0 on success, negative on error).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='config.yml')
    subparsers = parser.add_subparsers(title='command', dest='subcommand', help='sub-command', required=True)
    # Fixed: the sub-parser objects for 'register' and 'test' were bound to
    # unused variables; only 'transfer' needs its parser for extra options.
    subparsers.add_parser('register')
    parser_transfer = subparsers.add_parser('transfer')
    parser_transfer.add_argument('--create-users', action='store_true', default=False)
    parser_transfer.add_argument('directory', default=None)
    subparsers.add_parser('test')
    args = parser.parse_args()
    try:
        with open(args.config, 'r', encoding='utf-8') as f:
            # Fixed: yaml.load() without a Loader is deprecated/unsafe in
            # modern PyYAML; the config is plain data, so safe_load() is
            # the correct replacement.
            config = yaml.safe_load(f)
    except Exception:
        # Broad on purpose: any failure (missing file, bad YAML) is
        # reported as an unusable configuration.
        print('Could not load configuration \'{0:s}\'...'.format(args.config))
        return -1
    if (not(checkConfig(config))):
        print('Configuration file invalid.')
        return -1
    oauth = OAuth10a(config['consumerKey'],
                     config['consumerSecret'],
                     config.get('oauthToken', None),
                     config.get('oauthTokenSecret', None)
                     )
    if (args.subcommand == 'register'):
        return fn_register(oauth, config)
    elif (args.subcommand == 'test'):
        return fn_test(oauth, config)
    elif (args.subcommand == 'transfer'):
        return fn_transfer(oauth, config, args)
    else:
        # Unreachable with required=True; kept as a defensive fallback.
        print('Unknown command \'{0:s}\'...'.format(args.subcommand))
        return -2
if __name__ == '__main__':
    # Equivalent to sys.exit(main()): propagate main()'s return code.
    raise SystemExit(main())
__author__ = 'AlexLlamas'
from Tkinter import *
from samples import Samples
from CorrelationMesures import *
import matplotlib.pyplot as plt
import sympy as sym
from scipy.optimize import curve_fit
def colocar_scrollbar(listbox, scrollbar):
    """Wire *scrollbar* to *listbox* vertically and pack both widgets."""
    listbox.config(yscrollcommand=scrollbar.set)
    scrollbar.config(command=listbox.yview)
    # Pack order matters for layout: scrollbar on the right, list on the left.
    scrollbar.pack(side=RIGHT, fill=Y)
    listbox.pack(side=LEFT, fill=Y)
def cargarlistbox(lista, listbox):
    """Append every element of *lista* to *listbox*, in order.

    Fixed: the manual while-loop with index bookkeeping is replaced by
    the idiomatic for-loop (same insertion order and behavior).
    """
    for elemento in lista:
        listbox.insert(END, elemento)
def case_function(samples, obj):
    """Compute every correlation/information measure for *samples*
    (an N x 2 array of (x, y) points), publish the results in the GUI
    label, and run whichever optional plots are checked.

    obj is the Samples instance that generated *samples*; it is needed
    by the comparison/convergence plots.
    """
    sam = Samples()
    print (samples)
    # Bin counts are read from the two GUI entry fields.
    numBinx, numBiny = Binx.get(), Biny.get()
    #numBinx = 30
    #numBiny = 30
    print (numBinx, numBiny)
    print (numBinx)
    x, y = samples[:, 0], samples[:, 1]
    # Classic information-theoretic measures (from CorrelationMesures).
    MI = mutual_info(x, y, numBinx, numBiny)
    ex = entropy(x, numBinx)
    ey = entropy(y, numBiny)
    R = norm_MI(x, y, numBinx, numBiny)
    rxy = pearson_corr(x, y)
    exy = entropyx_y(x, y, numBinx, numBiny)
    eyx = entropyx_y(y, x, numBinx, numBiny)
    MIE = MI_Entropy(x, y, numBinx, numBiny)
    # Proposed directional/mutual dependency measures.
    Ixy = propuesta_Ixy(x, y, numBinx, numBiny)
    Iyx = propuesta_Iyx(x, y, numBinx, numBiny)
    Ixy2 = propuesta2_Ixy(x, y, numBinx, numBiny)
    Iyx2 = propuesta2_Iyx(x, y, numBinx, numBiny)
    PMD = propuesta_mutual_dependency(x, y, numBinx, numBiny)
    PMD2 = propuesta2_mutual_dependency(x,y, numBinx, numBiny)
    # d_cor = d_corr(x,y)
    # Mic = MIC(x,y)
    # Publish all results into the GUI status label.
    mitexto.set('Entropy of x: ' + str(ex) + '\n'+
                'Entropy of y: ' + str(ey) + '\n' +
                'Mutual information: ' + str(MI) + '\n' +
                'Mutual info with entropy: ' + str(MIE) + '\n' + '\n' +
                '(Max=1)Normalized Mutual info: '+ str(R) + '\n' +
                '(Max=1)Pearson Correlation: ' + str(rxy) + '\n' +
                # '(Max=1)Distance Correlation: ' + str(d_cor) + '\n' +
                # '(Max=1)MIC: ' + str(Mic) + '\n' +
                '(Max=1)Mutual Dependency: ' + str(PMD) + '\n' +
                '(Max=1)Mutual Dependency2: ' + str(PMD2) + '\n' + '\n' +
                'Entropy of X|Y = ' + str(exy) + '\n' +
                'Entropy of Y|X = ' + str(eyx)+ '\n' +
                '(Max=1)Information in Y of X: ' + str(Ixy) + '\n' +
                '(Max=1)Information in X of Y: ' + str(Iyx) + '\n' +
                '(Max=1)Information2 in Y of X: ' + str(Ixy2) + '\n' +
                '(Max=1)Information2 in X of Y: ' + str(Iyx2))
    # Optional visualizations, gated by the GUI checkboxes.
    if plot_sample.get():
        sam.plot_sample(samples, numBinx, numBiny, step.get())
    if plot_compare.get():
        obj.plot_compare2(numBinx, numBiny, noise.get(), step.get())
    if plot_propuse.get():
        sam.plot_propose(samples, numBinx)
    if plot_partition.get():
        sam.plot_partition(samples, numBinx, numBiny)
    if plot_converge_in_samples.get():
        Converge_in_samples(numBinx, numBiny, obj)
    if samples_check.get():
        samples_grid(numBinx, numBiny)
def func(x, a, b, c, d):
    """Shifted-hyperbola model a/(b*x + c) + d used for curve fitting."""
    denominator = b * x + c
    return a / denominator + d
def invfunc(y, a, b, c, d):
    """Inverse of func(): recover x such that func(x, a, b, c, d) == y."""
    return a / float(b * (y - d)) - c / float(b)
def samples_grid(numBinx, numBiny):
    """Print how many samples the numBinx x numBiny grid needs for the
    chosen significance level."""
    sig = 0.99  # significance level
    v = 10      # desired samples per block (5 for square)
    # Required samples per block at significance sig, then scaled by the
    # total number of grid blocks.
    samPerBlock = (v * sig) / math.sqrt(1 - math.pow(sig, 2))
    numSamples = int(numBinx * numBiny * samPerBlock)
    print ('The number of samples needed for a significance of ' + str(sig) + 'with v = ' + str(v) + 'Binx = ' + str(numBinx) + 'Biny= ' + str(numBiny) + 'is...: ' + str(numSamples))
def Converge_in_samples(numBinx, numBiny, obj):
    """Plot UMD/CMD/MI against growing sample counts, fit each curve with
    func(), and print estimates of where each measure converges (i.e.
    where adding more samples changes the measure only negligibly).

    obj is the Samples generator; sample counts run from 150 up to the
    GUI value numberSam in increments of the GUI value step.
    """
    # Reference convergence values used in the final report lines
    # (presumably determined empirically for this structure -- confirm).
    converge_value_PMD = 0.631995917246
    converge_value_PMD2 = 0.346874335815
    converge_value_R = 0.0607182686559
    ini = 150  # initial number of samples
    end = numberSam.get()
    stepi = step.get()
    size = ((end - ini)/stepi)  # number of evaluation points
    PMD = np.zeros(size)
    PMD2 = np.zeros(size)
    R = np.zeros(size)
    # NOTE(review): r_PMD/r_PMD2/r_R are allocated but never used below.
    r_PMD = np.zeros(size)
    r_PMD2 = np.zeros(size)
    r_R = np.zeros(size)
    axis = np.zeros(size)
    k = 0
    # Evaluate the three measures for each sample count.
    for i in range(ini, end, stepi):
        samples = obj.get(i, noise.get())
        x, y = samples[:, 0], samples[:, 1]
        PMD[k] = propuesta_mutual_dependency(x, y, numBinx, numBiny)
        PMD2[k] = propuesta2_mutual_dependency(x, y, numBinx, numBiny)
        R[k] = norm_MI(x, y, numBinx, numBiny)
        axis[k] = i
        k += 1
    # Raw curves: UMD red, CMD green, MI blue.
    fig6 = plt.figure(6)
    plt.plot(axis, PMD, 'r-')
    plt.plot(axis, PMD2, 'g-')
    plt.plot(axis, R, 'b-')
    # Fit the hyperbolic model func() to each measured curve and overlay it.
    popt_PMD, pcov = curve_fit(func, axis, PMD)
    popt_PMD2, pcov = curve_fit(func, axis, PMD2)
    popt_R, pcov = curve_fit(func, axis, R)
    plt.plot(axis, func(axis,*popt_PMD), 'r-')
    plt.plot(axis, func(axis,*popt_PMD2), 'g-')
    plt.plot(axis, func(axis,*popt_R), 'b-')
    plt.xlabel('Number of samples')
    plt.ylabel('Mutual Dependency uniform (red) | Mutual Dependency p(x) (green) | mutual information (blue)' )
    plt.ylim((0,1))
    fig6.show()
    # ----------------- end: convergence values --------------------------------------------------------
    a = popt_PMD[0]
    b = popt_PMD[1]
    c = popt_PMD[2]
    e = 1  # step between consecutive sample counts, i.e. X_2 = X_1 + e; larger e converges more slowly
    eps = 0.00001  # converged once f(X_2) - f(X_1) equals eps; smaller eps converges more slowly
    # NOTE(review): the expressions below look like a quadratic-formula
    # solution (derived offline) of f(x) - f(x+e) = eps for the fitted
    # model -- confirm the derivation before modifying.
    PMDx = ((-(((pow(b,2))*e)+(2*b*c)) + pow(((((pow(b,2))*e)+(2*b*c)) - ((4*(pow(b,2)))*((b*c*e) + pow(c,2) - (a*b*e/eps)))),0.5))/float(2*pow(b,2)))
    a = popt_PMD2[0]
    b = popt_PMD2[1]
    c = popt_PMD2[2]
    PMD2x = ((-(((pow(b,2))*e)+(2*b*c)) + pow(((((pow(b,2))*e)+(2*b*c)) - ((4*(pow(b,2)))*((b*c*e) + pow(c,2) - (a*b*e/eps)))),0.5))/float(2*pow(b,2)))
    a = popt_R[0]
    b = popt_R[1]
    c = popt_R[2]
    Rx = ((-(((pow(b,2))*e)+(2*b*c)) + pow(((((pow(b,2))*e)+(2*b*c)) - ((4*(pow(b,2)))*((b*c*e) + pow(c,2) - (a*b*e/eps)))),0.5))/float(2*pow(b,2)))
    # ------------------------------------------------------------------------------------------------------------------
    print ('----------------------------------------------------------------------')
    print ('Number of samples to converge in UMD (red): ' + str(PMDx))
    print ('value of converge for UMD: ' + str(func(PMDx,*popt_PMD)))
    print ('Number of samples to converge in CMD (green): ' + str(PMD2x))
    print ('value of converge for CMD: ' + str(func(PMD2x,*popt_PMD2)))
    print ('Number of samples to converge in MI (blue): ' + str(Rx))
    print ('value of converge for MI: ' + str(func(PMDx,*popt_R)))
    print ('----------------------------------------------------------------------')
    print ('Number of samples to get a converge value of ' + str(converge_value_PMD) + ' in UMD is: ' + str(invfunc(converge_value_PMD, *popt_PMD)) + 'ASB, ' + str(numBinx) + 'x' +str(numBiny) + ': '+ str(invfunc(converge_value_PMD, *popt_PMD)/float(numBinx*numBiny)))
    print ('Number of samples to get a converge value of ' + str(converge_value_PMD2) + ' in CMD is: ' + str(invfunc(converge_value_PMD2, *popt_PMD2)) + 'ASB, ' + str(numBinx) + 'x' +str(numBiny) + ': '+ str(invfunc(converge_value_PMD2, *popt_PMD2)/float(numBinx*numBiny)))
    print ('Number of samples to get a converge value of ' + str(converge_value_R) + ' in MI is: ' + str(invfunc(converge_value_R, *popt_R)) + 'ASB, ' + str(numBinx) + 'x' +str(numBiny) + ': '+ str(invfunc(converge_value_R, *popt_R)/float(numBinx*numBiny)))
def plot_select():
    """Read the listbox selection, generate the matching sample set, and
    hand it to case_function(); errors go to the GUI status label.

    Fixed: replaces a 15-branch if/elif chain that repeated the same four
    lines per branch with a data-driven dispatch table. The listbox row
    index doubles as the Samples 'tipo' argument, exactly as before.
    """
    # (sampler method name, argument style) per listbox row:
    #   'noise' -> method(numberSam.get(), noise.get())
    #   'plain' -> method(numberSam.get())
    #   'none'  -> method()
    dispatch = [
        ('get_sin', 'noise'),
        ('get_square', 'noise'),
        ('get_blur', 'noise'),
        ('get_cuadratic', 'noise'),
        ('get_diagonal_line', 'noise'),
        ('get_horizontal_line', 'noise'),
        ('get_vertical_line', 'noise'),
        ('get_x', 'noise'),
        ('get_circle', 'noise'),
        ('get_curve_x', 'noise'),
        ('get_diagonal_line2', 'noise'),
        ('get_dependent', 'plain'),
        ('get_independent', 'plain'),
        ('get_corr', 'noise'),
        ('get_file', 'none'),
    ]
    ind = list1.curselection()
    if ind == ():
        mitexto.set('Select a type')
        return
    seleccion = ind[0]
    # Any value that is not a known row index gets the same 'Error'
    # message the old else-branch produced.
    if seleccion not in range(len(dispatch)):
        mitexto.set('Error')
        return
    method_name, arg_style = dispatch[seleccion]
    sam = Samples(tipo=seleccion)
    sampler = getattr(sam, method_name)
    if arg_style == 'noise':
        samples = sampler(numberSam.get(), noise.get())
    elif arg_style == 'plain':
        samples = sampler(numberSam.get())
    else:
        samples = sampler()
    case_function(samples, sam)
# ---------------------------------------------------------------------------
# GUI construction (runs at import time) and the shared Tk variables that the
# callback functions above read.
# ---------------------------------------------------------------------------
# Principal Window
v0=Tk()
v0.minsize(width=300, height=400)
# Listbox with the available sample structures ----------------------------
frame1=Frame(v0)
frame1.pack()
scroll1=Scrollbar(frame1)
list1=Listbox(frame1)
list1.pack()
colocar_scrollbar(list1,scroll1)
# Row order must match the dispatch order in plot_select().
ListaNombres = ['Sinusoidal', 'Uniform', 'Blur', 'Quadratic', 'Diagonal line 1', 'Horizontal line', 'Vertical line', 'X line',
                'Circle', 'X curve', 'Diagonal line 2', 'Dependent', 'Independent', 'Correlated', 'File']
cargarlistbox(ListaNombres,list1)
# ------------------------------------------
# Status/result label and the Calculate button ----------------------------
# NOTE(review): .pack() returns None, so label1/b1/e1..e5/c1..c6 all hold
# None -- harmless here since they are never read afterwards.
mitexto = StringVar()
label1 = Label(v0,textvar=mitexto).pack()
b1=Button(v0,text="Calculate",command=lambda: plot_select()).pack()
# Noise variable -------------------
noise = DoubleVar(value=0.2)
e1 = Entry(v0,textvar=noise).pack()
# Number of samples variable --------
numberSam = IntVar(value=10050)
e2 = Entry(v0,textvar=numberSam).pack()
# Histogram bin counts for x and y --------
binvalue = 30
Binx = IntVar(value=binvalue)
e3 = Entry(v0,textvar=Binx).pack()
Biny = IntVar(value=binvalue)
e4 = Entry(v0,textvar=Biny).pack()
# Step size used by the convergence plot --
step = IntVar(value=50)
e5 = Entry(v0,textvar=step).pack()
# Optional-plot toggles -------------------
plot_sample = IntVar(value=0)
c1 = Checkbutton(v0, text="Plot Sample structure?", variable=plot_sample).pack()
plot_compare = IntVar(value=0)
c2 = Checkbutton(v0, text="Plot Comparative?", variable=plot_compare).pack()
plot_propuse = IntVar(value=0)
c3 = Checkbutton(v0, text="Plot UMD for Grids ixi?", variable=plot_propuse).pack()
plot_partition = IntVar(value=0)
c4 = Checkbutton(v0, text="Plot histogram of the structure?", variable=plot_partition).pack()
plot_converge_in_samples = IntVar(value=0)
c5 = Checkbutton(v0, text="Plot Converge in samples?", variable=plot_converge_in_samples).pack()
samples_check = IntVar(value=0)
c6 = Checkbutton(v0, text="Calculate samples for the grid?", variable=samples_check).pack()
# ----------------------------------------------
v0.mainloop()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:wiley
# datetime:2020/4/21 9:38 AM
"""
Definition for a Node.
"""
class Node:
    """Linked-list node carrying an extra 'random' pointer."""

    def __init__(self, x, next=None, random=None):
        self.random = random
        self.next = next
        self.val = int(x)
class Solution(object):
    """Deep-copies a linked list whose nodes carry an extra 'random'
    pointer (the classic copy-list-with-random-pointer problem)."""

    def __init__(self):
        # Maps original node -> its copy; doubles as a memo so repeated
        # calls on an already-copied node return the same clone.
        self.copy_list = {}

    def copy_random_list(self, head):
        """
        :type head: Node
        :rtype: Node

        Fixed: the previous implementation recursed once per node via
        'next' and overflowed Python's recursion limit on lists of about
        1000 nodes; this iterative two-pass version handles any length
        while keeping the same memoization semantics.
        """
        if head is None:
            return None
        if head in self.copy_list:
            return self.copy_list[head]
        # Pass 1: clone every node reachable via 'next' or 'random'
        # (the recursive version also followed both pointers).
        pending = [head]
        visited = []
        while pending:
            node = pending.pop()
            if node is None or node in self.copy_list:
                continue
            self.copy_list[node] = Node(node.val)
            visited.append(node)
            pending.append(node.next)
            pending.append(node.random)
        # Pass 2: wire up the clones through the original->copy map.
        for node in visited:
            clone = self.copy_list[node]
            clone.next = None if node.next is None else self.copy_list[node.next]
            clone.random = None if node.random is None else self.copy_list[node.random]
        return self.copy_list[head]
if __name__ == '__main__':
    # No demo/driver code for this solution module.
    pass
|
"""This script creates object structure to store the extracted data
and provides methods to set and retrieve the attributes"""
import enum
class Contents:
    """Ordered collection of extracted content objects."""

    def __init__(self):
        # Contents collected so far, in extraction order.
        self.content_list = list()
        # Next index to hand out via add_content().
        self.content_counter = 0

    def add_content(self, _content):
        """Stamp *_content* with the next sequential index and store it."""
        _content.content_index = self.content_counter
        self.content_counter += 1
        self.content_list.append(_content)

    def get_content(self, index):
        """Return the content stored at *index*, or None when empty."""
        if not self.content_list:
            return None
        return self.content_list[index]

    def convert_to_dict(self):
        """Return a plain-dict view of this object, with every stored
        content replaced by its own ``__dict__`` representation."""
        serialized = dict(self.__dict__)
        serialized['content_list'] = [item.__dict__ for item in self.content_list]
        return serialized
class _Content:
"""This class creates object representation for each extracted content"""
def __init__(self, content_type):
self.content_type = content_type
# default value = -1 since field
# has not been initialized and is
# invalid currently
self.content_index = -1
def get_content_type(self):
"""Returns the content type of the content"""
return self.content_type.name
class ContentType(enum.Enum):
    """Enumerated constants for the type of an extracted content."""
    UNKNOWN = 1
    TEXT = 2  # data_models/test
    IMAGE = 3  # data_models/image
    VIDEO = 4  # data_models/video
    QUOTE = 5  # data_models/quote
    EMBEDDED_TWEET = 6  # data_models/embedded_tweet
    EMBEDDED_INSTAGRAM_POST = 7  # data_models/embedded_instagram_post
    EMBEDDED_PINTEREST_PIN = 8  # data_models/embedded_pinterest_pin
    EMBEDDED_YOUTUBE_VIDEO = 9  # data_models/embedded_youtube_video

    def is_embedded_content(self):
        """Return True when this member is one of the embedded types."""
        embedded = (
            ContentType.EMBEDDED_TWEET,
            ContentType.EMBEDDED_INSTAGRAM_POST,
            ContentType.EMBEDDED_PINTEREST_PIN,
            ContentType.EMBEDDED_YOUTUBE_VIDEO,
        )
        return self in embedded
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from unittest import mock
from airflow.providers.google.cloud.hooks.datapipeline import DataPipelineHook
# Shared fixtures for the DataPipelineHook tests.
TASK_ID = "test-datapipeline-operators"
# Fixed: TEST_NAME was assigned twice; the first binding
# ("projects/test-project-id/locations/test-location") was dead code,
# silently overwritten by the pipeline-qualified value defined below.
TEST_BODY = {
    "name": "projects/test-project-id/locations/test-location/pipelines/test-pipeline",
    "type": "PIPELINE_TYPE_BATCH",
    "workload": {
        "dataflowFlexTemplateRequest": {
            "launchParameter": {
                "containerSpecGcsPath": "gs://dataflow-templates-us-central1/latest/Word_Count_metadata",
                "jobName": "test-job",
                "environment": {"tempLocation": "test-temp-location"},
                "parameters": {
                    "inputFile": "gs://dataflow-samples/shakespeare/kinglear.txt",
                    "output": "gs://test/output/my_output",
                },
            },
            "projectId": "test-project-id",
            "location": "test-location",
        }
    },
}
TEST_LOCATION = "test-location"
TEST_PROJECTID = "test-project-id"
TEST_DATA_PIPELINE_NAME = "test-data-pipeline-name"
TEST_PARENT = "projects/test-project-id/locations/test-location"
TEST_JOB_ID = "test-job-id"
TEST_NAME = "projects/test-project-id/locations/test-location/pipelines/test-data-pipeline-name"
class TestDataPipelineHook:
"""
Module meant to test the DataPipeline Hooks
"""
    def setup_method(self):
        # pytest hook: runs before each test, giving every case a fresh
        # hook instance so no state leaks between tests.
        self.datapipeline_hook = DataPipelineHook(gcp_conn_id="google_cloud_default")
@mock.patch("airflow.providers.google.cloud.hooks.datapipeline.DataPipelineHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.datapipeline.build")
def test_get_conn(self, mock_build, mock_authorize):
"""
Test that get_conn is called with the correct params and
returns the correct API address
"""
connection = self.datapipeline_hook.get_conn()
mock_build.assert_called_once_with(
"datapipelines", "v1", http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == connection
@mock.patch("airflow.providers.google.cloud.hooks.datapipeline.DataPipelineHook.build_parent_name")
def test_build_parent_name(self, mock_build_parent_name):
"""
Test that build_parent_name is called with the correct params and
returns the correct parent string
"""
result = self.datapipeline_hook.build_parent_name(
project_id=TEST_PROJECTID,
location=TEST_LOCATION,
)
mock_build_parent_name.assert_called_with(
project_id=TEST_PROJECTID,
location=TEST_LOCATION,
)
assert mock_build_parent_name.return_value == result
@mock.patch("airflow.providers.google.cloud.hooks.datapipeline.DataPipelineHook.get_conn")
def test_create_data_pipeline(self, mock_connection):
"""
Test that request are called with the correct params
Test that request returns the correct value
"""
mock_locations = mock_connection.return_value.projects.return_value.locations
mock_request = mock_locations.return_value.pipelines.return_value.create
mock_request.return_value.execute.return_value = {"name": TEST_PARENT}
result = self.datapipeline_hook.create_data_pipeline(
body=TEST_BODY,
project_id=TEST_PROJECTID,
location=TEST_LOCATION,
)
mock_request.assert_called_once_with(
parent=TEST_PARENT,
body=TEST_BODY,
)
assert result == {"name": TEST_PARENT}
@mock.patch("airflow.providers.google.cloud.hooks.datapipeline.DataPipelineHook.get_conn")
def test_run_data_pipeline(self, mock_connection):
"""
Test that run_data_pipeline is called with correct parameters and
calls Google Data Pipelines API
"""
mock_request = (
mock_connection.return_value.projects.return_value.locations.return_value.pipelines.return_value.run
)
mock_request.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
result = self.datapipeline_hook.run_data_pipeline(
data_pipeline_name=TEST_DATA_PIPELINE_NAME,
project_id=TEST_PROJECTID,
location=TEST_LOCATION,
)
mock_request.assert_called_once_with(
name=TEST_NAME,
body={},
)
assert result == {"job": {"id": TEST_JOB_ID}}
|
from core.evaluation.labels import Label
from core.source.opinion import Opinion
from core.source.vectors import OpinionVector
from core.source.news import News
from words import NewsWords
class TextPosition:
    """A sample location within an article: the news id plus the
    [left, right] indices of the two entities involved."""

    def __init__(self, news_ID, left, right):
        # news index, which is a part of news filename
        assert(isinstance(news_ID, int))
        assert(isinstance(left, int))
        assert(isinstance(right, int))
        self._news_ID = news_ID
        self._left = left
        self._right = right

    def equals(self, other):
        """True when both positions refer to the same news and span."""
        assert(isinstance(other, TextPosition))
        same_news = self._news_ID == other._news_ID
        same_span = (self._left, self._right) == (other._left, other._right)
        return same_news and same_span

    @property
    def news_ID(self):
        return self._news_ID

    @property
    def left_entity_index(self):
        return self._left

    @property
    def right_entity_index(self):
        return self._right
class NewsDescriptor:
    """Bundle of everything known about a single news article: the parsed
    news, its word sequence, and the attached opinion (vector) collections."""

    def __init__(self, news_ID, news, news_words, opinion_collections, opinion_vectors_collections):
        # news index, which is a part of news filename
        assert(isinstance(news_ID, int))
        assert(isinstance(news, News))
        assert(isinstance(news_words, NewsWords))
        assert(isinstance(opinion_collections, list))
        assert(isinstance(opinion_vectors_collections, list))
        self._news_ID = news_ID
        self._news = news
        self._news_words = news_words
        self._opinion_collections = opinion_collections
        self._opinion_vectors_collections = opinion_vectors_collections

    @property
    def news_index(self):
        return self._news_ID

    @property
    def news(self):
        return self._news

    @property
    def news_words(self):
        return self._news_words

    @property
    def entities(self):
        # Delegates to the wrapped News object.
        return self._news.entities

    @property
    def opinion_collections(self):
        return self._opinion_collections

    @property
    def opinion_vector_collections(self):
        return self._opinion_vectors_collections
class ExtractedRelation:
    """A relation found in a news article, composed between two named
    entities (located via an Opinion with a predefined label)."""

    def __init__(self, opinion_vector, text_position, left_entity_value, right_entity_value, label):
        assert(isinstance(opinion_vector, OpinionVector) or opinion_vector is None)
        assert(isinstance(text_position, TextPosition))
        assert(isinstance(left_entity_value, unicode))
        assert(isinstance(right_entity_value, unicode))
        assert(isinstance(label, Label))
        # NLP feature vector; may be absent (None).
        self.opinion_vector = opinion_vector
        self.text_position = text_position
        self.left_entity_value = left_entity_value
        self.right_entity_value = right_entity_value
        self.label = label

    def create_opinion(self):
        """Build the Opinion (entity pair + label) this relation represents."""
        return Opinion(self.left_entity_value, self.right_entity_value, self.label)
|
import json
import requests
import sys
import argparse
def highestID():
    """Ask the chat API for the highest known message id."""
    endpoint = "https://0ym0hvjsll.execute-api.us-east-2.amazonaws.com/default/chat/highestid"
    return requests.get(endpoint).json()
def newMessage(msgID = None):
    """Fetch chat messages newer than msgID and write them to messages.txt.

    When msgID is None the server's highest id is used as the starting point.
    Returns True if at least one message was written, False otherwise.

    Fixes: `msgID == None` -> `is None`; the id from highestID() is now
    passed through str() so the URL concatenation cannot fail on a numeric
    json value; highestID() is only called when actually needed (it performs
    an HTTP request); local typo `reponse` corrected.
    """
    if msgID is None:
        message_id = str(highestID())
    else:
        message_id = str(msgID)
    r = requests.get("https://0ym0hvjsll.execute-api.us-east-2.amazonaws.com/default/chat?id="+message_id)
    messages = r.json()["Items"]
    if len(messages) > 0:
        with open("messages.txt", "w") as f:
            for item in messages:
                response = item['user'] + ": " + item['message'] + "\n"
                f.write(response)
    return len(messages) > 0
def getMessages():
    """Print every line stored in messages.txt.

    Fix: the original opened the file without closing it; the with-statement
    guarantees the handle is released.
    """
    with open("messages.txt", "r") as f:
        for line in f:
            # Each stored line keeps its trailing newline, so output is
            # double-spaced -- same as the original behavior.
            print(line)
# print(highestID())
# print(newMessage(-1))
|
from functools import partial
import numpy as np
def macd(values, alpha=0.8, period1=26, period2=12):
    """EMA(period1) minus EMA(period2) of `values`, as a numpy array."""
    slow = np.array(exponential_moving_average(values, alpha=alpha, period=period1))
    fast = np.array(exponential_moving_average(values, alpha=alpha, period=period2))
    return slow - fast
def rsi(values, alpha=0.5):
    """Relative strength index from exponentially smoothed gains/losses.

    Index 0 contributes a zero gain and a zero loss.
    """
    gains = [0]
    losses = [0]
    for k in range(1, len(values)):
        delta = values[k] - values[k-1]
        if delta > 0:
            gains.append(delta)
            losses.append(0)
        else:
            gains.append(0)
            losses.append(-delta)
    avg_gain = np.array(exponential_moving_average(gains, alpha=alpha))
    avg_loss = np.array(exponential_moving_average(losses, alpha=alpha))
    return (100 * (avg_loss / (avg_loss + avg_gain))).tolist()
def next_roi(values, delta=1):
    """Forward return over `delta` steps; the last `delta` slots are NaN."""
    returns = [(values[k + delta] - values[k]) / values[k]
               for k in range(len(values) - delta)]
    return returns + delta * [np.nan]
def prev_roi(values, delta=1):
    """Trailing return over `delta` steps; the first `delta` slots are NaN."""
    returns = [(values[k] - values[k - delta]) / values[k - delta]
               for k in range(delta, len(values))]
    return delta * [np.nan] + returns
def prev_typical_price(high, low, close, period):
    """Typical price shifted forward by `period` bars (NaN-padded head)."""
    shifted = typical_price(high, low, close)[:-period]
    return period * [np.nan] + shifted
def next_typical_price(high, low, close, period):
    """Typical price shifted back by `period` bars (NaN-padded tail)."""
    shifted = typical_price(high, low, close)[period:]
    return shifted + period * [np.nan]
def typical_price(high, low, close):
    """(high + low + close) / 3, element-wise, returned as a plain list."""
    total = np.array(high) + np.array(low) + np.array(close)
    return (total / 3).tolist()
def exponential_moving_average(values, period=None, alpha=0.5):
    """Exponentially weighted average of `values`.

    With period=None the classic recursive EMA is returned for every index.
    With a finite `period`, a windowed EMA is computed for each index
    k >= period and the first `period` entries are NaN.

    Bug fix: the windowed weight was written `(1-alpha)**period-j`, which by
    operator precedence is `((1-alpha)**period) - j`; the intended decay is
    `(1-alpha)**(k-j)` -- the weight falls off with the sample's distance
    from the window's end.
    """
    if period is None:
        moving_averages = [alpha * values[0]]
        for k in range(1, len(values)):
            moving_averages.append(moving_averages[-1] * (1 - alpha) + alpha * values[k])
        return moving_averages
    moving_averages = []
    for k in range(period, len(values)):
        acc = 0.0
        for j in range(k - period, k + 1):
            acc += alpha * values[j] * (1 - alpha) ** (k - j)
        moving_averages.append(acc)
    return period * [np.nan] + moving_averages
def moving_average(values, period):
    """Trailing mean over up to `period` previous samples.

    Note: the window is values[k-period:k], i.e. it excludes values[k]
    itself; index 0 is seeded with values[0].
    """
    averages = [values[0]]
    for k in range(1, len(values)):
        window = values[max(0, k - period):k]
        averages.append(np.mean(window))
    return averages
def volatility(values, period):
    """Trailing population variance over up to `period` previous samples
    (NaN at index 0; values[k] itself is excluded from the window)."""
    vols = [np.nan]
    for k in range(1, len(values)):
        vols.append(np.var(values[max(0, k - period):k]))
    return vols
def p_moment(values, period, p):
    """Trailing p-th absolute central moment over up to `period` previous
    samples (NaN at index 0)."""
    moments = [np.nan]
    for k in range(1, len(values)):
        window = np.array(values[max(0, k - period):k])
        moments.append(np.mean(np.absolute(window - np.mean(window)) ** p))
    return moments
def trix(values, alpha=0.5):
    """TRIX: relative change of a triple-smoothed EMA (NaN at index 0)."""
    smoothed = values
    for _ in range(3):
        smoothed = exponential_moving_average(smoothed, alpha=alpha)
    results = [np.nan]
    for k in range(1, len(smoothed)):
        midpoint = (smoothed[k] + smoothed[k - 1]) / 2
        results.append((smoothed[k] - smoothed[k - 1]) / midpoint)
    return results
def percent_r(high, low, close, period):
    """Williams %R over a sliding `period`-bar window (NaN at index 0).

    Bug fixes relative to the original:
    * the window now actually slides: the OLDEST bar is dropped (pop(0));
      the original dropped the newest (pop()), so after warm-up the window
      kept the first `period - 1` bars forever;
    * highest/lowest/close are refreshed on every bar, so warm-up values no
      longer reuse the statistics of bar 0 only.
    Note: still divides by zero if the window is completely flat
    (highest == lowest), as the original did.
    """
    rs = [np.nan]
    high_prices = [high[0]]
    low_prices = [low[0]]
    for k in range(1, len(low)):
        if len(high_prices) == period:
            # Window full: drop the oldest sample before adding the new one.
            high_prices.pop(0)
            low_prices.pop(0)
        high_prices.append(high[k])
        low_prices.append(low[k])
        close_price = close[k]
        highest = max(high_prices)
        lowest = min(low_prices)
        r = (-100.0) * (highest - close_price) / (highest - lowest)
        rs.append(r)
    return rs
def avg(high, low, close, period):
    """Average of buying pressure over true range (ultimate-oscillator
    helper).  Index 0 is NaN; early indices use all bars seen so far."""
    bps = []
    trs = []
    results = []
    for k in range(1, len(high)):
        prev_close = close[k - 1]
        floor = min(low[k], prev_close)
        bps.append(close[k] - floor)
        trs.append(max(high[k], prev_close) - floor)
        lo = k - period if len(bps) >= period else 0
        results.append(sum(bps[lo:k]) / sum(trs[lo:k]))
    return [np.nan] + results
def ult_osc(high, low, close):
    """Ultimate oscillator: weighted blend of 7/14/28-bar pressure averages."""
    short_avg = np.array(avg(high, low, close, 7))
    mid_avg = np.array(avg(high, low, close, 14))
    long_avg = np.array(avg(high, low, close, 28))
    blended = (4 * short_avg + 2 * mid_avg + long_avg) / 7
    return [np.nan] + blended.tolist()[1:]
def cci(typical_price, period, high=None, low=None, close=None):
    """Commodity channel index of an already-computed typical-price series.

    Note: the first parameter shadows the module-level typical_price()
    function (kept for caller compatibility); high/low/close are accepted
    but unused.
    """
    prices = np.array(typical_price)
    baseline = np.array(moving_average(typical_price, period))
    spread = np.sqrt(np.array(volatility(typical_price, period)))
    cci_values = ((prices - baseline) / spread).tolist()
    # Replace +/-inf entries (zero volatility) with NaN, one at a time.
    for k, value in enumerate(cci_values):
        if value == np.inf or value == -np.inf:
            cci_values = cci_values[:k] + [np.nan] + cci_values[k+1:]
    return cci_values
# Window lengths shared by all windowed indicators below.
SMALL_PERIOD = 5
BIG_PERIOD = 30
# Feature functions that take a single price series (typical price).
# Order must stay in sync with ALL_TYPICAL_NAMES.
ALL_TYPICAL_FUNCTIONS = [
    partial(next_roi, delta=1),
    partial(prev_roi, delta=1),
    partial(prev_roi, delta=2),
    partial(prev_roi, delta=3),
    partial(prev_roi, delta=4),
    partial(prev_roi, delta=5),
    exponential_moving_average,
    partial(exponential_moving_average, period=SMALL_PERIOD),
    partial(exponential_moving_average, period=BIG_PERIOD),
    partial(moving_average, period=SMALL_PERIOD),
    partial(moving_average, period=BIG_PERIOD),
    partial(volatility, period=SMALL_PERIOD),
    partial(volatility, period=BIG_PERIOD),
    partial(p_moment, period=SMALL_PERIOD, p=3),
    partial(p_moment, period=BIG_PERIOD, p=3),
    partial(p_moment, period=SMALL_PERIOD, p=4),
    partial(p_moment, period=BIG_PERIOD, p=4),
    trix,
    partial(rsi, alpha=0.5),
    macd,
    partial(cci, period=SMALL_PERIOD),
    partial(cci, period=BIG_PERIOD)
]
# Feature functions that need separate high/low/close series.
# Order must stay in sync with ALL_HLC_NAMES.
ALL_HLC_FUNCTIONS = [
    typical_price,
    partial(prev_typical_price, period=1),
    partial(prev_typical_price, period=2),
    partial(prev_typical_price, period=3),
    partial(prev_typical_price, period=4),
    partial(prev_typical_price, period=5),
    partial(percent_r, period=SMALL_PERIOD),
    partial(percent_r, period=BIG_PERIOD),
    ult_osc
]
# Column names, positionally matching ALL_TYPICAL_FUNCTIONS.
ALL_TYPICAL_NAMES = [
    'next_roi_1',
    'prev_roi_1',
    'prev_roi_2',
    'prev_roi_3',
    'prev_roi_4',
    'prev_roi_5',
    'exp_avg_inf',
    'exp_avg_%s' % SMALL_PERIOD,
    'exp_avg_%s' % BIG_PERIOD,
    'mov_avg_%s' % SMALL_PERIOD,
    'mov_avg_%s' % BIG_PERIOD,
    'vol_%s' % SMALL_PERIOD,
    'vol_%s' % BIG_PERIOD,
    'moment_3_%s' % SMALL_PERIOD,
    'moment_3_%s' % BIG_PERIOD,
    'moment_4_%s' % SMALL_PERIOD,
    'moment_4_%s' % BIG_PERIOD,
    'trix',
    'rsi',
    'macd',
    'cci_%s' % SMALL_PERIOD,
    'cci_%s' % BIG_PERIOD
]
# Column names, positionally matching ALL_HLC_FUNCTIONS.
ALL_HLC_NAMES = [
    'typical_price',
    'prev_typical_price_1',
    'prev_typical_price_2',
    'prev_typical_price_3',
    'prev_typical_price_4',
    'prev_typical_price_5',
    'percent_r_%s' % SMALL_PERIOD,
    'percent_r_%s' % BIG_PERIOD,
    'ult_osc'
]
|
'''
This module should contain a class that automates gromacs tool gmx msd
'''
import os
import MDAnalysis as mda
import numpy as np
from MDAnalysis.analysis.lineardensity import LinearDensity
from MDAnalysis.analysis.waterdynamics import MeanSquareDisplacement as MSD
from . import neighbors
from .neighbors import Neighbors
from .. import log
from ..common import exec_gromacs, GMXNAME
from ..systeminfo import SysInfo
from ..definitions import lipidmolecules
class MSDanalysis(SysInfo):
    """Automate mean square displacement (MSD) calculations for the system,
    either via MDAnalysis or via the GROMACS ``gmx msd`` tool."""
    def __init__(self,inputfilename="inputfile"):
        super().__init__(inputfilename)
        # All molecule names joined, for selection strings.
        self.lipid_type_items = ' '.join(self.molecules)
        # First molecule is used as the main lipid and the second as the
        # sterol -- assumes self.molecules holds at least two entries;
        # TODO confirm.
        self.lipid_types_mainlipid = ''.join(self.molecules[0])
        self.lipid_types_sterol = ''.join(self.molecules[1])
    def MSD_mdanalysis(self,start_frame,end_frame):
        '''Calculate the MSD of the main lipid's P atoms with MDAnalysis and
        write it to msd_mdanalysis.xvg.

        NOTE(review): the start_frame/end_frame arguments are overwritten
        below based on self.times[2], and frame_intervals is assigned but
        never used -- confirm whether the parameters should take effect.
        '''
        u = mda.Universe(self.gropath,self.trjpath)
        selection = ('resname {} and name P'.format(self.lipid_types_mainlipid))
        print(self.lipid_types_mainlipid)
        # Choose the frame window from the trajectory output interval.
        if self.times[2] == '1000':
            start_frame = 100
            frame_intervals = 1
            end_frame = 300
        else:
            start_frame = 1000
            frame_intervals = 10
            end_frame = 3000
        MSD_analysis = MSD(u, selection, start_frame, end_frame, 20)
        MSD_analysis.run()
        # One line per frame: a running frame counter and the MSD value.
        with open("msd_mdanalysis.xvg" , 'w') as fout:
            time = 0
            fout.write('Time\tMSD\n')
            for msd in MSD_analysis.timeseries:
                fout.write("{time} {msd}\n".format(time=time, msd=msd))
                time += 1
    def MSD_gromacs_mainlipid(self,start_time,end_time):
        '''Run ``gmx msd`` (lateral in z, with -rmcomm) for the main lipid's
        P atoms and write the cleaned data to msd_mainlipid.dat.

        NOTE(review): end_time is accepted but never used -- confirm.
        '''
        cwd = os.getcwd()
        index_msd = ''.join(cwd + '/' + 'index_msd_mainlipid.ndx')
        msd_raw = ''.join(cwd + '/' + 'msd_mainlipid_raw')
        # First build an index file holding the selection of interest.
        get_selection = [GMXNAME, 'select', '-f', self.trjpath, '-s', self.tprpath, '-on', index_msd, \
            '-select', '(resname {} and name P)'.format(self.lipid_types_mainlipid)]
        #print(get_selection)
        out, err = exec_gromacs(get_selection)
        # Then compute the MSD itself on that index group.
        get_msd = [GMXNAME, 'msd', '-f', self.trjpath, '-s', self.tprpath, '-n', index_msd, \
            '-o', msd_raw, '-lateral', 'z', '-b', str(start_time), '-rmcomm', '-beginfit', '-1', '-endfit', '-1']
        #print(get_msd)
        out, err = exec_gromacs(get_msd)
        with open("gmx_msd_mainlipid.log","a") as logfile:
            logfile.write(err)
            logfile.write(out)
        # Strip xvg comment (#) and directive (@) lines; keep time/MSD pairs.
        with open("msd_mainlipid_raw.xvg", 'r') as f:
            ls = f.readlines()
        with open("msd_mainlipid.dat" , 'w') as fout:
            fout.write('Time\tMSD\n')
            for l in ls:
                lc = l.strip()
                if lc:
                    if lc[0] != '#' and lc[0] != '@':
                        lf = lc.split()
                        fout.write('{}\t{}\n'.format(lf[0],lf[1]))
    def MSD_gromacs_sterol(self,start_time,end_time):
        '''Run ``gmx msd`` for the sterol's head atom and write the cleaned
        data to msd_sterol.dat (same pipeline as MSD_gromacs_mainlipid).

        NOTE(review): end_time is accepted but never used -- confirm.
        '''
        cwd = os.getcwd()
        index_msd = ''.join(cwd + '/' + 'index_msd_sterol.ndx')
        msd_raw = ''.join(cwd + '/' + 'msd_sterol_raw')
        # The sterol is tracked by its first head atom.
        head_atoms = lipidmolecules.head_atoms_of(self.lipid_types_sterol)
        head_atom = head_atoms[0]
        get_selection = [GMXNAME, 'select', '-f', self.trjpath, '-s', self.tprpath, '-on', index_msd, \
            '-select', '(resname {} and name {})'.format(self.lipid_types_sterol,head_atom)]
        #print(get_selection)
        out, err = exec_gromacs(get_selection)
        get_msd = [GMXNAME, 'msd', '-f', self.trjpath, '-s', self.tprpath, '-n', index_msd, \
            '-o', msd_raw, '-lateral', 'z', '-b', str(start_time), '-rmcomm', '-beginfit', '-1', '-endfit', '-1']
        #print(get_msd)
        out, err = exec_gromacs(get_msd)
        with open("gmx_msd_sterol.log","a") as logfile:
            logfile.write(err)
            logfile.write(out)
        # Strip xvg comment (#) and directive (@) lines; keep time/MSD pairs.
        with open("msd_sterol_raw.xvg", 'r') as f:
            ls = f.readlines()
        with open("msd_sterol.dat" , 'w') as fout:
            fout.write('Time\tMSD\n')
            for l in ls:
                lc = l.strip()
                if lc:
                    if lc[0] != '#' and lc[0] != '@':
                        lf = lc.split()
                        fout.write('{}\t{}\n'.format(lf[0],lf[1]))
class MSDanalysisDirect(SysInfo):
    """Compute MSD 'directly' in Python (per-atom displacement from one or
    more reference frames) using MDAnalysis, without gmx msd."""
    def __init__(self,inputfilename="inputfile"):
        super().__init__(inputfilename)
        self.lipid_type_items = ' '.join(self.molecules)
        # First molecule is the species whose P atoms are tracked.
        self.lipid_types_first = ''.join(self.molecules[0])
    def MSD(self,start_frame,end_frame,ref_time):
        '''Write per-frame MSD relative to frame ref_time to
        msd_direct_mdanalysis.xvg.

        NOTE(review): start_frame is unused -- iteration begins at ref_time.
        The 1e-2 factor presumably converts Angstrom^2 to nm^2 -- confirm.
        '''
        u = mda.Universe(self.gropath,self.trjpath)
        selection = u.select_atoms('resname {} and name P'.format(self.lipid_types_first))
        # Move to the reference frame and capture reference coordinates.
        u.trajectory[ref_time]
        selection_ref_xyz = selection.positions
        print(selection_ref_xyz)
        # for ts in u.trajectory[ref_time]:
        #     selection_ref_xyz = selection.positions
        #     print(selection_ref_xyz)
        t0 = ref_time*100  # reference time; assumes 100 ps per frame -- TODO confirm
        with open("msd_direct_mdanalysis.xvg" , 'w') as fout:
            fout.write('Time\tMSD\n')
            for i_ts,ts in enumerate(u.trajectory[ref_time:end_frame:]):
                time = u.trajectory.time
                #print(len(self.u.trajectory))
                #n_frame += 1
                selection_xyz = selection.positions
                print(selection_xyz)
                displacement = selection_xyz - selection_ref_xyz
                print(displacement)
                displacement_norm = np.linalg.norm(displacement, axis=1)
                print(displacement_norm)
                mean_squared_displacement = np.mean(1e-2*(displacement_norm**2))
                print(mean_squared_displacement)
                fout.write('{}\t{}\n'.format(u.trajectory.time - t0,mean_squared_displacement))
    def MSD_smooth(self,start_frame,end_frame,n_ref):
        '''Average the displacement curve over n_ref successive reference
        frames and write the smoothed result to msd_direct_smooth.dat.

        NOTE(review): this method averages 1e-2*displacement_norm (NOT
        squared), unlike MSD/MSD_another which square the norm -- confirm
        which is intended.
        '''
        u = mda.Universe(self.gropath,self.trjpath)
        selection = u.select_atoms('resname {} and name P'.format(self.lipid_types_first))
        # Rows: frame offsets; columns: one per reference frame.
        data = np.zeros((len(u.trajectory[start_frame:end_frame:]),n_ref))
        time = np.zeros(len(u.trajectory[start_frame:end_frame:]))
        for i in range(n_ref):
            #t0 = (start_frame+i)*100
            t0 = start_frame+i
            u.trajectory[start_frame+i]
            selection_ref_xyz = selection.positions
            print(selection_ref_xyz)
            n_frame = 0
            for i_ts,ts in enumerate(u.trajectory[start_frame+i:end_frame:]):
                #time = u.trajectory.time
                print(selection.n_atoms)
                #print(len(self.u.trajectory))
                selection_xyz = selection.positions
                # print(selection_xyz)
                # print(selection_ref_xyz)
                displacement = selection_xyz - selection_ref_xyz
                #print(displacement)
                displacement_norm = np.linalg.norm(displacement, axis=1)
                #print(displacement_norm)
                mean_squared_displacement = np.mean(1e-2*displacement_norm)
                #print(mean_squared_displacement)
                #print(int(((time+i) - t0)/10000))
                #data[int(((time+i) - t0)/10000)][i] = mean_squared_displacement
                # Index simplifies to n_frame since t0 == start_frame+i.
                time[start_frame+i+n_frame-t0] = n_frame*100
                data[start_frame+i+n_frame-t0][i] = mean_squared_displacement
                #print(data)
                n_frame += 1
        # Average over reference frames, ignoring never-filled (zero) cells.
        final_msd = np.true_divide(data.sum(1),(data!=0).sum(1))
        total_data = np.vstack((time,final_msd))
        print(total_data)
        with open("msd_direct_smooth.dat" , 'w') as fout:
            fout.write('Time\tMSD\n')
            for i,j in enumerate(total_data[0,:]):
                fout.write('{}\t{}\n'.format(total_data[0,i],total_data[1,i]))
    def MSD_another(self,start_frame,end_frame,ref_time):
        '''Same computation (and same output file) as MSD above.

        NOTE(review): this method duplicates MSD almost verbatim and writes
        to the same msd_direct_mdanalysis.xvg -- confirm it is still needed.
        '''
        u = mda.Universe(self.gropath,self.trjpath)
        selection = u.select_atoms('resname {} and name P'.format(self.lipid_types_first))
        u.trajectory[ref_time]
        selection_ref_xyz = selection.positions
        print(selection_ref_xyz)
        # for ts in u.trajectory[ref_time]:
        #     selection_ref_xyz = selection.positions
        #     print(selection_ref_xyz)
        t0 = ref_time*100  # reference time; assumes 100 ps per frame -- TODO confirm
        with open("msd_direct_mdanalysis.xvg" , 'w') as fout:
            fout.write('Time\tMSD\n')
            for i_ts,ts in enumerate(u.trajectory[ref_time:end_frame:]):
                time = u.trajectory.time
                #print(len(self.u.trajectory))
                #n_frame += 1
                selection_xyz = selection.positions
                print(selection_xyz)
                displacement = selection_xyz - selection_ref_xyz
                print(displacement)
                displacement_norm = np.linalg.norm(displacement, axis=1)
                print(displacement_norm**2)
                mean_squared_displacement = np.mean(1e-2*displacement_norm**2)
                print(mean_squared_displacement)
                fout.write('{}\t{}\n'.format(u.trajectory.time - t0,mean_squared_displacement))
|
#!/usr/bin/env python
'''
Features to do a "make" double-pump.
This feature rely on the "prepare" step to have run. It produces "build" and "install" steps.
'''
from waflib.TaskGen import feature
import waflib.Logs as msg
from orch.wafutil import exec_command
import orch.features
# Default worch parameters for the 'makemake' feature; values may reference
# other worch variables via {brace} interpolation at step-execution time.
orch.features.register_defaults(
    'makemake',
    build_cmd = 'make',
    build_cmd_options = '',
    build_target = '',
    build_target_path = '{build_dir}/{build_target}',
    install_cmd = 'make install',
    install_cmd_options = '', # please don't set DESTDIR implicitly
    install_target = '',
    install_target_path = '{install_dir}/{install_target}',
)
@feature('makemake')
def feature_makemake(tgen):
    """Attach 'build' and 'install' steps, each running a make command
    formatted from the task generator's worch parameters."""
    def build(task):
        # Command string is interpolated lazily, at task-execution time.
        return exec_command(task, tgen.worch.format('{build_cmd} {build_cmd_options}'))
    def install(task):
        return exec_command(task, tgen.worch.format('{install_cmd} {install_cmd_options}'))
    tgen.step('build',
              rule = build,
              source = tgen.control_node('prepare'),
              target = tgen.worch.build_target_path)
    tgen.step('install',
              rule = install,
              source = tgen.control_node('build'),
              target = tgen.make_node(tgen.worch.install_target_path))
|
# Example tuples; only tup5 is actually used below.
tup1 = ('physics', 'chemistry', 1997, 2000)
tup2 = (1, 2, 3, 4, 5)
tup3 = "a", "b", "c", "d"
tup4 = ()
tup5 = (25, 45, 65, 85, 45, 35)
# Report aggregate statistics of tup5.
for label, value in (("Max =", max(tup5)), ("Min =", min(tup5)), ("Length =", len(tup5))):
    print(label, value)
|
"""
Example to demonstrate simple transpiling and evaluating.
"""
from flexx.pyscript import js, py2js, evaljs, evalpy
def foo(a, b=1, *args):
    print(a)
    return b
# NOTE(review): foo is intentionally left uncommented -- its source is
# printed verbatim below via jscode.pycode, so any added comment would
# change the script's output.
# Create jscode object (transpiles foo's Python source to JavaScript)
jscode = js(foo)
# Print some info that we have on the code
print(jscode.name)    # the transpiled function's name
print(jscode.pycode)  # original Python source
print(jscode.jscode)  # generated JavaScript source
# Convert strings of Python to JS
print(py2js('isinstance(x, str)'))
print(py2js('isinstance(x, Bar)'))
# Evaluate js in nodejs
print(evaljs('10 * 10'))
# Evaluate PyScript in nodejs
print(evalpy('10**10'))
|
# Simple linear regression: predict GPA from SAT score.
#Importing the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the Dataset
dataset = pd.read_csv('Academic_Data.csv')
# Feature matrix: every column except the last.
# NOTE(review): y is taken from column 1 while X is all-but-last; these are
# consistent only if the csv has exactly two columns -- confirm the schema.
X = dataset.iloc[:, :-1].values
#creating the dependent variable vector
y = dataset.iloc[:, 1].values
#Splitting the dataset into the Training set and Test set (80/20, fixed seed)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#Feature Scaling (not needed: LinearRegression handles unscaled features)
"""from sklearn.preprocessing import StandardScaler
sc_X =StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
#Fitting Simple Linear Regression to the Training Set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
#Predicting the Test Set Results (GPAs)
y_pred = regressor.predict(X_test) #predictions of the test set
#Visualising the Training Set Results
plt.scatter(X_train, y_train, color = 'red') #real values/ observation points of the training set
plt.plot(X_train, regressor.predict(X_train), color = 'blue') #predictions trained by X and y train
plt.title('SAT Score vs GPA (Training Set)')
plt.xlabel('SAT Score')
plt.ylabel('GPA')
plt.show()
#Visualising the Test Set Results
plt.scatter(X_test, y_test, color = 'red') #observation points of the test set
plt.plot(X_train, regressor.predict(X_train), color = 'blue') #same prediction line from training results
plt.title('SAT Score vs GPA (Test Set)')
plt.xlabel('SAT Score')
plt.ylabel('GPA')
plt.show()
|
from book import Book
from user import User
from review import Review
from author import Author
import time
import psycopg2 as dbapi2
class Database:
    def __init__(self, db_url):
        # Connection string handed to psycopg2; every method below opens
        # its own short-lived connection with it.
        self.db_url = db_url
    def add_book(self, book):
        """Insert `book` (creating its author if needed) and return the new
        book id; genre links are added afterwards.

        NOTE(review): book.author arrives as an author *name* and is
        overwritten with the author's numeric id before the INSERT -- the
        passed-in Book object is mutated.
        """
        with dbapi2.connect(self.db_url) as connection:
            cursor = connection.cursor()
            if book.author is not None:
                if self.get_author(book.author) is None:
                    # Unknown author: create it and keep the generated id.
                    book.author = self.add_author(Author(name=book.author))
                else:
                    book.author = self.get_author(book.author).id
            query = "INSERT INTO BOOK (TITLE, AUTHORID, YR, PGNUM, COVER, DESCRIPTION) VALUES (%s, %s, %s, %s, %s, %s) RETURNING ID"
            cursor.execute(query, (book.title, book.author, book.year, book.pageNumber, book.cover, book.description))
            connection.commit()
            book_id = cursor.fetchone()[0]
            for genre in book.genres:
                self.add_genre(book_id, genre)
            return book_id
    def update_book(self, book_id, book):
        """Rewrite the BOOK row for book_id and replace its genre links.

        NOTE(review): when the author already exists, update_author is
        called with a fresh Author(name=...) whose id/description/photo are
        presumably unset -- that UPDATE looks like a no-op (or data-wiping)
        call; confirm against the Author class defaults.
        """
        with dbapi2.connect(self.db_url) as connection:
            cursor = connection.cursor()
            if book.author is not None:
                if self.get_author(book.author) is None:
                    # Unknown author name: create it and store the new id.
                    book.author = self.add_author(Author(name=book.author))
                else:
                    self.update_author(Author(name=book.author))
                    book.author = self.get_author(book.author).id
            query = "UPDATE BOOK SET TITLE = %s, AUTHORID = %s, YR = %s, PGNUM = %s, COVER = %s, DESCRIPTION = %s WHERE (ID = %s)"
            cursor.execute(query, (book.title, book.author, book.year, book.pageNumber, book.cover, book.description, book_id))
            connection.commit()
            # Genres are replaced wholesale: delete old links, add new ones.
            self.delete_genres(book_id)
            for genre in book.genres:
                self.add_genre(book_id, genre)
def delete_book(self, book_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "DELETE FROM BOOK WHERE (ID = %s)"
cursor.execute(query, (book_id,))
connection.commit()
self.delete_genres(book_id)
    def get_book(self, book_id):
        """Return (Book, author_id) for book_id, with the author name
        resolved, genres attached, and the average review score computed.

        NOTE(review): raises TypeError for a missing id (fetchone() returns
        None and the tuple unpack fails) -- confirm callers expect that.
        """
        with dbapi2.connect(self.db_url) as connection:
            cursor = connection.cursor()
            query1 = "SELECT TITLE, AUTHORID, YR, PGNUM, COVER, DESCRIPTION FROM BOOK WHERE (ID = %s)"
            cursor.execute(query1, (book_id,))
            title, author_id, year, pageNumber, cover, description = cursor.fetchone()
            author = None
            if author_id:
                # Resolve the author id to a display name.
                query2 = "SELECT NAME FROM AUTHOR WHERE (ID = %s)"
                cursor.execute(query2, (author_id,))
                author = cursor.fetchone()[0]
            query3 = "SELECT AVG(SCORE) FROM REVIEW WHERE (BOOKID = %s)"
            cursor.execute(query3, (book_id,))
            avgscore = cursor.fetchone()[0]
            genres = self.get_genres(book_id)
            book_ = Book(id=book_id, title=title, author=author, year=year, genres=genres, pageNumber=pageNumber, cover=cover, description=description, avgscore=avgscore)
            return book_, author_id
def get_books(self, query=None, genre=None, year=None, p=None):
books = []
offset = (int(p) - 1) * 12 if (int(p) > 0 and p) else 0
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
if query:
query1 = "SELECT ID, TITLE, YR, COVER FROM BOOK WHERE LOWER(TITLE) LIKE LOWER(%s) ORDER BY ID OFFSET %s LIMIT 12"
query_words = query.split()
like_pattern = '%'
for word in query_words:
like_pattern += '{}%'.format(word)
cursor.execute(query1, (like_pattern, offset))
elif genre:
query1 = "SELECT BOOK.ID, BOOK.TITLE, BOOK.YR, BOOK.COVER FROM BOOK INNER JOIN GENRES ON BOOK.ID = GENRES.BOOKID WHERE (GENRE = %s) GROUP BY BOOK.ID, BOOK.TITLE, BOOK.YR, BOOK.COVER ORDER BY BOOK.ID OFFSET %s LIMIT 12"
cursor.execute(query1, (genre, offset))
elif year:
query1 = "SELECT ID, TITLE, YR, COVER FROM BOOK WHERE (YR = %s) ORDER BY ID OFFSET %s LIMIT 12"
cursor.execute(query1, (year, offset))
else:
query1 = "SELECT ID, TITLE, YR, COVER FROM BOOK ORDER BY ID OFFSET %s LIMIT 12"
cursor.execute(query1, (offset,))
for book_id, title, year, cover in cursor:
books.append(Book(id=book_id, title=title, year=year, cover=cover))
return books
def get_books_by_author(self, author_id):
books = []
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ID, TITLE, YR, COVER FROM BOOK WHERE (AUTHORID = %s) ORDER BY ID"
cursor.execute(query, (author_id,))
for book_id, title, year, cover in cursor:
books.append(Book(id=book_id, title=title, year=year, cover=cover))
return books
def get_books_count(self, query=None, year=None, genre=None):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query1 = "SELECT COUNT(ID) FROM BOOK"
if query or year or genre:
query1 += " WHERE "
if query:
query1 += "LOWER(TITLE) LIKE LOWER(%s)"
query_words = query.split()
like_pattern = '%'
for word in query_words:
like_pattern += '{}%'.format(word)
cursor.execute(query1, (like_pattern,))
elif year:
query1 += "(YR = %s)"
cursor.execute(query1, (year,))
elif genre:
query1 = "SELECT COUNT(BOOK.ID) FROM BOOK INNER JOIN GENRES ON BOOK.ID = GENRES.BOOKID WHERE (GENRE = %s)"
cursor.execute(query1, (genre,))
else:
cursor.execute(query1)
count = cursor.fetchone()[0]
return count
def get_top_books(self):
books = []
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT BOOK.ID, BOOK.TITLE, BOOK.YR, BOOK.COVER, AVG(REVIEW.SCORE) FROM BOOK INNER JOIN REVIEW ON BOOK.ID = REVIEW.BOOKID GROUP BY BOOK.ID, BOOK.TITLE, BOOK.YR, BOOK.COVER ORDER BY AVG(REVIEW.SCORE) DESC LIMIT 10"
cursor.execute(query)
for book_id, title, year, cover, avgscore in cursor:
books.append(Book(id=book_id, title=title, year=year, cover=cover, avgscore=avgscore))
return books
def add_user(self, user):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "INSERT INTO BOOKWORM (USERNAME, EMAIL, PASSWORD, PROFILEPICTURE, GENDER) VALUES (%s, %s, %s, %s, %s) RETURNING ID"
cursor.execute(query, (user.username, user.email, user.password, user.profile_picture, user.gender))
connection.commit()
user_key = cursor.fetchone()[0]
return user_key
def get_user_by_username(self, username):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ID, USERNAME, EMAIL, PASSWORD, PROFILEPICTURE, GENDER FROM BOOKWORM WHERE (USERNAME = %s)"
cursor.execute(query, (username,))
try:
user_id, username, email, password, profile_picture, gender = cursor.fetchone()
except:
return None
is_admin = self.check_admin(user_id)
user_ = User(username, email=email, password=password, id=user_id, profile_picture=profile_picture, gender=gender, is_admin=is_admin)
return user_
def get_user_by_email(self, email):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ID, USERNAME, EMAIL, PASSWORD, PROFILEPICTURE, GENDER FROM BOOKWORM WHERE (EMAIL = %s)"
cursor.execute(query, (email,))
try:
user_id, username, email, password, profile_picture, gender = cursor.fetchone()
except:
return None
is_admin = self.check_admin(user_id)
user_ = User(username, email=email, password=password, id=user_id, profile_picture=profile_picture, gender=gender, is_admin=is_admin)
return user_
def get_user_by_id(self, user_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ID, USERNAME, EMAIL, PASSWORD, PROFILEPICTURE, GENDER FROM BOOKWORM WHERE (ID = %s)"
cursor.execute(query, (user_id,))
try:
user_id, username, email, password, profile_picture, gender = cursor.fetchone()
except:
return None
is_admin = self.check_admin(user_id)
user_ = User(username, email=email, password=password, id=user_id, profile_picture=profile_picture, gender=gender, is_admin=is_admin)
return user_
def get_user_id(self, username):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ID FROM BOOKWORM WHERE (USERNAME = %s)"
cursor.execute(query, (username,))
try:
user_id = cursor.fetchone()
except:
return None
return user_id
def add_author(self, author):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "INSERT INTO AUTHOR (NAME, DESCRIPTION, PHOTO) VALUES (%s, %s, %s) RETURNING ID"
cursor.execute(query, (author.name, author.description, author.photo))
connection.commit()
author_id = cursor.fetchone()[0]
return author_id
def get_author(self, name):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ID, DESCRIPTION, PHOTO FROM AUTHOR WHERE (NAME = %s)"
cursor.execute(query, (name,))
try:
id, description, photo = cursor.fetchone()
except:
return None
return Author(name=name, id=id, description=description, photo=photo)
def get_author_by_id(self, author_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT NAME, DESCRIPTION, PHOTO FROM AUTHOR WHERE (ID = %s)"
cursor.execute(query, (author_id,))
try:
name, description, photo = cursor.fetchone()
except:
return None
return Author(name=name, id=author_id, description=description, photo=photo)
def delete_author(self, author_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "DELETE FROM AUTHOR WHERE (ID = %s)"
cursor.execute(query, (author_id,))
connection.commit()
def update_author(self, author):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "UPDATE AUTHOR SET NAME = %s, DESCRIPTION = %s, PHOTO = %s WHERE (ID = %s)"
cursor.execute(query, (author.name, author.description, author.photo, author.id))
connection.commit()
def add_review(self, review):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "INSERT INTO REVIEW (SCORE, COMMENT, BOOKID, USERID, DATEWRITTEN) VALUES (%s, %s, %s, %s, %s) RETURNING ID"
cursor.execute(query, (review.score, review.comment, review.book, review.author, time.strftime('%Y-%m-%d %H:%M')))
connection.commit()
review_id = cursor.fetchone()[0]
return review_id
def get_reviews(self, book_id):
reviews = []
users = []
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT USERID, SCORE, COMMENT, ID, DATEWRITTEN FROM REVIEW WHERE (BOOKID = %s) ORDER BY ID"
cursor.execute(query, (book_id,))
connection.commit()
for userid, score, comment, review_id, datewritten in cursor:
pp = self.get_user_by_id(userid).profile_picture
user_dict = {
"id": userid,
"profile_picture": pp
}
users.append(user_dict)
author = self.get_user_by_id(userid).username
reviews.append(Review(author, book_id, score, comment, review_id, datewritten))
return reviews, users
def get_reviews_by_user(self, user_id):
reviews = []
book_names = []
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT BOOKID, SCORE, COMMENT, ID, DATEWRITTEN FROM REVIEW WHERE (USERID = %s) ORDER BY ID"
cursor.execute(query, (user_id,))
connection.commit()
for book_id, score, comment, review_id, datewritten in cursor:
author = self.get_user_by_id(user_id).username
book_names.append(self.get_book(book_id)[0].title)
reviews.append(Review(author, book_id, score, comment, review_id, datewritten))
return reviews, book_names
def get_review(self, review_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query1 = "SELECT USERID, BOOKID, SCORE, COMMENT, DATEWRITTEN FROM REVIEW WHERE (ID = %s)"
cursor.execute(query1, (review_id,))
user_id, book_id, score, comment, datewritten = cursor.fetchone()
review_ = Review(author=user_id, book=book_id, score=score, comment=comment, id=review_id, datewritten=datewritten)
return review_
def delete_review(self, review_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "DELETE FROM REVIEW WHERE (ID = %s)"
cursor.execute(query, (review_id,))
connection.commit()
def update_review(self, review):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "UPDATE REVIEW SET SCORE = %s, COMMENT = %s WHERE (ID = %s)"
cursor.execute(query, (review.score, review.comment, review.id))
connection.commit()
def delete_user(self, user_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "DELETE FROM BOOKWORM WHERE (ID = %s)"
cursor.execute(query, (user_id,))
connection.commit()
def update_user(self, user_id, user):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "UPDATE BOOKWORM SET USERNAME = %s, EMAIL = %s, PASSWORD = COALESCE(%s, PASSWORD), PROFILEPICTURE = COALESCE(%s, PROFILEPICTURE), GENDER = %s WHERE (ID = %s)"
cursor.execute(query, (user.username, user.email, user.password, user.profile_picture, user.gender, user_id))
connection.commit()
def add_admin(self, user_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "INSERT INTO ADMINS (ADMINID) VALUES (%s)"
cursor.execute(query, (user_id,))
connection.commit()
def delete_admin(self, user_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "DELETE FROM ADMINS WHERE (ADMINID = %s)"
cursor.execute(query, (user_id,))
connection.commit()
def check_admin(self, user_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "SELECT ADMINID FROM ADMINS WHERE (ADMINID = %s)"
cursor.execute(query, (user_id,))
connection.commit()
try:
review_id = cursor.fetchone()[0]
if review_id:
return True
except:
return False
def delete_profile_picture(self, user_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "UPDATE BOOKWORM SET PROFILEPICTURE = NULL WHERE (ID = %s)"
cursor.execute(query, (user_id,))
connection.commit()
def add_genre(self, book_id, genre):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "INSERT INTO GENRES (BOOKID, GENRE) VALUES (%s, %s)"
cursor.execute(query, (book_id, genre))
connection.commit()
def delete_genres(self, book_id):
with dbapi2.connect(self.db_url) as connection:
cursor = connection.cursor()
query = "DELETE FROM GENRES WHERE (BOOKID = %s)"
cursor.execute(query, (book_id,))
connection.commit()
def get_genres(self, book_id):
with dbapi2.connect(self.db_url) as connection:
genres = []
cursor = connection.cursor()
query = "SELECT GENRE FROM GENRES WHERE (BOOKID = %s)"
cursor.execute(query, (book_id,))
for genre in cursor:
genres.append(genre[0])
cursor.execute(query, (book_id,))
connection.commit()
return genres |
# -*- coding: utf-8 -*-
# Module: addon
# Author: Mike Knight
# Created on: 12.09.2019
# License MIT
import sys
import json
from urllib import urlencode
from urlparse import parse_qsl
import xbmcaddon
import xbmcgui
import xbmcplugin
_url = sys.argv[0]
_handle = int(sys.argv[1])
ADDON = xbmcaddon.Addon('plugin.radio.we')
CWD = ADDON.getAddonInfo('path').decode('utf-8')
def get_url(**kwargs):
    """Build a plugin-call URL for this add-on from keyword parameters."""
    query_string = urlencode(kwargs)
    return '{0}?{1}'.format(_url, query_string)
def get_stations_list():
    """Load and return the station list bundled with the add-on."""
    stations_path = CWD + '/resources/data/stations.json'
    with open(stations_path, 'r') as stations_file:
        return json.load(stations_file)
def list_stations():
    """Build the root directory listing: one playable item per station."""
    xbmcplugin.setPluginCategory(_handle, 'Stations')
    xbmcplugin.setContent(_handle, 'songs')
    stations = get_stations_list()
    for station in stations:
        list_item = xbmcgui.ListItem(
            label=station['name'], thumbnailImage=station['thumb'])
        # Artwork lives under resources/media/<kind>/ inside the add-on dir.
        list_item.setArt({
            'thumb': CWD + '/resources/media/thumb/' + station['thumb'],
            'icon': CWD + '/resources/media/icon/' + station['thumb'],
            'fanart': CWD + '/resources/media/fanart/' + station['fanart']
        })
        list_item.setInfo(
            'music', {'title': station['name'], 'genre': station['genre'], 'mediatype': 'episode'})
        # This is mandatory for playable items!
        list_item.setProperty('IsPlayable', 'true')
        # The item URL routes back into this plugin with action=play.
        url = get_url(action='play', stream=station['stream'])
        is_folder = False
        xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
    xbmcplugin.endOfDirectory(_handle)
def play_stream(path):
    """Resolve the plugin call to the given stream *path* so Kodi plays it."""
    resolved_item = xbmcgui.ListItem(path=path)
    xbmcplugin.setResolvedUrl(_handle, True, listitem=resolved_item)
def router(paramstring):
    """
    Route a plugin call to the matching handler.

    :param paramstring: URL encoded plugin paramstring
    :type paramstring: str
    :raises ValueError: if the paramstring contains an unsupported action
    """
    # Parse the URL-encoded paramstring to a {parameter: value} dict.
    params = dict(parse_qsl(paramstring))
    if not params:
        # Called from the Kodi UI without parameters: show the root listing.
        list_stations()
        return
    # .get() instead of ['action'] so a paramstring that lacks an action
    # falls through to the informative ValueError below instead of raising a
    # bare KeyError.
    action = params.get('action')
    if action == 'listing':
        # Display the list of stations.
        list_stations()
    elif action == 'play':
        # Play a stream from the provided URL.
        play_stream(params['stream'])
    else:
        # Unsupported action: raise to catch coding errors such as typos.
        raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
if __name__ == '__main__':
    # Entry point: Kodi passes the plugin URL in argv[0], the handle in
    # argv[1] and the query string (with a leading '?') in argv[2].
    # The slice trims the leading '?' from the plugin call paramstring.
    router(sys.argv[2][1:])
|
import WConio as W
import logging
log = logging.getLogger('term.windows')
#import our color constants per-platform
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
BROWN = 6
LIGHTGRAY = LIGHTGREY = 7
DARKGRAY = DARKGREY = 8
LIGHTBLUE = 9
LIGHTGREEN = 10
LIGHTCYAN = 11
LIGHTRED = 12
LIGHTMAGENTA = 13
YELLOW = 14
WHITE = 15
defaultcolor = W.gettextinfo()[4]
def init():
    # Switch the console to text mode and hide the cursor before drawing.
    W.textmode()
    setcursortype(0)
#----------------------------------------------------------------------------
#Actual functions
all_dirty = False
def flip():
    '''
    Ensure the screen is up-to-date with any virtual shenanigans involved.
    Only really needed by curses.

    WConio writes straight to the console, so here we only clear the
    module-level dirty flag.
    '''
    global all_dirty
    all_dirty = False
def reset():
    '''
    Reset the entire terminal.
    Used for things that want to exit the game cleanly.
    '''
    W.textmode()
def resize(h, w):
    # Resize the console window via the Windows "mode con" command;
    # shell=True is required because "mode" is a cmd.exe builtin.
    import subprocess
    subprocess.call(['mode', 'con', 'lines=%i' % h, 'cols=%i' % w], shell=True)
    #subprocess.call(["mode", "con", "cp", "select=437"], shell=True)
    #since we have subprocess already here... this is as good a place as any to set the 437 codepage.
    #commented out because I'm not confident it does anything, but if we find out we do need it, it's here.
def restore():
    '''
    Keep the terminal usable.
    Always performed on exit.
    '''
    # Clear to end of line, restore the attribute captured at import time,
    # and bring the cursor back.
    W.clreol()
    W.textattr(defaultcolor)
    W.setcursortype(1)
def settitle(title):
    # Set the console window title (thin passthrough to WConio).
    W.settitle(title)
def setcursortype(i):
    # Cursor shape passthrough: 0 hides the cursor, 1 is the normal cursor.
    W.setcursortype(i)
def draw_buffer(buf, x, y):
    # Blit *buf* to the console at (x, y).  The WConio-format byte string is
    # cached on the buffer (buf._text) and regenerated only when dirty.
    if buf.dirty:
        #generate a wconio buffer
        buf._text = render_buffer(buf)
        buf.dirty = False
    # puttext takes an inclusive rectangle, hence the -1 on both extents.
    W.puttext(x, y,
        x + buf.width-1,
        y + buf.height-1,
        buf._text
    )
def render_buffer(buf):
    """Flatten *buf* into WConio's puttext format: for every cell, the
    character followed by an attribute byte packing the background colour in
    the high nibble and the foreground in the low nibble."""
    cells = []
    for line in buf.data:
        for fg, bg, ch in line[:buf.width]:
            cells.append(ch)
            cells.append(chr(fg + (bg << 4)))
    #log.debug('cells: %r', cells)
    return ''.join(cells)
def raw_getkey():
    """Read one key from WConio, normalizing CR/LF to the name 'enter'."""
    pressed = W.getkey()
    return 'enter' if pressed in ('\r', '\n') else pressed
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
# by David Zashkol
# 2 course, comp math
# Taras Shevchenko National University of Kyiv
# email: davendiy@gmail.com
"""
Компанія Giggle відкриває свій новий офіс у Судиславлі, і ви запрошені на співьесіду.
Ваша задача - розв'язати поставлену задачу.
Вам потрібно створити структуру даних, яка являє собою масив цілих чисел.
Спочатку масив порожній. Вам потрібно підтримувати дві операції:
запит: "? i j" - повертає мінімальний елемент між i-им та j-им, включно;
зміна: "+ i x" - додати елемент x після i-го елементу списку.
Якщо i = 0, то елемент додається у початок масиву.
Звичайно ж, ця структура повинна бути достатньо хорошою.
Вхідні дані
Перший рядок містить кількість операцій n (1 ≤ n ≤ 200000) над масивом.
Наступні n рядків описують самі операції. Усі операції додавання є коректними.
Усі числа, що зберігаються у масиві, за модулем не перевищують 109.
Вихідні дані
Для кожної операції ? у окремому рядку виведіть її результат.
"""
class Element:
    """A doubly linked list node holding *item* plus next/pre links."""
    def __init__(self, item):
        self.item = item
        self.next = None
        self.pre = None

    def __repr__(self):
        return "Element of list ({})".format(self.item)

    def __str__(self):
        return "Element of list ({})".format(self.item)


class BayonetList:
    """Doubly linked list with an internal cursor (``_current``).

    Fixes over the original: navigation errors are *raised* (the original
    ``return``-ed exception objects, silently doing nothing); ``go_pre``
    checks ``.pre`` instead of ``.next``; ``insert_after`` actually links the
    old cursor's ``next`` to the new node; head/end markers are maintained by
    inserts and ``del_curr``; iteration yields every element exactly once
    (the original skipped the head and dereferenced None at the end).
    """

    def __init__(self):
        self._head = None
        self._end = None
        self._current = None

    def empty(self):
        """Return True when the list holds no elements."""
        return self._head is None and self._end is None

    def go_first(self):
        """Move the cursor to the first element."""
        self._current = self._head

    def go_end(self):
        """Move the cursor to the last element."""
        self._current = self._end

    def get_current(self):
        """Return the item under the cursor."""
        if self.empty():
            raise Exception("BayonetList: 'get_current' applied to empty container")
        return self._current.item

    def go_next(self):
        """Advance the cursor one element; raise at the end of the list."""
        if self.empty():
            raise Exception("BayonetList: 'go_next' applied to empty container")
        if self._current.next is None:
            raise Exception("BayonetList: 'go_next' applied out of range")
        self._current = self._current.next

    def go_pre(self):
        """Move the cursor one element back; raise at the head of the list."""
        if self.empty():
            raise Exception("BayonetList: 'go_pre' applied to empty container")
        # Fixed: the bound check must inspect .pre, not .next.
        if self._current.pre is None:
            raise Exception("BayonetList: 'go_pre' applied out of range")
        self._current = self._current.pre

    def del_curr(self):
        """Unlink the element under the cursor; reset the cursor to the head."""
        if self.empty():
            raise Exception("BayonetList: 'del_curr' applied to empty container")
        node = self._current
        if node.pre is not None:
            node.pre.next = node.next
        if node.next is not None:
            node.next.pre = node.pre
        # Keep head/end bookkeeping consistent; the original never updated
        # them, so empty() could stay False after deleting the only element.
        if node is self._head:
            self._head = node.next
        if node is self._end:
            self._end = node.pre
        self._current = None
        if not self.empty():
            self.go_first()

    def insert_after(self, item):
        """Insert *item* after the cursor and move the cursor onto it."""
        node = Element(item)
        if self.empty():
            self._head = self._end = self._current = node
            return
        follower = self._current.next
        if follower is not None:
            follower.pre = node
        else:
            # Inserting after the last element: advance the end marker.
            self._end = node
        node.next = follower
        node.pre = self._current
        # Fixed: the original never linked the old cursor forward to the new
        # node, leaving the list broken after every insert_after.
        self._current.next = node
        self._current = node

    def insert_before(self, item):
        """Insert *item* before the cursor; the cursor stays on its element."""
        node = Element(item)
        if self.empty():
            self._head = self._end = self._current = node
            return
        leader = self._current.pre
        if leader is not None:
            leader.next = node
        else:
            # Inserting before the first element: move the head marker.
            self._head = node
        node.pre = leader
        node.next = self._current
        self._current.pre = node

    def __iter__(self):
        self.go_first()
        return self

    def __next__(self):
        # Yield the current item *then* advance; the original advanced first,
        # which skipped the head and crashed on None past the last element.
        if self._current is None:
            raise StopIteration
        item = self._current.item
        self._current = self._current.next
        return item

    def __str__(self):
        return "BayonetList(" + ' '.join(map(str, self)) + ')'

    def __repr__(self):
        return "BayonetList(" + ' '.join(map(str, self)) + ')'
|
'''
IC* graph for 3 categories
'''
import itertools
import networkx as nx
from sample_set import SampleSet
class HybridGraph(nx.Graph):
    # Undirected nx.Graph whose edges may carry an 'out' attribute marking a
    # direction (the node the edge points away from), letting directed and
    # undirected edges coexist in one structure.
    def add_directed_edge(self, out_node, in_node):
        # Record the orientation in the edge data rather than using DiGraph.
        self.add_edge(out_node, in_node, out=out_node)
class IC_Graph():
    """IC* causal-discovery graph built from conditional-independence tests
    over the variables (columns) of a SampleSet."""

    def __init__(self, sampleSet, SIGNIFICANCE_LEVEL=0.01):
        self.smp = sampleSet
        self.graph = nx.Graph()
        self.graph.add_nodes_from(self.smp.matrix.columns)
        self.nodes = self.graph.nodes
        self.SIGNIFICANCE_LEVEL = SIGNIFICANCE_LEVEL

    @staticmethod
    def _subsets(groupList):
        """Yield every subset of *groupList* as a list, smallest first."""
        for i in range(0, len(groupList)+1):
            for subset in itertools.combinations(groupList, i):
                yield list(subset)

    def print_results(self, res, node1, node2, subset):
        """Debug helper: dump one independence-test result."""
        print(f'node1: {node1}, node2: {node2}, subset: {subset}')
        print(res)
        print(self.smp.probability(list(set([node1]+[node2]+subset))))
        print('==========================================')

    def _find_minimal_group_for_independence(self, node1, node2, rest):
        '''
        H0 is that node1, node2 are independent given the subset.
        P value should be smaller than the significance level in order to disprove H0
        If P val is bigger, method return True => nodes are independent
        '''
        for subset in self._subsets(rest):
            # The old handler dropped into ipdb and then read an unbound
            # ``res``; skip subsets whose test fails instead.
            try:
                res = self.smp.mutual_information([node1], [node2], subset)
            except Exception:
                continue
            p_val = res['p_val']
            if p_val > self.SIGNIFICANCE_LEVEL:
                return True, p_val, subset
        return False, None, None

    def ic_step_1(self):
        """Connect every dependent pair; return the independent pairs along
        with the conditioning set that separated them."""
        independents = []
        node_arr = [node for node in self.nodes]
        for idx1 in range(len(node_arr)):
            for idx2 in range(idx1+1, len(node_arr)):
                node1 = node_arr[idx1]
                node2 = node_arr[idx2]
                rest = [node_arr[i] for i in range(len(node_arr)) if i not in (idx1, idx2)]
                is_independent, significance, condGroup = self._find_minimal_group_for_independence(node1, node2, rest)
                if is_independent:
                    independents.append((node1, node2, condGroup))
                else:
                    self.graph.add_edge(node1, node2)
        return independents

    def ic_step_2(self, independents):
        """Orient v-structures: for each separated pair, point their common
        neighbors outside the separating set toward the neighbor."""
        for node1, node2, condGroup in independents:
            # Fixed: the original intersected node1's neighborhood with
            # itself, so node2's neighbors were never consulted.
            common_neighbors = list(set(self.graph.neighbors(node1)) & set(self.graph.neighbors(node2)))
            for neighbor in common_neighbors:
                if not neighbor in condGroup:
                    self.graph.remove_edge(node1, neighbor)
                    self.graph.remove_edge(node2, neighbor)
                    self.graph.add_edges_from([(node1, neighbor), (node2, neighbor)], out=neighbor)

    @staticmethod
    def get_directed_all_directions(directed):
        """Return each directed edge in both (u, v) and (v, u) orientations."""
        edges2d = [[(edge[0], edge[1]), (edge[1], edge[0])] for edge in directed]
        return list(itertools.chain.from_iterable(edges2d))

    def get_edges_with_types(self):
        """Return (directed_edges, out_nodes, non_directed_edges) derived
        from the 'out' / 'out_star' edge annotations."""
        directed = [(t, t[2]) for t in self.graph.edges.data('out') if t[2] is not None]
        directed_star = [(t, t[2]) for t in self.graph.edges.data('out_star') if t[2] is not None]
        directed_edges = [t[0] for t in directed + directed_star]
        out_nodes = [t[1] for t in directed + directed_star]
        directed_all_directions = self.get_directed_all_directions(directed_edges)
        non_directed_edges = [edge for edge in self.graph.edges if edge not in directed_all_directions]
        return (directed_edges, list(set(out_nodes)), non_directed_edges)

    @staticmethod
    def get_neighbor(node, edge):
        """Return the endpoint of *edge* that is not *node*."""
        node_idx = edge.index(node)
        neighbor_idx = 1 if node_idx == 0 else 0
        return edge[neighbor_idx]

    def ic_step_3_r1(self):
        """Rule 1: mark (with 'out_star') undirected edges incident to a node
        that already has an incoming directed edge."""
        _, out_nodes, non_directed_edges = self.get_edges_with_types()
        for node in out_nodes:
            for edge in non_directed_edges:
                if node in edge:
                    neighbor = self.get_neighbor(node, edge)
                    self.graph.remove_edge(node, neighbor)
                    self.graph.add_edges_from([(node, neighbor)], out_star=neighbor)

    def ic_step_3_r2(self):
        # Rule 2 of IC* is not implemented yet.
        pass

    def build_graph(self):
        """Run all IC* phases in order against self.graph."""
        independents = self.ic_step_1()
        self.ic_step_2(independents)
        self.ic_step_3_r1()
        self.ic_step_3_r2()

    def ic_graph(self):
        return None
|
# 영상의 명암비 조절
# 히스토그램 스트레칭(Histogram stretching) - 영상의 특징을 분석해서 자동으로 기울기를 계산
# 영상의 히스토그램이 그레이스케일 전 구간에서 걸쳐 나타나도록 변경하는 선형 변환 기법
# 정규화 함수
# cv2.normalize(src, dst, alpha=None, beta=None, norm_type=None, dtype=None, mask=None) -> dst
# src : 입력 영상
# dst : 결과 영상 : python에서는 dst를 일반적으로 주지 않는다. None 주면 됨.
# alpha : (노름 정규화인 경우) 목표 노름 값,
# : (원소 값 범위 정규화인 경우) 최소값
# bata : (원소 값 범위 정규화인 경우) 최대값
# norm_type : 정규화 타입. NORM_INF, NORM_L1, NORM_L2, NORM_MINMAX(MIN값과 MAX값을 원하는 값으로 한정).
# dtype : 결과 영상의 타입
# mask : 마스크 영상
# 히스토그램 스트레칭 변환 함수
# 변환 함수의 직선의 방정식 구하기
# 기울기 : 255 / (Gmax - Gmin)
# y 절편 : - 255 x Gmin / (Gmax - Gmin)
# => g(x, y) = 255 / (Gmax - Gmin) x f(x, y) - 255 x Gmin / (Gmax - Gmin)
# = (f(x, y) - Gmin) / (Gmax - Gmin) x 255
import sys
import numpy as np
import cv2
def getGrayHistImage(hist):
    """Render a 256-bin histogram as a 256x100 grayscale image: white
    background with black bars scaled relative to the tallest bin."""
    canvas = np.full((100, 256), 255, dtype=np.uint8)
    peak = np.max(hist)
    for col in range(256):
        bar_top = 100 - int(hist[col, 0] * 100 / peak)
        cv2.line(canvas, (col, 100), (col, bar_top), 0)
    return canvas
# Load the source image as grayscale; bail out early if the file is missing.
src = cv2.imread('Hawkes.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
    print('Image load failed!')
    sys.exit()
# dst = cv2.normalize(src, None, 0, 255, cv2.NORM_MINMAX)
# Histogram stretching computed directly from the linear formula
# g = (f - Gmin) / (Gmax - Gmin) * 255 instead of cv2.normalize.
gmin = np.min(src)
gmax = np.max(src)
dst = np.clip((src - gmin) * 255. / (gmax - gmin), 0, 255).astype(np.uint8)  # 255. keeps the arithmetic in float
# Show source/result histograms side by side for comparison.
hist = cv2.calcHist([src], [0], None, [256], [0, 256])
histImg = getGrayHistImage(hist)
hist2 = cv2.calcHist([dst], [0], None, [256], [0, 256])
histImg2 = getGrayHistImage(hist2)
cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.imshow('histImg', histImg)
cv2.imshow('histImg2', histImg2)
cv2.waitKey()
cv2.destroyAllWindows()
|
import json
import plotly
import pandas as pd
import numpy as np
import wordcloud
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Heatmap, Layout, Figure, Scatter
import plotly.express as px
from sklearn.externals import joblib
from sqlalchemy import create_engine
# import custom MessageTokenizer
from os import path
import sys
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from message_tokenizer import MessageTokenizer
# create app and read in data
app = Flask(__name__)
engine = create_engine('sqlite:///data/DisasterMessages.db')
df = pd.read_sql_table('CleanMessages', engine)
# load model (paths are relative to the process working directory)
model = joblib.load("models/disaster_logit.pkl")
# set up the word cloud data
wc = joblib.load('models/word_cloud.pkl')
# create the word cloud figure here so it doesn't have to be generated on each refresh
wordcloud_fig = px.imshow(wc)
wordcloud_fig.update_layout(
    title=dict(text='150 Most Common Words in Disaster Scenarios', x=0.5),
    width=1000,
    height=1000,
    # Hide axes/grid/hover: the imshow is a picture, not a chart.
    xaxis={'showgrid': False, 'showticklabels': False, 'zeroline': False},
    yaxis={'showgrid': False, 'showticklabels': False, 'zeroline': False},
    hovermode=False
)
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with three plotly figures: genre counts,
    label correlations and the precomputed word cloud."""
    # extract data needed for visuals
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)
    # create visuals
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        }
    ]
    # heatmap of label pairwise correlations
    # drop some cols and replace 1's with np.nan (the self-correlation diagonal)
    heatmap_df = df.drop(['id'], axis=1).corr().replace(1, np.nan)
    cols = list(heatmap_df.columns)
    # grab the lower triangle of the values array - zeroes out the other values
    heatmap_df = np.tril(heatmap_df)
    # replace 0s with nan so the upper triangle renders blank
    heatmap_df[heatmap_df == 0] = np.nan
    heatmap_data = Heatmap(
        z=heatmap_df,
        x=cols,
        y=cols
    )
    heatmap_layout = Layout(
        title=dict(text='Message Label Pairwise Correlations', x=0.5),
        width=1000,
        height=1000
    )
    graphs.append(Figure(data=heatmap_data, layout=heatmap_layout))
    # append the earlier created word cloud figure
    graphs.append(wordcloud_fig)
    # encode plotly graphs in JSON; ids let the template address each div
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
    """Classify the user's query and render the results page.

    Query-string parameters:
      query               -- free-text message to classify
      classification_type -- 'hard_class' (0/1 labels, default) or
                             'soft_class' (per-label probabilities)
    """
    # save user input in query
    query = request.args.get('query', '')
    # classification type comes from the radio button selection
    classification_type = request.args.get('classification_type', 'hard_class')
    if classification_type == 'soft_class':
        # predict label probability for 'soft_class'
        classification_labels = model.predict_proba([query])[0]
    else:
        # 0/1 prediction for 'hard_class'.  This is also the fallback for an
        # unexpected type: previously an unknown value left
        # classification_labels unbound and raised NameError below.
        classification_labels = model.predict([query])[0]
    classification_keys = list(df.columns[4:])
    classification_results = dict(zip(classification_keys, classification_labels))
    # pass the data back to the go.html page
    return render_template(
        'go.html',
        query=query,
        classification_result=classification_results,
        classification_type=classification_type
    )
def main():
    # Run the Flask development server with the debugger/reloader enabled.
    app.run(debug=True)
if __name__ == '__main__':
main() |
import click
from colorama import Fore, Style
from constants import ProjInfo
def print_usage():
    """Show the expected command line together with a concrete example."""
    usage_lines = (
        "USAGE: $ veripypi [PKG_NAME] [GITHUB_AUTHOR_PKG]",
        "EX: $ veripypi shallow-backup alichtman/shallow-backup",
    )
    for line in usage_lines:
        print(line)
def print_version(splash=False):
    """
    Format version differently for CLI and splash screen.
    """
    version_line = "v{} by {} (@{})".format(ProjInfo.VERSION,
                                            ProjInfo.AUTHOR_FULL_NAME,
                                            ProjInfo.AUTHOR_GITHUB)
    if not splash:
        print(version_line)
    else:
        # Splash: red, bright, indented with a trailing blank line.
        print(Fore.RED + Style.BRIGHT + "\t{}\n".format(version_line) + Style.RESET_ALL)
@click.command(context_settings=dict(help_option_names=['-h', '-help', '--help']))
@click.argument('pypi', required=False)
@click.argument('github', required=False)
@click.option('--version', '-v', is_flag=True, default=False, help='Display version and author info.')
def main(pypi, github, version):
    """Verify PYPI package contents against the GITHUB [author/repo] source.

    PYPI is the PyPI package name; GITHUB is the project in author/repo form.
    """
    # click.argument() does not accept a ``help`` keyword (unlike
    # click.option); passing one raised TypeError at import time.  Arguments
    # are documented in the docstring instead, and made optional so that
    # ``-v`` alone works and the usage check below is actually reachable.
    # Input validation
    if version:
        print_version()
    if not pypi or not github:
        print_usage()
        return
    print_version(splash=True)
    # TODO: Contain this so that untrusted code can be run more safely?
    # Maybe do this in a series of checks. First compare the setup.py files,
    # then if those match, run the installer and go from there?
    # TODO: Create sdist from latest release of a GitHub repo of the package to be verified.
    # TODO: Install PyPi version of the package
    # TODO: Compare the two installations
    # TODO: Clean up
    # TODO: Display success/failure.
    pass
if __name__ == '__main__':
main()
|
# https://leetcode.com/problems/koko-eating-bananas/description/
"""
Koko loves to eat bananas. There are n piles of bananas, the ith pile has piles[i] bananas. The guards have gone and will come back in h hours.
Koko can decide her bananas-per-hour eating speed of k. Each hour, she chooses some pile of bananas and eats k bananas from that pile. If the pile has less than k bananas, she eats all of them instead and will not eat any more bananas during this hour.
Koko likes to eat slowly but still wants to finish eating all the bananas before the guards return.
Return the minimum integer k such that she can eat all the bananas within h hours.
Example 1:
Input: piles = [3,6,7,11], h = 8
Output: 4
Example 2:
Input: piles = [30,11,23,4,20], h = 5
Output: 30
Example 3:
Input: piles = [30,11,23,4,20], h = 6
Output: 23
Constraints:
1 <= piles.length <= 104
piles.length <= h <= 109
1 <= piles[i] <= 109
"""
from math import ceil
def min_eating_speed(piles: list[int], h: int) -> int:
    """Return the minimum integer eating speed k that finishes all piles
    within h hours (requires h >= len(piles)).

    Binary-search on the answer: the hours needed are monotonically
    non-increasing in k, so the smallest feasible k is found in
    O(len(piles) * log(max(piles))).
    """
    def finishes_in_time(k: int) -> bool:
        # Each pile takes ceil(p / k) whole hours at speed k.
        return sum(ceil(p / k) for p in piles) <= h

    # max(piles) always suffices (one pile per hour), so it is a tight upper
    # bound; the original used h * max(piles), inflating the search range and
    # wasting ~log2(h) extra iterations.
    lo, hi = 1, max(piles)
    while lo < hi:
        mid = (lo + hi) // 2
        if finishes_in_time(mid):
            hi = mid
        else:
            lo = mid + 1
    return lo
|
from .squeeze1d import Squeeze1d
from .argmax_product import BinaryProductArgmaxSurjection
from .utils import integer_to_base, base_to_integer
|
import os
import gradio as gr
import torchaudio
import time
from datetime import datetime
from tortoise.api import TextToSpeech
from tortoise.utils.audio import load_audio, load_voice, load_voices
VOICE_OPTIONS = [
"random", # special option for random voice
"custom_voice", # special option for custom voice
"disabled", # special option for disabled voice
]
def inference(text, emotion, prompt, voice, mic_audio, voice_b, voice_c, preset, seed):
    """Synthesize *text* with Tortoise and return four (rate, samples) pairs:
    the first reference sample of the selected voice at 22050 Hz, then the
    three generated candidates at 24000 Hz.  Relies on the module-level
    ``tts`` instance created in the __main__ block."""
    # Collect up to three voices; "custom_voice" is handled via the mic
    # recording instead of a named voice directory.
    if voice != "custom_voice":
        voices = [voice]
    else:
        voices = []
    if voice_b != "disabled":
        voices.append(voice_b)
    if voice_c != "disabled":
        voices.append(voice_c)
    # Prepend an emotion/prompt hint in brackets; Tortoise conditions on it.
    if emotion != "None/Custom":
        text = f"[I am really {emotion.lower()},] {text}"
    elif prompt.strip() != "":
        text = f"[{prompt},] {text}"
    c = None
    if voice == "custom_voice":
        if mic_audio is None:
            raise gr.Error("Please provide audio from mic when choosing custom voice")
        c = load_audio(mic_audio, 22050)
    if len(voices) == 1 or len(voices) == 0:
        if voice == "custom_voice":
            voice_samples, conditioning_latents = [c], None
        else:
            voice_samples, conditioning_latents = load_voice(voice)
    else:
        voice_samples, conditioning_latents = load_voices(voices)
        if voice == "custom_voice":
            voice_samples.extend([c])
    sample_voice = voice_samples[0] if len(voice_samples) else None
    start_time = time.time()
    # k=3 requests three candidate generations per call.
    gen, _ = tts.tts_with_preset(
        text,
        voice_samples=voice_samples,
        conditioning_latents=conditioning_latents,
        preset=preset,
        use_deterministic_seed=seed,
        return_deterministic_state=True,
        k=3,
    )
    # Append a run record to the shared log file.
    with open("Tortoise_TTS_Runs.log", "a") as f:
        f.write(
            f"{datetime.now()} | Voice: {','.join(voices)} | Text: {text} | Quality: {preset} | Time Taken (s): {time.time()-start_time} | Seed: {seed}\n"
        )
    return (
        (22050, sample_voice.squeeze().cpu().numpy()),
        (24000, gen[0].squeeze().cpu().numpy()),
        (24000, gen[1].squeeze().cpu().numpy()),
        (24000, gen[2].squeeze().cpu().numpy()),
    )
def main():
    """Assemble the Gradio UI (inputs, voice pickers, outputs) and launch it
    with a public share link, dispatching submissions to ``inference``."""
    text = gr.Textbox(lines=4, label="Text:")
    emotion = gr.Radio(
        ["None/Custom", "Happy", "Sad", "Angry", "Disgusted", "Arrogant"],
        value="None/Custom",
        label="Select emotion:",
        type="value",
    )
    prompt = gr.Textbox(lines=1, label="Enter prompt if [Custom] emotion:")
    preset = gr.Radio(
        ["ultra_fast", "fast", "standard", "high_quality"],
        value="fast",
        label="Preset mode (determines quality with tradeoff over speed):",
        type="value",
    )
    # Voice choices: every directory under tortoise/voices plus the special
    # options (random / custom_voice / disabled).
    voice = gr.Dropdown(
        os.listdir(os.path.join("tortoise", "voices")) + VOICE_OPTIONS,
        value="angie",
        label="Select voice:",
        type="value",
    )
    mic_audio = gr.Audio(
        label="Record voice (when selected custom_voice):",
        source="microphone",
        type="filepath",
    )
    voice_b = gr.Dropdown(
        os.listdir(os.path.join("tortoise", "voices")) + VOICE_OPTIONS,
        value="disabled",
        label="(Optional) Select second voice:",
        type="value",
    )
    voice_c = gr.Dropdown(
        os.listdir(os.path.join("tortoise", "voices")) + VOICE_OPTIONS,
        value="disabled",
        label="(Optional) Select third voice:",
        type="value",
    )
    seed = gr.Number(value=0, precision=0, label="Seed (for reproducibility):")
    selected_voice = gr.Audio(label="Sample of selected voice (first):")
    output_audio_1 = gr.Audio(label="Output [Candidate 1]:")
    output_audio_2 = gr.Audio(label="Output [Candidate 2]:")
    output_audio_3 = gr.Audio(label="Output [Candidate 3]:")
    # Input order must match inference()'s parameter order.
    interface = gr.Interface(
        fn=inference,
        inputs=[
            text,
            emotion,
            prompt,
            voice,
            mic_audio,
            voice_b,
            voice_c,
            preset,
            seed,
        ],
        outputs=[selected_voice, output_audio_1, output_audio_2, output_audio_3],
    )
    interface.launch(share=True)
if __name__ == "__main__":
tts = TextToSpeech()
with open("Tortoise_TTS_Runs.log", "a") as f:
f.write(
f"\n\n-------------------------Tortoise TTS Logs, {datetime.now()}-------------------------\n"
)
main()
|
import datetime
import os
import random
import string
import warnings
import time
from math import sqrt
import numpy as np
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage
PROJECT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
OUTPUT_DIR = os.path.join(PROJECT_DIR,"output")
s_timestamp_prefix = ""
debug = False
class Timer(object):
    """Context manager that prints the wall-clock time spent in its block,
    optionally prefixed with a name (Python 2 print syntax)."""
    def __init__(self, name=None):
        self.name = name
    def __enter__(self):
        # Record the start time when the with-block is entered.
        self.tstart = time.time()
    def __exit__(self, type, value, traceback):
        if self.name:
            print '[%s]' % self.name,
        print 'Elapsed: %s' % (time.time() - self.tstart)
def s_timestamp():
    """Return a unique-ish run stamp: the module prefix, six random
    alphanumeric characters, then the current time as H_M_S d_m_y."""
    token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
    stamp = datetime.datetime.now().strftime("%H_%M_%S %d_%m_%y")
    return s_timestamp_prefix + token + '_' + stamp
r_namespaces = {}
five_pin_setups = ["PulseInput","PulseInput+OneInhibition","PulseInput+TwoInhibition","OscilationInput","OcilationInput+OneInhibition"]
def load_r_file(filename, namespace):
    """Load an R source file as an rpy2 anonymous package and cache it in the
    module-level ``r_namespaces`` dict under *namespace*."""
    if namespace not in r_namespaces:
        # Enable automatic numpy <-> R conversion before evaluating R code.
        import rpy2.robjects.numpy2ri
        rpy2.robjects.numpy2ri.activate()
        # Bare filenames are resolved relative to the project's R sources.
        if PROJECT_DIR not in filename:
            filename = os.path.join(PROJECT_DIR, 'r_src', 'forAndrej', filename)
        with open(filename, 'r') as pout:
            source = pout.read()
        res = SignatureTranslatedAnonymousPackage(source, namespace)
        r_namespaces[namespace] = res
    return r_namespaces[namespace]
def getFeedbackAndForwardLinks(M):
    """Split the entries of square matrix *M* into feedback links (strictly
    below the first sub-diagonal, i - k > 1) and the remaining links.

    A flat vector of square length (either orientation) is reshaped into its
    square form first.  Raises ValueError when M cannot be interpreted as a
    square matrix.
    """
    if M.ndim == 2:
        n, m = M.shape
    else:
        n, m = M.shape[0], 1
    if n != m:
        # The reshape dimensions must be ints: the original passed the float
        # results of sqrt() directly, which modern numpy rejects.
        if n == 1 and int(sqrt(m)) == sqrt(m):
            side = int(sqrt(m))
            M = np.reshape(M, (side, side))
            n = side
        elif m == 1 and int(sqrt(n)) == sqrt(n):
            side = int(sqrt(n))
            M = np.reshape(M, (side, side))
            n = side
        else:
            raise ValueError('input matrix must be square')
    feedback = [M[i, k] for i in range(n) for k in range(n) if i - k > 1]
    forward = [M[i, k] for i in range(n) for k in range(n) if i - k <= 1]
    return feedback, forward
def getFeedbackLinks(M):
    """Return only the feedback links of *M* (see getFeedbackAndForwardLinks)."""
    feedback, _forward = getFeedbackAndForwardLinks(M)
    return feedback
def getForwardLinks(M):
    """Return only the forward links of *M* (see getFeedbackAndForwardLinks)."""
    _feedback, forward = getFeedbackAndForwardLinks(M)
    return forward
def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used."""
    import functools

    @functools.wraps(func)
    def newFunc(*args, **kwargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **kwargs)
    # functools.wraps copies __name__/__doc__/__dict__ (as the original did
    # by hand) plus __module__ and sets __wrapped__ for introspection.
    return newFunc
def getSubplots(n):
    """Return a (rows, cols) grid just large enough for *n* subplots."""
    layouts = {1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2), 5: (2, 3), 6: (2, 3), 7: (3, 3), 8: (3, 3), 9: (3, 3),
               10: (3, 4), 11: (3, 4), 12: (4, 4), 13: (4, 4), 14: (4, 4), 15: (4, 4), 16: (4, 4)}
    if n in layouts:
        return layouts[n]
    # Outside the precomputed table: the nearest (near-)square grid.
    rows = int(sqrt(n))
    if rows * rows == n:
        return rows, rows
    return rows, rows + 1
|
import PyQt5.QtWidgets as qtw
import PyQt5.QtCore as qtc
import PyQt5.QtGui as qtg
import uuid
from mate.ui.views.map.layer.obstacleData_config_view \
import Ui_ObstacleDataConfig
from mate.ui.views.map.layer.layer_config import LayerConfig, LayerConfigMeta
import mate.net.utils as net_utils
import mate.ui.utils as ui_utils
import mate.net.nao as nao
class ObstacleDataConfig(qtw.QWidget, LayerConfig, metaclass=LayerConfigMeta):
    """Configuration widget for the obstacle-data map layer.

    Binds the layer's persisted ``settings`` dict to the Qt form generated in
    ``Ui_ObstacleDataConfig`` and keeps the two in sync via Accept/Discard.
    """

    def __init__(self, layer, parent, update_callback, nao):
        """Build the form, seed default settings, and wire up the UI.

        :param layer: mutable dict describing the layer ("name", "enabled",
            "settings")
        :param parent: parent Qt widget
        :param update_callback: called after settings are accepted so the map
            can redraw
        :param nao: connection object used to populate the debug-key combos
        """
        super(ObstacleDataConfig, self).__init__(parent)
        self.layer = layer
        self.update_callback = update_callback
        self.nao = nao
        # Unique id used to (un)subscribe this widget from debug messages.
        self.identifier = uuid.uuid4()
        self.ui = Ui_ObstacleDataConfig()
        self.ui.setupUi(self)
        # First time this layer is configured: install default settings.
        if self.layer["settings"] is None:
            self.layer["settings"] = {
                "center_x": 5.2,
                "center_y": -3.7,
                "transformation": {
                    "key": "Brain.RobotPosition",
                    "key_lambda": 'output = input["pose"]'
                },
                "obstacles": {
                    "key": "Brain.TeamObstacleData",
                    "key_lambda": 'output = input["obstacles"]',
                    "goalPostColor": "#000000",
                    "unknownColor": "#ff5500",
                    "anonymousRobotColor": "#ff00ff",
                    "hostileRobotColor": "#ff0000",
                    "teamRobotColor": "#0000ff",
                    "fallenAnonymousRobotColor": "#7f007f",
                    "fallenHostileRobotColor": "#7f0000",
                    "fallenTeamRobotColor": "#00007f",
                    "ballColor": "#000000",
                    "freeKickAreaColor": "#ffffff",
                    "penWidth": 0.03
                }
            }
        # Maps each settings key to a (getter, setter) pair: the getter reads
        # the current widget state into a settings value, the setter pushes a
        # settings value back into the widgets.
        self.settings_to_ui = {
            "center_x": (
                lambda: self.ui.spin_center_x.value(),
                lambda value: self.ui.spin_center_x.setValue(value)),
            "center_y": (
                lambda: self.ui.spin_center_y.value(),
                lambda value: self.ui.spin_center_y.setValue(value)),
            "transformation": (
                lambda: {
                    "key": self.ui.cbx_TransformationKey.currentText(),
                    "key_lambda":
                        self.ui.edit_TransformationKeyLambda.toPlainText()
                },
                lambda settings: [
                    self.ui.cbx_TransformationKey.setCurrentText(
                        settings["key"]),
                    self.ui.edit_TransformationKeyLambda.setPlainText(
                        settings["key_lambda"])
                ]
            ),
            "obstacles": (
                lambda: {
                    "key": self.ui.cbx_ObstacleDataKey.currentText(),
                    "key_lambda":
                        self.ui.edit_ObstacleDataKeyLambda.toPlainText(),
                    "goalPostColor":
                        self.ui.edit_goalPostColor.text(),
                    "unknownColor":
                        self.ui.edit_unknownColor.text(),
                    "anonymousRobotColor":
                        self.ui.edit_anonymousRobotColor.text(),
                    "hostileRobotColor":
                        self.ui.edit_hostileRobotColor.text(),
                    "teamRobotColor":
                        self.ui.edit_teamRobotColor.text(),
                    "fallenAnonymousRobotColor":
                        self.ui.edit_fallenAnonymousRobotColor.text(),
                    "fallenHostileRobotColor":
                        self.ui.edit_fallenHostileRobotColor.text(),
                    "fallenTeamRobotColor":
                        self.ui.edit_fallenTeamRobotColor.text(),
                    "ballColor":
                        self.ui.edit_ballColor.text(),
                    "freeKickAreaColor":
                        self.ui.edit_freeKickAreaColor.text(),
                    "penWidth":
                        self.ui.spin_penWidth.value()},
                lambda settings: [
                    self.ui.cbx_ObstacleDataKey.setCurrentText(
                        settings["key"]),
                    self.ui.edit_ObstacleDataKeyLambda.setPlainText(
                        settings["key_lambda"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_goalPostColor,
                        settings["goalPostColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_unknownColor,
                        settings["unknownColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_anonymousRobotColor,
                        settings["anonymousRobotColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_hostileRobotColor,
                        settings["hostileRobotColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_teamRobotColor,
                        settings["teamRobotColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_fallenAnonymousRobotColor,
                        settings["fallenAnonymousRobotColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_fallenHostileRobotColor,
                        settings["fallenHostileRobotColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_fallenTeamRobotColor,
                        settings["fallenTeamRobotColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_ballColor,
                        settings["ballColor"]),
                    ui_utils.reset_textField_color(
                        self.ui.edit_freeKickAreaColor,
                        settings["freeKickAreaColor"]),
                    self.ui.spin_penWidth.setValue(
                        settings["penWidth"])]
            )
        }
        # Substring matching + popup completion for the debug-key combos.
        self.ui.cbx_TransformationKey.completer().setFilterMode(
            qtc.Qt.MatchContains)
        self.ui.cbx_TransformationKey.completer().setCompletionMode(
            qtw.QCompleter.PopupCompletion)
        self.ui.cbx_ObstacleDataKey.completer().setFilterMode(
            qtc.Qt.MatchContains)
        self.ui.cbx_ObstacleDataKey.completer().setCompletionMode(
            qtw.QCompleter.PopupCompletion)
        # Pair each color line-edit with its picker button.
        ui_utils.init_Color_UI(
            self.ui.btn_goalPostColor,
            self.ui.edit_goalPostColor)
        ui_utils.init_Color_UI(
            self.ui.btn_unknownColor,
            self.ui.edit_unknownColor)
        ui_utils.init_Color_UI(
            self.ui.btn_anonymousRobotColor,
            self.ui.edit_anonymousRobotColor)
        ui_utils.init_Color_UI(
            self.ui.btn_hostileRobotColor,
            self.ui.edit_hostileRobotColor)
        ui_utils.init_Color_UI(
            self.ui.btn_teamRobotColor,
            self.ui.edit_teamRobotColor)
        ui_utils.init_Color_UI(
            self.ui.btn_fallenAnonymousRobotColor,
            self.ui.edit_fallenAnonymousRobotColor)
        ui_utils.init_Color_UI(
            self.ui.btn_fallenHostileRobotColor,
            self.ui.edit_fallenHostileRobotColor)
        ui_utils.init_Color_UI(
            self.ui.btn_fallenTeamRobotColor,
            self.ui.edit_fallenTeamRobotColor)
        ui_utils.init_Color_UI(
            self.ui.btn_ballColor,
            self.ui.edit_ballColor)
        ui_utils.init_Color_UI(
            self.ui.btn_freeKickAreaColor,
            self.ui.edit_freeKickAreaColor)
        self.ui.btnAccept.pressed.connect(self.accept)
        self.ui.btnDiscard.pressed.connect(self.discard)
        # Populate the widgets from the (possibly just-defaulted) settings.
        self.reset_widgets()
        if self.nao.is_connected():
            self.connect(self.nao)

    def connect(self, nao: nao.Nao):
        """Attach to *nao* and subscribe for debug-key list updates."""
        self.nao = nao
        self.fill_cbx()
        self.nao.debug_protocol.subscribe_msg_type(
            net_utils.DebugMsgType.list, self.identifier, self.fill_cbx)

    def closeEvent(self, event):
        """Qt close hook: drop the debug-list subscription if still connected."""
        if self.nao.is_connected():
            self.nao.debug_protocol.unsubscribe_msg_type(
                net_utils.DebugMsgType.list, self.identifier)

    def fill_cbx(self):
        """Fill both key combo boxes from the currently available debug data."""
        ui_utils.init_cbx(
            self.ui.cbx_TransformationKey,
            self.layer["settings"]["transformation"]["key"],
            self.nao.debug_data)
        ui_utils.init_cbx(
            self.ui.cbx_ObstacleDataKey,
            self.layer["settings"]["obstacles"]["key"],
            self.nao.debug_data)

    def reset_widgets(self):
        """Push the stored layer state back into all widgets (discard edits)."""
        self.ui.nameObstacleDataEdit.setText(self.layer["name"])
        self.ui.enabledCheckBox.setChecked(self.layer["enabled"])
        for key in self.layer["settings"]:
            self.settings_to_ui[key][1](self.layer["settings"][key])

    def accept(self):
        """Read every widget back into the layer dict and trigger a redraw."""
        self.layer["name"] = self.ui.nameObstacleDataEdit.text()
        self.layer["enabled"] = self.ui.enabledCheckBox.isChecked()
        for key in self.layer["settings"]:
            self.layer["settings"][key] = self.settings_to_ui[key][0]()
        self.update_callback()

    def discard(self):
        """Throw away pending edits by reloading widgets from stored state."""
        self.reset_widgets()
|
import urllib
import json
import pprint
import mysql.connector
#
#create table information ( id int, score int, author varchar(500), title varchar(500), venue varchar(500), volume varchar(500), pages int, year int, type varchar(255), site varchar(255))
cnx = mysql.connector.connect(user='root', host='127.0.0.1', database='bigdata')
mycursor = cnx.cursor()
small_letters = map(chr, range(ord('a'), ord('z')+1))
for c in small_letters:
link = "http://dblp.org/search/publ/api?q="+c+"&c=100000&format=json"
f = urllib.urlopen(link)
str = f.read()
obj = json.loads(str)
for i in range(0,len(obj['result']['hits']['hit'])):
keys = (obj['result']['hits']['hit'])[i].keys()
if '@id' in keys:
id = obj['result']['hits']['hit'][i]['@id']
else :
continue
if '@score' in keys:
score = obj['result']['hits']['hit'][i]['@score']
else :
continue
if 'author' in (obj['result']['hits']['hit'][i]['info']['authors']).keys():
author = obj['result']['hits']['hit'][i]['info']['authors']['author'][0]
else :
continue
keys = (obj['result']['hits']['hit'][i]['info']).keys()
if 'title' in keys:
if (obj['result']['hits']['hit'][i]['info']['title']).isnumeric():
continue
else:
title = obj['result']['hits']['hit'][i]['info']['title']
else:
continue
if 'venue' in keys:
venue = obj['result']['hits']['hit'][i]['info']['venue']
else :
continue
if 'volume' in keys:
volume = obj['result']['hits']['hit'][i]['info']['volume']
else :
continue
if 'pages' in keys:
pages = 2#obj['result']['hits']['hit'][i]['info']['pages']
else :
continue
if 'year' in keys:
year = obj['result']['hits']['hit'][i]['info']['year']
else :
continue
if 'type' in keys:
type = obj['result']['hits']['hit'][i]['info']['type']
else :
continue
if 'ee' in keys:
site = obj['result']['hits']['hit'][i]['info']['ee']
else :
continue
print i
sql = "SELECT * FROM information where id = "+id;
mycursor.execute(sql)
myresult = mycursor.fetchall()
if myresult == []:
sql = "INSERT INTO information (id, score, author, title, venue, volume, pages, year, type, site) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
val = (id, score, author, title, venue, volume, 10, year, type, site)
try:
mycursor.execute(sql, val)
print 'add'
except:
continue
else :
print 'added'
print "\n"
cnx.commit()
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
class TianqiPipeline(object):
    """No-op pipeline: every scraped item is passed through unchanged."""

    def process_item(self, item, spider):
        """Return *item* untouched."""
        return item
class MojiPipeline(object):
    """Re-keys a Moji weather item as {province+city: {date: [weather, high, low]}}."""

    def process_item(self, item, spider):
        """Return the re-shaped item.

        BUG FIX: the original used ``yield``, which turns process_item into a
        generator function; Scrapy expects the processed item to be
        *returned*, so downstream pipelines received a generator object
        instead of the dict.

        :param item: scraped item with parallel date/weather/high/low lists
        :param spider: the spider that produced the item (unused)
        :return: dict keyed by province+city name
        """
        region = item["shengfenName"][0] + item["shiquName"][0]
        daily = {}
        dates = item["date"]
        weathers = item["weather"]
        highs = item["high"]
        lows = item["low"]
        # Zip the parallel per-day lists into one record per date.
        for num in range(len(dates)):
            daily[dates[num]] = [weathers[num], highs[num], lows[num]]
        return {region: daily}
class MongoPipeline(object):
    """Persists scraped items into MongoDB, one collection per item class."""

    def __init__(self, mongo_url, mongo_db):
        # Connection settings only; the client is created in open_spider.
        self.mongo_url = mongo_url
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's MONGO_URL / MONGO_DB settings."""
        return cls(
            mongo_url=crawler.settings.get('MONGO_URL'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    def open_spider(self, spider):
        """Open the MongoDB connection when the spider starts."""
        self.client = pymongo.MongoClient(self.mongo_url)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        """Insert *item* into the collection named after its class.

        BUG FIX: Collection.insert() is deprecated (and removed in pymongo
        4.x); insert_one() is the supported single-document write. The item
        is copied to a plain dict so insert_one's _id assignment does not
        mutate (or fail on) the Scrapy Item.
        """
        name = item.__class__.__name__
        self.db[name].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        """Release the MongoDB connection when the spider finishes."""
        self.client.close()
# -*- coding:utf-8 -*-
# @Desc : 图片验证码与短信验证码
# @Author : Administrator
# @Date : 2019-09-20 20:05
# from iHome.tasks.task_sms import send_sms
from iHome.tasks.sms.tasks import send_sms
from . import api
from iHome.utils.captcha.captcha import captcha
from iHome import redis_store, constants
from flask import current_app, jsonify, make_response, request
from iHome.utils.response_code import RET
from iHome.models import User
from iHome.libs.yuntongxun.sms import CCP
import random
# GET http://127.0.0.1:5000/api/v1.0/image_codes/<image_code_id>
@api.route('/image_codes/<image_code_id>')
def get_image_code(image_code_id):
    """Generate an image captcha, cache its answer in redis, return the image.

    :param image_code_id: client-generated identifier for this captcha
    :return: the captcha image on success, a JSON error payload on failure
    """
    # name: internal captcha name, text: the real answer, image_data: image bytes
    name, text, image_data = captcha.generate_captcha()
    # Store the real answer under a per-id string key with a TTL so the SMS
    # endpoint can later validate the user's input. setex sets the value and
    # the expiry atomically (a redis hash would only allow a whole-hash TTL,
    # not one per captcha).
    try:
        redis_store.setex("image_code_%s" % image_code_id, constants.IMAGE_CODE_REDIS_EXPIRE, text)
    except Exception as e:
        # Log the failure for diagnosis.
        current_app.logger.error(e)
        # BUG FIX: the keyword was misspelled "erron"; clients check the
        # "errno" field, so the error code was silently dropped.
        return jsonify(errno=RET.DATAERR, errmsg="图片验证码保存失败")
    # Return the raw image with an image content type.
    resp = make_response(image_data)
    resp.headers['Content-Type'] = "image/jpg"
    return resp
# # GET http://127.0.0.1:5000/api/v1.0/sms_codes/<mobile>?image_code=xxx&image_code_id=yyy
# @api.route('/sms_codes/<re(r"1[34578]\d{9}"):mobile>')
# def get_sms_code(mobile):
# """获取短信验证码"""
#
# # 1.获取参数
# image_code = request.args.get("image_code")
# image_code_id = request.args.get("image_code_id")
#
# # 2.校验参数
# # 参数是否完整
# if not all([image_code, image_code_id]):
# # 表示参数不完整
# return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
#
# # 3.业务逻辑处理
# # 从redis中获取真实的图片验证码值
# try:
# real_image_code = redis_store.get("image_code_%s" % image_code_id)
# except Exception as e:
# # 记录日志
# current_app.logger.error(e)
# return jsonify(errno=RET.DBERR, errmsg="redis数据库异常")
#
# # 判断获取的真实图片验证码是否过期
# if real_image_code is None:
# # 表示图片验证码没有或过期
# return jsonify(errno=RET.NODATA, errmsg="图片验证码失效")
#
# # 删除redis中的图片验证码: 防止用户使用同一个图片验证码验证多次(使用多次)
# try:
# redis_store.delete("image_code_%s" % image_code_id)
# except Exception as e:
# # 记录日志
# current_app.logger.error(e)
#
# # 与用户填写的验证码进行对比
# if real_image_code.lower() != image_code.lower():
# # 表示用户填写错误
# return jsonify(errno=RET.DATAERR, errmsg="图片验证码错误")
#
# # 判断对于这个手机号的操作,在60秒内有没有之前的记录,如果有,则认为是用户操作频繁,不接受处理
# try:
# send_flag = redis_store.get("send_sms_code_%s" % mobile)
# except Exception as e:
# # 记录日志
# current_app.logger.error(e)
# else:
# if send_flag is not None:
# # 表示在60秒内之前有发送过的记录
# return jsonify(errno=RET.REQERR, errmsg="请求过于频繁,请60秒后再试")
#
# # 判断注册的手机号是否存在
# try:
# user = User.query.filter_by(mobile=mobile).first()
# except Exception as e:
# # 记录日志
# current_app.logger.error(e)
# else:
# if user is not None:
# # 表示手机号存在
# return jsonify(errno=RET.DATAEXIST, errmsg="手机号已存在")
#
# # 如果手机号不存在,则生成短信验证码
# sms_code = "%06d" % random.randint(0, 999999)
#
# # 保存真实的短信验证码到redis中
# try:
# redis_store.setex("sms_code_%s" % mobile, constants.SMS_CODE_REDIS_EXPIRE, sms_code)
# # 保存发送给这个手机号的验证码记录,防止用户在60秒内再次触发发送短信验证码的操作
# # redis_store.setex("send_sms_code_%s" % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)
# redis_store.setex("send_sms_code_%s" % mobile, constants.SEND_SMS_CODE_INTERVAL, "保存发送验证码记录,防止60秒内再次发送的操作")
# except Exception as e:
# # 记录日志
# current_app.logger.error(e)
# return jsonify(errno=RET.DATAERR, errmsg="保存短信验证码异常")
#
# # 发送短信
# try:
# cpp = CCP()
# result = cpp.send_Template_SMS(mobile, [sms_code, int(constants.SMS_CODE_REDIS_EXPIRE / 60)], 1)
# except Exception as e:
# # 记录日志
# current_app.logger.error(e)
# return jsonify(errno=RET.THIRDERR, errmsg="发送异常")
#
# # 4.返回值
# if result == 0:
# # 表示短信发送成功
# return jsonify(errno=RET.OK, errmsg="发送成功")
# else:
# # 表示短信发送失败
# return jsonify(errno=RET.THIRDERR, errmsg="发送失败")
# GET http://127.0.0.1:5000/api/v1.0/sms_codes/<mobile>?image_code=xxx&image_code_id=yyy
@api.route('/sms_codes/<re(r"1[34578]\d{9}"):mobile>')
def get_sms_code(mobile):
    """Send an SMS verification code asynchronously via a celery task.

    :param mobile: Chinese mobile number, pre-validated by the route regex
    :return: JSON payload with errno/errmsg describing the outcome
    """
    # 1. Fetch parameters: the user's captcha answer and its id.
    image_code = request.args.get("image_code")
    image_code_id = request.args.get("image_code_id")
    # 2. Validate: both parameters must be present.
    if not all([image_code, image_code_id]):
        # Incomplete parameters.
        return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
    # 3. Business logic.
    # Look up the real captcha text that get_image_code() stored in redis.
    try:
        real_image_code = redis_store.get("image_code_%s" % image_code_id)
    except Exception as e:
        # Log and report the redis failure.
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="redis数据库异常")
    # A missing value means the captcha never existed or has expired.
    if real_image_code is None:
        return jsonify(errno=RET.NODATA, errmsg="图片验证码失效")
    # Delete the captcha so the same answer cannot be replayed.
    try:
        redis_store.delete("image_code_%s" % image_code_id)
    except Exception as e:
        # Best effort: log and carry on even if the delete fails.
        current_app.logger.error(e)
    # Compare the user's answer with the stored text (case-insensitive).
    if real_image_code.lower() != image_code.lower():
        # Wrong captcha answer.
        return jsonify(errno=RET.DATAERR, errmsg="图片验证码错误")
    # Rate limit: refuse if an SMS was already sent to this number within the
    # last 60 seconds (the send_sms_code_<mobile> key is still alive).
    try:
        send_flag = redis_store.get("send_sms_code_%s" % mobile)
    except Exception as e:
        # Lookup failure is non-fatal; just log it.
        current_app.logger.error(e)
    else:
        if send_flag is not None:
            # A code was sent within the last 60 seconds.
            return jsonify(errno=RET.REQERR, errmsg="请求过于频繁,请60秒后再试")
    # Reject numbers that are already registered.
    try:
        user = User.query.filter_by(mobile=mobile).first()
    except Exception as e:
        # DB failure is non-fatal for this check; just log it.
        current_app.logger.error(e)
    else:
        if user is not None:
            # The mobile number already has an account.
            return jsonify(errno=RET.DATAEXIST, errmsg="手机号已存在")
    # Generate a zero-padded, six-digit verification code.
    sms_code = "%06d" % random.randint(0, 999999)
    # Persist the real SMS code with a TTL for later verification.
    try:
        redis_store.setex("sms_code_%s" % mobile, constants.SMS_CODE_REDIS_EXPIRE, sms_code)
        # Record that a code was just sent so the 60s rate limit above works.
        # redis_store.setex("send_sms_code_%s" % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)
        redis_store.setex("send_sms_code_%s" % mobile, constants.SEND_SMS_CODE_INTERVAL, "保存发送验证码记录,防止60秒内再次发送的操作")
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DATAERR, errmsg="保存短信验证码异常")
    # Send the SMS through celery; delay() enqueues and returns immediately.
    send_sms.delay(mobile, [sms_code, int(constants.SMS_CODE_REDIS_EXPIRE / 60)], 1)
    # 4. Success response.
    return jsonify(errno=RET.OK, errmsg="发送成功")
|
"""Codewars test converted to pytest and expanded."""
import pytest
# (input, expected) pairs: opposite() should negate its argument.
numbers_table = [
    (-1, 1),
    (0, 0),
    (1, -1),
]
@pytest.mark.parametrize('input, output', numbers_table)
def test_opposite(input, output):
    """opposite(n) returns the arithmetic negation of n."""
    from opposite import opposite
    assert opposite(input) == output
|
"""给定一个整数数组,判断是否存在重复元素。
如果任何值在数组中出现至少两次,函数返回 true。如果数组中每个元素都不相同,则返回 false。
示例 1:
输入: [1,2,3,1]
输出: true
"""
class Solution:
    """LeetCode 217 – Contains Duplicate."""

    def containsDuplicate(self, nums):
        """Return True when any value occurs in *nums* more than once.

        De-duplicating through a set shrinks the length exactly when a
        repeated value exists.
        """
        return len(set(nums)) < len(nums)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on 17-10-19 8:53 PM
@author: limengyan
"""
import os, time, pickle, random, time
from datetime import datetime
import numpy as np
from time import localtime, strftime
import logging, scipy
import tensorflow as tf
import tensorlayer as tl
from model import *
from utils import *
from config import config, log_config
batch_size = config.TRAIN.batch_size  # images per training step
lr_init = config.TRAIN.lr_init  # initial learning rate
beta1 = config.TRAIN.beta1  # Adam first-moment decay rate
## initialize G
n_epoch_init = config.TRAIN.n_epoch_init  # epochs for the MSE-only generator pretraining stage
## adversarial learning (SRGAN)
n_epoch = config.TRAIN.n_epoch  # epochs for the main training stage
lr_decay = config.TRAIN.lr_decay  # multiplicative learning-rate decay factor
decay_every = config.TRAIN.decay_every  # apply the decay every this many epochs
ni = int(np.sqrt(batch_size))  # grid side length used when tiling sample images
def train():
    """Train the super-resolution generator.

    Stage 1 pretrains the generator with an MSE loss for ``n_epoch_init``
    epochs; stage 2 continues training for ``n_epoch`` epochs. Checkpoints
    and sample images are written under ``checkpoint/`` and ``samples/``.
    """
    ## create folders to save result images and trained model
    save_dir_ginit = "samples/{}_ginit".format(tl.global_flag['mode'])
    save_dir_gan = "samples/{}_gan".format(tl.global_flag['mode'])
    tl.files.exists_or_mkdir(save_dir_ginit)
    tl.files.exists_or_mkdir(save_dir_gan)
    checkpoint_dir = "checkpoint" # checkpoint_resize_conv
    tl.files.exists_or_mkdir(checkpoint_dir)
    ###====================== PRE-LOAD DATA ===========================###
    # File lists are sorted so HR/LR pairs line up by index.
    train_hr_img_list = sorted(tl.files.load_file_list(path=config.TRAIN.hr_img_path, regx='.*.png', printable=False))
    train_lr_img_list = sorted(tl.files.load_file_list(path=config.TRAIN.lr_img_path, regx='.*.png', printable=False))
    valid_hr_img_list = sorted(tl.files.load_file_list(path=config.VALID.hr_img_path, regx='.*.png', printable=False))
    valid_lr_img_list = sorted(tl.files.load_file_list(path=config.VALID.lr_img_path, regx='.*.png', printable=False))
    ## If your machine have enough memory, please pre-load the whole train set.
    train_hr_imgs = read_all_imgs(train_hr_img_list, path=config.TRAIN.hr_img_path, n_threads=32)
    ###========================== DEFINE MODEL ============================###
    ## train inference
    # LR input is 96x96; the target HR image is 384x384 (4x upscaling).
    t_image = tf.placeholder('float32', [batch_size, 96, 96, 3], name='t_image_input_to_SRGAN_generator')
    t_target_image = tf.placeholder('float32', [batch_size, 384, 384, 3], name='t_target_image')
    net_g = SR(t_image, is_train=True, reuse=False)
    net_g.print_params(False)
    ## test inference
    # Same weights, inference mode (reuse=True disables a second variable set).
    net_g_test = SR(t_image, is_train=False, reuse=True)
    ###========================== DEFINE TRAIN OPS ==========================###
    mse_loss = tl.cost.mean_squared_error(net_g.outputs , t_target_image, is_mean=True)
    # vgg_loss = 2e-6 * tl.cost.mean_squared_error(vgg_predict_emb.outputs, vgg_target_emb.outputs, is_mean=True)
    g_vars = tl.layers.get_variables_with_name('SR', True, True)
    with tf.variable_scope('learning_rate'):
        # Non-trainable variable so the LR can be reassigned during training.
        lr_v = tf.Variable(lr_init, trainable=False)
    ## Pretrain
    g_optim_init = tf.train.AdamOptimizer(lr_v, beta1=beta1).minimize(mse_loss, var_list=g_vars)
    ###========================== RESTORE MODEL =============================###
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
    tl.layers.initialize_global_variables(sess)
    # Prefer the main-stage checkpoint; fall back to the pretraining one.
    if tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/g_{}.npz'.format(tl.global_flag['mode']), network=net_g) is False:
        tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/g_{}_init.npz'.format(tl.global_flag['mode']), network=net_g)
    ###============================= TRAINING ===============================###
    # Fixed sample batch used for periodic visual evaluation.
    sample_imgs = train_hr_imgs[0:batch_size]
    sample_imgs_384 = tl.prepro.threading_data(sample_imgs, fn=crop_sub_imgs_fn, is_random=False)
    print('sample HR sub-image:',sample_imgs_384.shape, sample_imgs_384.min(), sample_imgs_384.max())
    sample_imgs_96 = tl.prepro.threading_data(sample_imgs_384, fn=downsample_fn)
    print('sample LR sub-image:', sample_imgs_96.shape, sample_imgs_96.min(), sample_imgs_96.max())
    tl.vis.save_images(sample_imgs_96, [ni, ni], save_dir_ginit+'/_train_sample_96.png')
    tl.vis.save_images(sample_imgs_384, [ni, ni], save_dir_ginit+'/_train_sample_384.png')
    tl.vis.save_images(sample_imgs_96, [ni, ni], save_dir_gan+'/_train_sample_96.png')
    tl.vis.save_images(sample_imgs_384, [ni, ni], save_dir_gan+'/_train_sample_384.png')
    ###========================= initialize G ====================###
    ## fixed learning rate
    sess.run(tf.assign(lr_v, lr_init))
    print(" ** fixed learning rate: %f (for init G)" % lr_init)
    for epoch in range(0, n_epoch_init+1):
        epoch_time = time.time()
        total_mse_loss, n_iter = 0, 0
        ## If your machine have enough memory, please pre-load the whole train set.
        for idx in range(0, len(train_hr_imgs), batch_size):
            step_time = time.time()
            # Random HR crops plus their downsampled LR counterparts.
            b_imgs_384 = tl.prepro.threading_data(
                    train_hr_imgs[idx : idx + batch_size],
                    fn=crop_sub_imgs_fn, is_random=True)
            b_imgs_96 = tl.prepro.threading_data(b_imgs_384, fn=downsample_fn)
            ## update G
            errM, _ = sess.run([mse_loss, g_optim_init], {t_image: b_imgs_96, t_target_image: b_imgs_384})
            print("Epoch [%2d/%2d] %4d time: %4.4fs, mse: %.8f " % (epoch, n_epoch_init, n_iter, time.time() - step_time, errM))
            total_mse_loss += errM
            n_iter += 1
        log = "[*] Epoch: [%2d/%2d] time: %4.4fs, mse: %.8f" % (epoch, n_epoch_init, time.time() - epoch_time, total_mse_loss/n_iter)
        print(log)
        ## quick evaluation on train set
        if (epoch != 0) and (epoch % 10 == 0):
            out = sess.run(net_g_test.outputs, {t_image: sample_imgs_96})#; print('gen sub-image:', out.shape, out.min(), out.max())
            print("[*] save images")
            tl.vis.save_images(out, [ni, ni], save_dir_ginit+'/train_%d.png' % epoch)
        ## save model
        if (epoch != 0) and (epoch % 10 == 0):
            tl.files.save_npz(net_g.all_params, name=checkpoint_dir+'/g_{}_init.npz'.format(tl.global_flag['mode']), sess=sess)
    ###========================= train GAN (SRGAN) =========================###
    # NOTE(review): despite the header, this stage reuses the MSE-only
    # g_optim_init — no discriminator/adversarial loss is defined anywhere in
    # this file; confirm whether that is intentional.
    for epoch in range(0, n_epoch+1):
        ## update learning rate
        if epoch !=0 and (epoch % decay_every == 0):
            new_lr_decay = lr_decay ** (epoch // decay_every)
            sess.run(tf.assign(lr_v, lr_init * new_lr_decay))
            log = " ** new learning rate: %f (for GAN)" % (lr_init * new_lr_decay)
            print(log)
        elif epoch == 0:
            sess.run(tf.assign(lr_v, lr_init))
            log = " ** init lr: %f decay_every_init: %d, lr_decay: %f (for GAN)" % (lr_init, decay_every, lr_decay)
            print(log)
        epoch_time = time.time()
        total_mse_loss, n_iter = 0, 0
        ## If your machine have enough memory, please pre-load the whole train set.
        for idx in range(0, len(train_hr_imgs), batch_size):
            step_time = time.time()
            b_imgs_384 = tl.prepro.threading_data(
                    train_hr_imgs[idx : idx + batch_size],
                    fn=crop_sub_imgs_fn, is_random=True)
            b_imgs_96 = tl.prepro.threading_data(b_imgs_384, fn=downsample_fn)
            ## update G
            errM, _ = sess.run([mse_loss, g_optim_init],
                               {t_image: b_imgs_96, t_target_image: b_imgs_384})
            # NOTE(review): this log uses n_epoch_init as the total, but this
            # loop runs to n_epoch — the printed denominator looks wrong.
            print("Epoch [%2d/%2d] %4d time: %4.4fs, mse: %.8f " % (epoch, n_epoch_init, n_iter, time.time() - step_time, errM))
            total_mse_loss += errM
            n_iter += 1
        log = "[*] Epoch: [%2d/%2d] time: %4.4fs, mse_loss: %.8f" \
              % (epoch, n_epoch, time.time() - epoch_time, total_mse_loss/n_iter)
        print(log)
        ## quick evaluation on train set
        if (epoch != 0) and (epoch % 10 == 0):
            out = sess.run(net_g_test.outputs, {t_image: sample_imgs_96})
            print("[*] save images")
            tl.vis.save_images(out, [ni, ni], save_dir_gan+'/train_%d.png' % epoch)
        ## save model
        if (epoch != 0) and (epoch % 10 == 0):
            tl.files.save_npz(net_g.all_params, name=checkpoint_dir+'/g_{}.npz'.format(tl.global_flag['mode']), sess=sess)
|
from datetime import datetime
from random import randint
from time import sleep
import boto3
import uuid  # needed for collision-free record ids (see below)

dynamodb = boto3.resource('dynamodb')
# Records written per batch_writer context / per second.
batchnumber = 2000
table = dynamodb.Table('ddbstream2')
acc = 0  # running total of records written
# Load generator: write `batchnumber` random sensor readings per second into
# the ddbstream2 table, forever.
while True:
    with table.batch_writer() as batch:
        for i in range(batchnumber):
            batch.put_item(
                Item={
                    # BUG FIX: the previous id, str(abs(hash(isoformat + name))),
                    # collides for every item generated within the same
                    # isoformat() tick and changes across runs because of hash
                    # randomization; uuid4 is unique per item.
                    'id': uuid.uuid4().hex,
                    'timestamp': datetime.utcnow().isoformat(),
                    'temp': randint(0, 99),
                    'mois': randint(0, 99),
                    'thre': randint(0, 30)
                }
            )
    acc += batchnumber
    print(str(acc))
    sleep(1)
|
import heapq # 它可以用来实现优先队列
import random
# Draw 10 distinct values from 0..99 to demonstrate heap behaviour.
# BUG FIX: renamed the variable from `list`, which shadowed the builtin.
nums = random.sample(range(100), 10)
print(nums)
heapq.heapify(nums)  # in-place transform into a min-heap
for _ in range(len(nums)):
    # heappop always yields the current minimum, so this prints sorted order.
    print(heapq.heappop(nums), end=",")
from django.contrib import admin
from accounts.models import Follow
# Register your models here.
# Expose the Follow model in the Django admin interface with the default ModelAdmin.
admin.site.register(Follow)
##############################################################################
#
# Copyright (c) 2007 Agendaless Consulting and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the BSD-like license at
# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
# this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
# FITNESS FOR A PARTICULAR PURPOSE
#
##############################################################################
# Package version, also passed to setup() below.
__version__ = '0.7.1'
import os
from setuptools import setup, find_packages
# Extra install requirements, filled in below when stdlib support is missing.
requires = []
try:
    # Available from Python >= 2.5
    from sys import _current_frames
except ImportError:
    # Otherwise, depend on threadframe, which provide the same functionality as
    # the function in Python >= 2.5
    requires.append('threadframe')
# The long description is assembled from the README and changelog that sit
# next to this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
setup(name='repoze.debug',
      version=__version__,
      description='Forensic debugging WSGI middleware',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: WSGI",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
        ],
      keywords='wsgi request response debug middleware',
      author="Agendaless Consulting",
      author_email="repoze-dev@lists.repoze.org",
      url="http://www.repoze.org",
      license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
      packages=find_packages(),
      include_package_data=True,
      namespace_packages=['repoze'],
      zip_safe=False,
      tests_require = [
          'Paste',
          'WebOb',
          ],
      install_requires = [
          'Paste',
          'WebOb',
          ] + requires,
      test_suite="repoze.debug.tests",
      # Paste filter factories and the request-profiler console script.
      entry_points = """\
      [paste.filter_app_factory]
      responselogger = repoze.debug.responselogger:make_middleware
      canary = repoze.debug.canary:make_middleware
      pdbpm = repoze.debug.pdbpm:make_middleware
      threads = repoze.debug.threads:make_middleware
      [console_scripts]
      wsgirequestprofiler = repoze.debug.scripts.requestprofiler:main
      """
)
|
from __future__ import division
import six
import struct
import zlib
from erlastic.compat import *
from erlastic.constants import *
from erlastic.types import *
__all__ = ["ErlangTermEncoder", "ErlangTermDecoder", "EncodingError"]
class EncodingError(Exception):
    """Raised when a buffer cannot be decoded as (or encoded to) an Erlang term."""
    pass
class ErlangTermDecoder(object):
def __init__(self):
# Cache decode functions to avoid having to do a getattr
self.decoders = {}
for k in self.__class__.__dict__:
v = getattr(self, k)
if callable(v) and k.startswith('decode_'):
try: self.decoders[int(k.split('_')[1])] = v
except Exception: pass
def decode(self, buf, offset=0):
version = six.indexbytes(buf, offset)
if version != FORMAT_VERSION:
raise EncodingError("Bad version number. Expected %d found %d" % (FORMAT_VERSION, version))
return self.decode_part(buf, offset+1)[0]
def decode_part(self, buf, offset=0):
return self.decoders[six.indexbytes(buf, offset)](buf, offset+1)
def decode_97(self, buf, offset):
"""SMALL_INTEGER_EXT"""
return six.indexbytes(buf, offset), offset+1
def decode_98(self, buf, offset):
"""INTEGER_EXT"""
return struct.unpack(">l", buf[offset:offset+4])[0], offset+4
def decode_99(self, buf, offset):
"""FLOAT_EXT"""
return float(buf[offset:offset+31].split(six.b('\x00'), 1)[0]), offset+31
def decode_70(self, buf, offset):
"""NEW_FLOAT_EXT"""
return struct.unpack(">d", buf[offset:offset+8])[0], offset+8
def decode_100(self, buf, offset):
"""ATOM_EXT"""
atom_len = struct.unpack(">H", buf[offset:offset+2])[0]
atom = buf[offset+2:offset+2+atom_len]
return self.convert_atom(atom), offset+atom_len+2
def decode_115(self, buf, offset):
"""SMALL_ATOM_EXT"""
atom_len = six.indexbytes(buf, offset)
atom = buf[offset+1:offset+1+atom_len]
return self.convert_atom(atom), offset+atom_len+1
def decode_119(self, buf, offset):
"""SMALL_ATOM_UTF8_EXT"""
atom_len = buf[offset]
atom = buf[offset+1:offset+1+atom_len]
return self.convert_atom(atom), offset+atom_len+1
def decode_118(self, buf, offset):
"""ATOM_UTF8_EXT"""
atom_len = struct.unpack(">H", buf[offset:offset+2])[0]
atom = buf[offset+2:offset+2+atom_len]
return self.convert_atom(atom), offset+atom_len+1
def decode_104(self, buf, offset):
"""SMALL_TUPLE_EXT"""
arity = six.indexbytes(buf, offset)
offset += 1
items = []
for i in range(arity):
val, offset = self.decode_part(buf, offset)
items.append(val)
return tuple(items), offset
def decode_105(self, buf, offset):
"""LARGE_TUPLE_EXT"""
arity = struct.unpack(">L", buf[offset:offset+4])[0]
offset += 4
items = []
for i in range(arity):
val, offset = self.decode_part(buf, offset)
items.append(val)
return tuple(items), offset
def decode_106(self, buf, offset):
"""NIL_EXT"""
return [], offset
def decode_107(self, buf, offset):
"""STRING_EXT"""
length = struct.unpack(">H", buf[offset:offset+2])[0]
offset += 2
ints = []
for i in range(length):
ints.append(six.indexbytes(buf, offset+i))
return ints, offset+length
def decode_108(self, buf, offset):
"""LIST_EXT"""
length = struct.unpack(">L", buf[offset:offset+4])[0]
offset += 4
items = []
for i in range(length):
val, offset = self.decode_part(buf, offset)
items.append(val)
tail, offset = self.decode_part(buf, offset)
if tail != []:
# TODO: Not sure what to do with the tail
raise NotImplementedError("Lists with non empty tails are not supported")
return items, offset
def decode_116(self, buf, offset):
"""MAP_EXT"""
length = struct.unpack(">L", buf[offset:offset+4])[0]
offset += 4
items = {}
for i in range(length):
key, offset = self.decode_part(buf, offset)
val, offset = self.decode_part(buf, offset)
items[key]=val
return items, offset
def decode_109(self, buf, offset):
"""BINARY_EXT"""
length = struct.unpack(">L", buf[offset:offset+4])[0]
return buf[offset+4:offset+4+length], offset+4+length
def decode_110(self, buf, offset):
"""SMALL_BIG_EXT"""
n = six.indexbytes(buf, offset)
offset += 1
return self.decode_bigint(n, buf, offset)
def decode_111(self, buf, offset):
"""LARGE_BIG_EXT"""
n = struct.unpack(">L", buf[offset:offset+4])[0]
offset += 4
return self.decode_bigint(n, buf, offset)
def decode_112(self, buf, offset):
"""NEW_FUN_EXT"""
size = struct.unpack(">L", buf[offset:offset+4])[0]
arity = ord(buf[4])
return "f/%d"%(arity), offset+size
def decode_bigint(self, n, buf, offset):
sign = six.indexbytes(buf, offset)
offset += 1
b = 1
val = 0
for i in range(n):
val += six.indexbytes(buf, offset) * b
b <<= 8
offset += 1
if sign != 0:
val = -val
return val, offset
def decode_101(self, buf, offset):
"""REFERENCE_EXT"""
node, offset = self.decode_part(buf, offset)
if not isinstance(node, Atom):
raise EncodingError("Expected atom while parsing REFERENCE_EXT, found %r instead" % node)
reference_id, creation = struct.unpack(">LB", buf[offset:offset+5])
return Reference(node, [reference_id], creation), offset+5
def decode_114(self, buf, offset):
"""NEW_REFERENCE_EXT"""
id_len = struct.unpack(">H", buf[offset:offset+2])[0]
node, offset = self.decode_part(buf, offset+2)
if not isinstance(node, Atom):
raise EncodingError("Expected atom while parsing NEW_REFERENCE_EXT, found %r instead" % node)
creation = six.indexbytes(buf, offset)
reference_id = struct.unpack(">%dL" % id_len, buf[offset+1:offset+1+4*id_len])
return Reference(node, reference_id, creation), offset+1+4*id_len
def decode_102(self, buf, offset):
    """PORT_EXT: node atom, 4-byte port id, 1-byte creation."""
    node, offset = self.decode_part(buf, offset)
    if not isinstance(node, Atom):
        raise EncodingError("Expected atom while parsing PORT_EXT, found %r instead" % node)
    ident, creation = struct.unpack(">LB", buf[offset:offset + 5])
    return Port(node, ident, creation), offset + 5
def decode_103(self, buf, offset):
    """PID_EXT: node atom, 4-byte id, 4-byte serial, 1-byte creation."""
    node, offset = self.decode_part(buf, offset)
    if not isinstance(node, Atom):
        raise EncodingError("Expected atom while parsing PID_EXT, found %r instead" % node)
    ident, serial, creation = struct.unpack(">LLB", buf[offset:offset + 9])
    return PID(node, ident, serial, creation), offset + 9
def decode_113(self, buf, offset):
    """EXPORT_EXT: fun M:F/A — module and function atoms plus an integer arity."""
    def _require(value, types, kind):
        # Uniform validation; the emitted message is identical to the
        # per-field checks used by the sibling decode_* methods.
        if not isinstance(value, types):
            raise EncodingError(
                "Expected %s while parsing EXPORT_EXT, found %r instead" % (kind, value))
    module, offset = self.decode_part(buf, offset)
    _require(module, Atom, "atom")
    function, offset = self.decode_part(buf, offset)
    _require(function, Atom, "atom")
    arity, offset = self.decode_part(buf, offset)
    _require(arity, six.integer_types, "integer")
    # NOTE(review): decode_part appears to have already advanced offset past
    # the arity term, so the extra +1 here may double-count a byte — confirm
    # against the dispatcher's offset convention before relying on it.
    return Export(module, function, arity), offset + 1
def decode_80(self, buf, offset):
    """Compressed term: 4-byte uncompressed size, then zlib-deflated data."""
    (uncompressed_size,) = struct.unpack(">L", buf[offset:offset + 4])
    # NOTE(review): the slice bound uses the *uncompressed* size, which only
    # works while the compressed payload is no longer than the original
    # data — confirm the writer guarantees this.
    inflated = zlib.decompress(buf[offset + 4:offset + 4 + uncompressed_size])
    return self.decode_part(inflated, 0)
def convert_atom(self, atom):
    """Map the reserved atoms true/false/none to Python singletons.

    Any other atom name (raw bytes) is wrapped in Atom, decoded as latin-1.
    """
    specials = {b"true": True, b"false": False, b"none": None}
    try:
        return specials[atom]
    except KeyError:
        return Atom(atom.decode('latin-1'))
class ErlangTermEncoder(object):
    """Encodes Python values into the Erlang external term format.

    Python bools/None become the atoms true/false/none, ints map to
    SMALL_INTEGER/INTEGER/(SMALL|LARGE)_BIG terms, text becomes
    BINARY_EXT, and the Atom/Reference/Port/PID/Export wrapper classes
    round-trip to their native tags.
    """

    def __init__(self, encoding="utf-8", unicode_type="binary"):
        # NOTE(review): neither attribute is consulted by encode/encode_part
        # in this file; they look like unused configuration hooks — confirm.
        self.encoding = encoding
        self.unicode_type = unicode_type

    def encode(self, obj, compressed=False):
        """Serialize obj with the leading format-version byte.

        compressed may be False, True (zlib level 6) or an int 0-9; the
        compressed form is only emitted when it is actually smaller.
        Raises TypeError for any other `compressed` value.
        """
        ubuf = six.b('').join(self.encode_part(obj))
        if compressed is True:
            compressed = 6
        if not (compressed is False
                or (isinstance(compressed, six.integer_types)
                    and 0 <= compressed <= 9)):
            raise TypeError("compressed must be True, False or "
                            "an integer between 0 and 9")
        if compressed:
            cbuf = zlib.compress(ubuf, compressed)
            if len(cbuf) < len(ubuf):
                usize = struct.pack(">L", len(ubuf))
                # Bug fix: the old code did "".join([COMPRESSED, usize, cbuf]),
                # which raises TypeError (str separator over int/bytes items).
                # Build the tagged byte string the same way every other tag is
                # built in this class.
                ubuf = pack_bytes([COMPRESSED]) + usize + cbuf
        return pack_bytes([FORMAT_VERSION]) + ubuf

    def encode_part(self, obj):
        """Return a list of byte chunks encoding obj (no format-version byte).

        Raises NotImplementedError for unsupported Python types.
        """
        if obj is False:
            return [pack_bytes([ATOM_UTF8_EXT]), struct.pack(">H", 5), b"false"]
        elif obj is True:
            return [pack_bytes([ATOM_UTF8_EXT]), struct.pack(">H", 4), b"true"]
        elif obj is None:
            return [pack_bytes([ATOM_UTF8_EXT]), struct.pack(">H", 4), b"none"]
        elif isinstance(obj, six.integer_types):
            if 0 <= obj <= 255:
                return [pack_bytes([SMALL_INTEGER_EXT, obj])]
            elif -2147483648 <= obj <= 2147483647:
                return [pack_bytes([INTEGER_EXT]), struct.pack(">l", obj)]
            else:
                # Bignum: sign flag followed by little-endian magnitude bytes.
                sign = obj < 0
                obj = abs(obj)
                big_buf = []
                while obj > 0:
                    big_buf.append(obj & 0xff)
                    obj >>= 8
                if len(big_buf) < 256:
                    return [pack_bytes([SMALL_BIG_EXT, len(big_buf), sign]),
                            pack_bytes(big_buf)]
                else:
                    return [pack_bytes([LARGE_BIG_EXT]),
                            struct.pack(">L", len(big_buf)),
                            pack_bytes([sign]), pack_bytes(big_buf)]
        elif isinstance(obj, float):
            # Legacy FLOAT_EXT: 31 bytes of zero-padded ASCII "%.20e" text.
            floatstr = ("%.20e" % obj).encode('ascii')
            return [pack_bytes([FLOAT_EXT]), floatstr + b"\x00" * (31 - len(floatstr))]
        elif isinstance(obj, Atom):
            st = obj.encode('utf-8')
            return [pack_bytes([ATOM_UTF8_EXT]), struct.pack(">H", len(st)), st]
        elif isinstance(obj, six.text_type):
            st = obj.encode('utf-8')
            return [pack_bytes([BINARY_EXT]), struct.pack(">L", len(st)), st]
        elif isinstance(obj, six.string_types) or isinstance(obj, six.binary_type):
            # https://pythonhosted.org/six/#six.string_types
            # https://pythonhosted.org/six/#six.binary_type
            return [pack_bytes([BINARY_EXT]), struct.pack(">L", len(obj)), obj]
        elif isinstance(obj, tuple):
            n = len(obj)
            if n < 256:
                buf = [pack_bytes([SMALL_TUPLE_EXT, n])]
            else:
                buf = [pack_bytes([LARGE_TUPLE_EXT]), struct.pack(">L", n)]
            for item in obj:
                buf += self.encode_part(item)
            return buf
        elif obj == []:
            return [pack_bytes([NIL_EXT])]
        elif isinstance(obj, list):
            buf = [pack_bytes([LIST_EXT]), struct.pack(">L", len(obj))]
            for item in obj:
                buf += self.encode_part(item)
            buf.append(pack_bytes([NIL_EXT]))  # proper-list tail - no such thing in Python
            return buf
        elif isinstance(obj, dict):
            # Consistency fix: use pack_bytes like every other branch;
            # bytes([MAP_EXT]) produces the wrong result on Python 2.
            buf = [pack_bytes([MAP_EXT]), struct.pack(">L", len(obj))]
            for key in obj:
                buf += self.encode_part(key)
                buf += self.encode_part(obj[key])
            return buf
        elif isinstance(obj, Reference):
            return [pack_bytes([NEW_REFERENCE_EXT]), struct.pack(">H", len(obj.ref_id)),
                    pack_bytes([ATOM_UTF8_EXT]), struct.pack(">H", len(obj.node)),
                    obj.node.encode('latin-1'), pack_bytes([obj.creation]),
                    struct.pack(">%dL" % len(obj.ref_id), *obj.ref_id)]
        elif isinstance(obj, Port):
            return [pack_bytes([PORT_EXT]), pack_bytes([ATOM_UTF8_EXT]),
                    struct.pack(">H", len(obj.node)), obj.node.encode('latin-1'),
                    struct.pack(">LB", obj.port_id, obj.creation)]
        elif isinstance(obj, PID):
            return [pack_bytes([PID_EXT]), pack_bytes([ATOM_UTF8_EXT]),
                    struct.pack(">H", len(obj.node)), obj.node.encode('latin-1'),
                    struct.pack(">LLB", obj.pid_id, obj.serial, obj.creation)]
        elif isinstance(obj, Export):
            return [pack_bytes([EXPORT_EXT]), pack_bytes([ATOM_UTF8_EXT]),
                    struct.pack(">H", len(obj.module)), obj.module.encode('latin-1'),
                    pack_bytes([ATOM_UTF8_EXT]), struct.pack(">H", len(obj.function)),
                    obj.function.encode('latin-1'), pack_bytes([SMALL_INTEGER_EXT, obj.arity])]
        else:
            raise NotImplementedError("Unable to serialize %r" % obj)
|
#!/usr/bin/env python3
def es_par(num):
    """Return True when num is even, False otherwise."""
    return num % 2 == 0


def main():
    # Security fix: int() instead of eval() — eval executes arbitrary
    # code typed by the user; int() only parses an integer (and raises
    # ValueError on anything else).
    num = int(input("Ingresa un numero: "))
    # Even when the remainder modulo 2 is 0.
    if es_par(num):
        print("El numero es par")
    else:
        print("El numero es impar")


if __name__ == "__main__":
    main()
|
import cv2
import matplotlib.pyplot as plt  # NOTE(review): unused here — confirm before removing

# Start the camera (device 0).
cap = cv2.VideoCapture(0)
while True:
    # Grab one frame; ret is False when no frame could be read
    # (camera unplugged / busy), in which case frame is None.
    ret, frame = cap.read()
    if not ret:
        break
    # Convert to grayscale.  Bug fix: OpenCV delivers frames in BGR
    # channel order, so BGR2GRAY (not RGB2GRAY) applies the correct
    # per-channel luminance weights.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Smooth with a Gaussian filter to suppress noise before thresholding.
    frame = cv2.GaussianBlur(frame, (7, 7), 0)
    # Inverted binary threshold: pixels <= 120 become 240, the rest 0.
    frame = cv2.threshold(frame, 120, 240, cv2.THRESH_BINARY_INV)[1]
    cv2.imshow('OpenCV Camera', frame)
    # Exit on ESC (27) or Enter (13).
    k = cv2.waitKey(1)
    if k == 27 or k == 13:
        break
# Release the camera and close all windows.
cap.release()
cv2.destroyAllWindows()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.