#!/usr/bin/env python
# vim:set ts=8 sw=4 sts=4 et:
# Copyright (c) 2007-2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
import getopt
import os
import pdb
import queue
import re
import subprocess
import sys
import threading
import time
# ------------------------------------------------------------------------------
# External tool locations. With an empty PATH_PREFIX, os.path.join yields the
# bare command name, which is then resolved through $PATH.
PATH_PREFIX = '' # /usr/bin, /usr/local/bin, etc.
AFCONVERT = os.path.join(PATH_PREFIX, 'afconvert')
# NOTE(review): constant name misspells "AtomicParsley" as "PARSELEY";
# kept as-is since renaming would touch every call site.
ATOMICPARSELEY = os.path.join(PATH_PREFIX, 'AtomicParsley')
FAAD = os.path.join(PATH_PREFIX, 'faad')
FLAC = os.path.join(PATH_PREFIX, 'flac')
LAME = os.path.join(PATH_PREFIX, 'lame')
MADPLAY = os.path.join(PATH_PREFIX, 'madplay')
METAFLAC = os.path.join(PATH_PREFIX, 'metaflac')
OGGDEC = os.path.join(PATH_PREFIX, 'oggdec')
OGGENC = os.path.join(PATH_PREFIX, 'oggenc')
VORBISCOMMENT = os.path.join(PATH_PREFIX, 'vorbiscomment')
# Temporary decoded files go here (see AudioFile.generateTempFileName).
TMPDIR = '/tmp'
NAME = 'piggy'
# Extension -> format tables consulted by makeAudioFile(). An .m4a can be
# either ALAC or MP4; ALAC is checked first there, so it masks MP4.
ALAC_FILE_EXTENSIONS = ['m4a']
AIFF_FILE_EXTENSIONS = ['aif', 'aiff']
FLAC_FILE_EXTENSIONS = ['flac']
MP3_FILE_EXTENSIONS = ['mp3']
MP4_FILE_EXTENSIONS = ['mp4', 'm4a']
VORBIS_FILE_EXTENSIONS = ['ogg']
WAVE_FILE_EXTENSIONS = ['wav']
# Process exit codes.
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
EXIT_CMDFAILURE = 2  # command-line usage error
# TTY Colors
NOCOLOR = '\033[0m'
RED = '\033[01;31m'
GREEN = '\033[01;32m'
YELLOW = '\033[01;33m'
BLUE = '\033[01;34m'
MAGENTA = '\033[01;35m'
CYAN = '\033[01;36m'
WHITE = '\033[01;37m'
#WHITE = '\033[37;40m'
encoderSettings = [] # See below -- re-assigned once the encoder classes exist
# ------------------------------------------------------------------------------
def msg(s):
    """Print an informational line: green '*' marker, message, color reset."""
    print(GREEN + '*', s, NOCOLOR)
def err(s):
    """Print an error/warning line: red '!' marker, message, color reset."""
    print(RED + '!', s, NOCOLOR)
def dbg(s):
    """Print a debug line (yellow '%' marker); silenced under `python -O`."""
    if __debug__:
        print(YELLOW + '%', s, NOCOLOR)
def sep():
    """Print a terminal-wide horizontal rule in white.

    The width is taken from `stty size`; falls back to 80 columns when the
    output is not a terminal or the reply cannot be parsed.
    """
    try:
        # `stty size` prints "rows cols"; take the column count.
        columns = int(subprocess.getoutput('stty size').split()[1])
    except (IndexError, ValueError):
        # IndexError: no second field; ValueError: non-numeric reply.
        # (The original only caught IndexError and could crash on junk.)
        columns = 80
    # One print call with string repetition instead of one print per column.
    print(WHITE + '-' * columns + NOCOLOR)
class Timer(object):
    """Wall-clock stopwatch with whole-second resolution."""

    def start(self):
        """Record the current time as the starting point."""
        self.startTime = int(time.time())

    def stop(self):
        """Record the current time as the ending point."""
        self.endTime = int(time.time())

    def timeDelta(self):
        """Return the number of elapsed seconds between start() and stop()."""
        return self.endTime - self.startTime

    def stringDelta(self):
        """Return the elapsed time formatted as 'Dd Hh Mm Ss'."""
        remain = self.timeDelta()
        days, remain = divmod(remain, 86400)
        hours, remain = divmod(remain, 3600)
        minutes, seconds = divmod(remain, 60)
        return f'{days}d {hours}h {minutes}m {seconds}s'
def escape(str):
    """Escape *str* for interpolation inside a double-quoted shell string.

    Inside POSIX shell double quotes, four characters remain special:
    backslash, double quote, dollar sign, and backtick.  The original code
    escaped only the double quote, which let filenames containing `$`, `` ` ``
    or `\\` alter the commands built around this helper.

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    so keyword callers (if any) keep working.
    """
    s = str
    # Backslash must be escaped first, or it would re-escape our escapes.
    for ch in ('\\', '"', '$', '`'):
        s = s.replace(ch, '\\' + ch)
    return s
def numCores():
    """Return the number of CPU cores available, always at least 1.

    Uses os.cpu_count() (portable across Linux and macOS), replacing the
    sysconf/sysctl probing the old TODO asked to make more reliable.
    """
    num = os.cpu_count()
    if num is None or num < 1:
        # cpu_count() may return None when the count is undeterminable.
        err('Could not determine the number of cores available')
        return 1
    return num
def runProcess(s):
    """Run shell command *s* and return its exit status.

    Echoes the command (cyan '>') in debug builds.  subprocess.call is
    exactly Popen(...).wait(), as the original TODO noted.
    """
    if __debug__:
        print(CYAN + '>', s, NOCOLOR)
    # shell=True is required: callers pass full shell command strings with
    # quoting done via escape().
    return subprocess.call(s, shell=True)
def deleteFile(s):
    """Remove the file at path *s*, logging the deletion in debug builds."""
    dbg('Deleting ' + s)
    os.unlink(s)  # os.unlink is an alias of os.remove
# ------------------------------------------------------------------------------
class AudioFile(object):
    """A single audio file on disk together with its metadata tags.

    Subclasses override decode() and loadTags() per audio format.
    """

    def __init__(self, path):
        # AudioFile objects should not exist unless their respective files
        # exist on disk!
        assert os.path.isfile(path)
        assert os.access(path, os.R_OK)
        self.path = path                                  # full path on disk
        self.dir, self.name = os.path.split(path)         # directory and filename
        self.name_noext = os.path.splitext(self.name)[0]  # filename sans extension
        self.decodedAudioFile = None  # decoded copy, typically a WaveAudioFile
        self.tags = {}  # keys: artist, album, title, track, year, comment
        self.loadTags()

    def generateTempFileName(self, s):
        """Return a unique-ish temp path ending in *s* (a name, not a file).

        Not meant to be secure; the tempfile module would hand back a file
        handle we do not need.  TODO: on macOS, `getconf DARWIN_USER_TEMP_DIR`
        would be a better base directory.
        """
        return os.path.join(TMPDIR, NAME + '_' + str(time.time()) + '_' + s)

    def removeTemporaryFiles(self):
        """Delete the decoded temporary file, if one was produced."""
        if self.decodedAudioFile:
            deleteFile(self.decodedAudioFile.path)
            self.decodedAudioFile = None

    def decode(self):
        """Return a PCM version of this file; overridden by subclasses."""
        pass

    def loadTags(self):
        """Populate self.tags from the file; overridden by subclasses."""
        pass

    def setTags(self, artist, album, title, track, year, comment):
        """Set all six standard tags in one call."""
        self.tags.update(artist=artist, album=album, title=title,
                         track=track, year=year, comment=comment)
class PCMAudioFile(AudioFile):
    """An uncompressed (PCM) audio file; decoding is the identity."""
    def decode(self):
        # Already PCM -- the file is its own decoded form.
        return self
class WaveAudioFile(PCMAudioFile):
    """A .wav file (see WAVE_FILE_EXTENSIONS); PCM, so no decoding needed."""
    pass
class AIFFAudioFile(PCMAudioFile):
    """An .aif/.aiff file (see AIFF_FILE_EXTENSIONS); PCM, no decoding needed."""
    pass
class CompressedAudioFile(AudioFile):
    """Base for formats that must be decoded to PCM before re-encoding."""
    pass
class ALACAudioFile(CompressedAudioFile):
    """Apple Lossless (.m4a): decoded with afconvert, tags via AtomicParsley."""

    def decode(self):
        """Decode to a temporary WaveAudioFile; return it, or None on failure."""
        if self.decodedAudioFile:
            return self.decodedAudioFile
        outputPath = self.generateTempFileName(self.name + '.wav')
        # TODO: Hmm... we're assuming 16 bits per sample. Is this a good idea?
        exitCode = runProcess(AFCONVERT + ' -f WAVE -d LEI16 "' + escape(self.path) + '" "' + escape(outputPath) + '"')
        if exitCode != 0:
            # The converter may have left a partial output file behind.
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        decodedAudioFile = WaveAudioFile(outputPath)
        decodedAudioFile.tags = self.tags  # tags travel with the decoded copy
        self.decodedAudioFile = decodedAudioFile
        return decodedAudioFile

    def loadTags(self):
        """Populate self.tags by parsing `AtomicParsley <file> -t` output."""
        output = subprocess.getoutput(ATOMICPARSELEY + ' "' + escape(self.path) + '" -t')
        # Raw strings: '\d' in a plain string literal is an invalid escape
        # sequence (SyntaxWarning on modern Pythons).
        patterns = (
            ('artist', re.compile(r'Atom "©ART" contains: (.+)', re.IGNORECASE)),
            ('album', re.compile(r'Atom "©alb" contains: (.+)', re.IGNORECASE)),
            ('title', re.compile(r'Atom "©nam" contains: (.+)', re.IGNORECASE)),
            ('track', re.compile(r'Atom "trkn" contains: (.+)', re.IGNORECASE)),
            ('year', re.compile(r'Atom "©day" contains: (\d+)', re.IGNORECASE)),
            # 'comment' ("©cmt") was disabled in the original; left out here.
        )
        for line in output.splitlines():
            for key, rx in patterns:
                m = rx.match(line)
                if m:
                    self.tags[key] = m.group(1)
class XiphAudioFile(CompressedAudioFile):
    """Shared tag loading for Xiph formats (FLAC, Ogg Vorbis)."""

    def xiphLoadTags(self, tool):
        """Run shell command *tool* and harvest Vorbis-comment style tags.

        Lines of the form NAME=value are mapped into self.tags.
        """
        output = subprocess.getoutput(tool)
        patterns = (
            ('artist', re.compile('ARTIST=(.+)', re.IGNORECASE)),
            ('album', re.compile('ALBUM=(.+)', re.IGNORECASE)),
            ('title', re.compile('TITLE=(.+)', re.IGNORECASE)),
            ('track', re.compile('TRACKNUMBER=(.*)', re.IGNORECASE)),
            ('year', re.compile('DATE=(.*)', re.IGNORECASE)),
            ('comment', re.compile('COMMENT=(.*)', re.IGNORECASE)),
        )
        for line in output.splitlines():
            for key, rx in patterns:
                match = rx.match(line)
                if match:
                    self.tags[key] = match.group(1)
class FLACAudioFile(XiphAudioFile):
    """FLAC file: decoded with `flac`, tags read with `metaflac`."""

    def decode(self):
        """Decode to a temporary WaveAudioFile; return it, or None on failure."""
        if self.decodedAudioFile:
            return self.decodedAudioFile
        outputPath = self.generateTempFileName(self.name + '.wav')
        exitCode = runProcess(FLAC + ' --silent --decode -o "' + escape(outputPath) + '" "' + escape(self.path) + '"')
        if exitCode != 0:
            # The decoder may have left a partial output file behind.
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        wave = WaveAudioFile(outputPath)
        wave.tags = self.tags  # tags travel with the decoded copy
        self.decodedAudioFile = wave
        return wave

    def loadTags(self):
        """Read Vorbis comments via metaflac and the shared Xiph parser."""
        self.xiphLoadTags(METAFLAC + ' --export-tags-to=- "' + escape(self.path) + '"')
class VorbisAudioFile(XiphAudioFile):
    """Ogg Vorbis file: decoded with `oggdec`, tags read with `vorbiscomment`."""

    def decode(self):
        """Decode to a temporary WaveAudioFile; return it, or None on failure."""
        if self.decodedAudioFile:
            return self.decodedAudioFile
        outputPath = self.generateTempFileName(self.name + '.wav')
        exitCode = runProcess(OGGDEC + ' --quiet -o "' + escape(outputPath) + '" "' + escape(self.path) + '"')
        if exitCode != 0:
            # The decoder may have left a partial output file behind.
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        wave = WaveAudioFile(outputPath)
        wave.tags = self.tags  # tags travel with the decoded copy
        self.decodedAudioFile = wave
        return wave

    def loadTags(self):
        """Read Vorbis comments via vorbiscomment and the shared Xiph parser."""
        self.xiphLoadTags(VORBISCOMMENT + ' --list "' + escape(self.path) + '"')
class MP3AudioFile(CompressedAudioFile):
    """MP3 file: decoded and tag-read with madplay."""

    def decode(self):
        """Decode to a temporary WaveAudioFile; return it, or None on failure."""
        if self.decodedAudioFile:
            return self.decodedAudioFile
        outputPath = self.generateTempFileName(self.name + '.wav')
        exitCode = runProcess(MADPLAY + ' --quiet -o "' + escape(outputPath) + '" "' + escape(self.path) + '"')
        if exitCode != 0:
            # The decoder may have left a partial output file behind.
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        decodedAudioFile = WaveAudioFile(outputPath)
        decodedAudioFile.tags = self.tags  # tags travel with the decoded copy
        self.decodedAudioFile = decodedAudioFile
        return decodedAudioFile

    def loadTags(self):
        """Populate self.tags by parsing `madplay --show-tags-only` output."""
        output = subprocess.getoutput(MADPLAY + ' --show-tags-only "' + escape(self.path) + '"')
        # Raw strings: '\s' in a plain string literal is an invalid escape
        # sequence (SyntaxWarning on modern Pythons).
        patterns = (
            ('artist', re.compile(r'\s*artist: (.+)', re.IGNORECASE)),
            ('album', re.compile(r'\s*album: (.+)', re.IGNORECASE)),
            ('title', re.compile(r'\s*title: (.+)', re.IGNORECASE)),
            ('track', re.compile(r'\s*track: (.*)', re.IGNORECASE)),
            ('year', re.compile(r'\s*year: (.*)', re.IGNORECASE)),
            # 'comment' was disabled in the original; left out here.
        )
        for line in output.splitlines():
            for key, rx in patterns:
                m = rx.match(line)
                if m:
                    self.tags[key] = m.group(1)
class MP4AudioFile(CompressedAudioFile):
    """MP4/AAC file: decoded and tag-read with faad."""

    def decode(self):
        """Decode to a temporary WaveAudioFile; return it, or None on failure."""
        if self.decodedAudioFile:
            return self.decodedAudioFile
        outputPath = self.generateTempFileName(self.name + '.wav')
        exitCode = runProcess(FAAD + ' --quiet -o "' + escape(outputPath) + '" "' + escape(self.path) + '"')
        if exitCode != 0:
            # The decoder may have left a partial output file behind.
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        wave = WaveAudioFile(outputPath)
        wave.tags = self.tags  # tags travel with the decoded copy
        self.decodedAudioFile = wave
        return wave

    def loadTags(self):
        """Populate self.tags by parsing `faad --info` output."""
        output = subprocess.getoutput(FAAD + ' --info "' + escape(self.path) + '"')
        patterns = (
            ('artist', re.compile('artist: (.+)', re.IGNORECASE)),
            ('album', re.compile('album: (.+)', re.IGNORECASE)),
            ('title', re.compile('title: (.+)', re.IGNORECASE)),
            ('track', re.compile('track: (.*)', re.IGNORECASE)),
            ('year', re.compile('date: (.*)', re.IGNORECASE)),
            # 'comment' was disabled in the original; left out here.
        )
        for line in output.splitlines():
            for key, rx in patterns:
                match = rx.match(line)
                if match:
                    self.tags[key] = match.group(1)
def makeAudioFile(path):
    """Return the AudioFile subclass instance matching *path*'s extension.

    Returns None when the path is not a readable file, has no extension,
    or the extension is not a recognized audio format.
    TODO: Need a more intelligent way other than file extension to tell a
    filetype.
    """
    if not (os.path.isfile(path) and os.access(path, os.R_OK)):
        return None
    # splitext gives '.ext' (or ''); strip the dot instead of the original
    # split('.')[1] dance that needed an IndexError handler.
    ext = os.path.splitext(path)[1][1:].lower()
    if not ext:
        return None
    # .m4a files can be either ALAC or MP4. Since ALAC comes first in this
    # chain, it masks MP4.
    if ext in AIFF_FILE_EXTENSIONS:
        return AIFFAudioFile(path)
    elif ext in ALAC_FILE_EXTENSIONS:
        return ALACAudioFile(path)
    elif ext in FLAC_FILE_EXTENSIONS:
        return FLACAudioFile(path)
    elif ext in MP3_FILE_EXTENSIONS:
        return MP3AudioFile(path)
    elif ext in MP4_FILE_EXTENSIONS:
        return MP4AudioFile(path)
    elif ext in VORBIS_FILE_EXTENSIONS:
        return VorbisAudioFile(path)
    elif ext in WAVE_FILE_EXTENSIONS:
        return WaveAudioFile(path)
    return None
# ------------------------------------------------------------------------------
class AudioEncoder(object):
    """Base class for encoders that turn a decoded (PCM) file into a target format."""
    def __init__(self, opts):
        # opts: extra command-line options passed verbatim to the encoder tool.
        self.opts = opts
    def encode(self, audioFile, outputPath):
        '''Decode the audioFile if necessary. Then, encode the
        audioFile. The encoder adds the appropriate extension to
        outputPath and writes the encoded file to that path.
        Return the encoded AudioFile if successful, None otherwise
        '''
        pass
class ALACAudioEncoder(AudioEncoder):
    """Encode to Apple Lossless via afconvert, then tag with AtomicParsley."""

    def encode(self, audioFile, outputPath):
        """Encode *audioFile* to outputPath + '.m4a', copying its tags.

        Returns the resulting ALACAudioFile, or None on failure.
        """
        outputPath += '.m4a'
        pcmAudioFile = audioFile.decode()
        if pcmAudioFile is None:  # 'is None' rather than '== None' (PEP 8)
            return None
        cmd = AFCONVERT + ' -d alac ' + self.opts + ' "' + escape(pcmAudioFile.path) + \
            '" "' + escape(outputPath) + '"'
        if runProcess(cmd) != 0:
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        # Tagging is a second pass: AtomicParsley rewrites the file in place.
        cmd = ATOMICPARSELEY + ' "' + escape(outputPath) + '" --overWrite'
        for flag, key in (('--artist', 'artist'), ('--album', 'album'),
                          ('--title', 'title'), ('--tracknum', 'track'),
                          ('--year', 'year'), ('--comment', 'comment')):
            if key in audioFile.tags:
                cmd += ' ' + flag + ' "' + escape(audioFile.tags[key]) + '"'
        if runProcess(cmd) != 0:
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        encodedAudioFile = ALACAudioFile(outputPath)
        encodedAudioFile.tags = audioFile.tags
        return encodedAudioFile
class FLACAudioEncoder(AudioEncoder):
    """Encode to FLAC; tags are embedded at encode time via -T options."""

    def encode(self, audioFile, outputPath):
        """Encode *audioFile* to outputPath + '.flac', embedding its tags.

        Returns the resulting FLACAudioFile, or None on failure.
        """
        outputPath += '.flac'
        pcmAudioFile = audioFile.decode()
        if pcmAudioFile is None:  # 'is None' rather than '== None' (PEP 8)
            return None
        cmd = FLAC + ' --silent ' + self.opts
        # Vorbis-comment field name -> local tag key.
        for field, key in (('ARTIST', 'artist'), ('ALBUM', 'album'),
                           ('TITLE', 'title'), ('TRACKNUMBER', 'track'),
                           ('DATE', 'year'), ('COMMENT', 'comment')):
            if key in audioFile.tags:
                cmd += ' -T ' + field + '="' + escape(audioFile.tags[key]) + '"'
        cmd += ' -o "' + escape(outputPath) + \
            '" "' + escape(pcmAudioFile.path) + '"'
        if runProcess(cmd) != 0:
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        encodedAudioFile = FLACAudioFile(outputPath)
        encodedAudioFile.tags = audioFile.tags
        return encodedAudioFile
class OggencAudioEncoder(AudioEncoder):
    """Encode to Ogg Vorbis with oggenc; tags passed as oggenc options."""

    def encode(self, audioFile, outputPath):
        """Encode *audioFile* to outputPath + '.ogg', embedding its tags.

        Returns the resulting VorbisAudioFile, or None on failure.
        """
        outputPath += '.ogg'
        pcmAudioFile = audioFile.decode()
        if pcmAudioFile is None:  # 'is None' rather than '== None' (PEP 8)
            return None
        cmd = OGGENC + ' --quiet ' + self.opts
        # oggenc short option -> local tag key.
        for flag, key in (('-a', 'artist'), ('-l', 'album'), ('-t', 'title'),
                          ('-N', 'track'), ('-d', 'year')):
            if key in audioFile.tags:
                cmd += ' ' + flag + ' "' + escape(audioFile.tags[key]) + '"'
        if 'comment' in audioFile.tags:
            # Free-form comments go through -c as NAME=value pairs.
            cmd += ' -c "COMMENT=' + escape(audioFile.tags['comment']) + '"'
        cmd += ' -o "' + escape(outputPath) + \
            '" "' + escape(pcmAudioFile.path) + '"'
        if runProcess(cmd) != 0:
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        encodedAudioFile = VorbisAudioFile(outputPath)
        encodedAudioFile.tags = audioFile.tags
        return encodedAudioFile
class LAMEAudioEncoder(AudioEncoder):
    """Encode to MP3 with lame; ID3 tags passed as lame options."""

    def encode(self, audioFile, outputPath):
        """Encode *audioFile* to outputPath + '.mp3', embedding its tags.

        Returns the resulting MP3AudioFile, or None on failure.
        """
        outputPath += '.mp3'
        pcmAudioFile = audioFile.decode()
        if pcmAudioFile is None:  # 'is None' rather than '== None' (PEP 8)
            return None
        cmd = LAME + ' --silent ' + self.opts
        # lame tag option -> local tag key.
        for flag, key in (('--ta', 'artist'), ('--tl', 'album'),
                          ('--tt', 'title'), ('--tn', 'track'),
                          ('--ty', 'year'), ('--tc', 'comment')):
            if key in audioFile.tags:
                cmd += ' ' + flag + ' "' + escape(audioFile.tags[key]) + '"'
        # lame takes input first, then output.
        cmd += ' "' + escape(pcmAudioFile.path) + \
            '" "' + escape(outputPath) + '"'
        if runProcess(cmd) != 0:
            try:
                deleteFile(outputPath)
            except OSError:
                pass
            return None
        encodedAudioFile = MP3AudioFile(outputPath)
        encodedAudioFile.tags = audioFile.tags
        return encodedAudioFile
# ------------------------------------------------------------------------------
class FileList(object):
    '''Store the absolute paths to all of the files under one root directory'''

    def __init__(self, rootPath, absoluteFilePaths):
        self.rootPath = rootPath                    # the directory that was walked
        self.absoluteFilePaths = absoluteFilePaths  # absolute paths of its files
class EncoderSetting(object):
    """Bundle of a CLI name, output folder, extension, and encoder instance."""

    def __init__(self, name, folder, extension, encoder):
        self.name = name            # identifier used with the -s option
        self.folder = folder        # output subdirectory for this setting
        self.extension = extension  # extension the encoder produces
        self.encoder = encoder      # AudioEncoder doing the work
def findEncoderSetting(name):
    """Return the entry of the global encoderSettings with the given name, or None."""
    return next((setting for setting in encoderSettings
                 if setting.name == name), None)
class EncoderAndOutputPath(object):
    """One encoding target: a setting plus the extension-less output path."""

    def __init__(self, setting, outputPath):
        self.setting = setting
        self.outputPath = outputPath
        self.encoder = setting.encoder  # cached for convenient access
class QueueEntry(object):
    """One input file plus every (encoder, output path) pair it must produce."""

    def __init__(self, number, inputAudioFile):
        self.number = number                  # 1-based position, for progress output
        self.inputAudioFile = inputAudioFile  # the AudioFile to transcode
        self.encoderAndOutputPaths = []       # EncoderAndOutputPath instances

    def addEncoderAndOutputPath(self, setting, outputPath):
        """Register one more encoding target for this entry."""
        pair = EncoderAndOutputPath(setting, outputPath)
        self.encoderAndOutputPaths.append(pair)
# ------------------------------------------------------------------------------
def parseCommandLine():
    """Parse sys.argv; exits with EXIT_CMDFAILURE on any usage error."""
    # return (settings, input directories, output directory)
    # Accept the following command line arguments:
    # -s Encoder Setting (at least one)
    # -i Input Directory (at least one)
    # Output Directory (exactly one)
    settings = []
    inputDirectories = []
    outputDirectory = ''
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:i:')
    except getopt.GetoptError as e:
        err(e)
        sys.exit(EXIT_CMDFAILURE)
    # Exactly one positional argument: the output directory.
    if len(args) != 1:
        err('You must specify exactly one output directory')
        sys.exit(EXIT_CMDFAILURE)
    else:
        outputDirectory = os.path.normpath(args[0])
    for opt, arg in opts:
        if opt == '-s':
            # Each -s must name an entry in the global encoderSettings list,
            # and may appear only once.
            setting = findEncoderSetting(arg)
            if setting:
                if settings.count(setting) > 0:
                    err('Encoder setting "' + arg + '" was specified more than once')
                    sys.exit(EXIT_CMDFAILURE)
                else:
                    settings.append(setting)
            else:
                err('Encoder setting "' + arg + '" does not exist')
                sys.exit(EXIT_CMDFAILURE)
        elif opt == '-i':
            # NOTE(review): 'dir' shadows the builtin; kept as-is (doc-only edit).
            dir = os.path.normpath(arg)
            if os.path.isdir(dir):
                # Two inputs with the same basename would land under the same
                # output subtree (see populateQueue) -- warn, but continue.
                for d in inputDirectories:
                    if os.path.basename(d) == os.path.basename(dir):
                        err('WARNING: Duplicate basenames in input directories: ' + os.path.basename(dir))
                inputDirectories.append(dir)
            else:
                err('Not a directory: ' + dir)
                sys.exit(EXIT_CMDFAILURE)
    if len(settings) < 1:
        err('You must specify at least one encoder setting')
        sys.exit(EXIT_CMDFAILURE)
    if len(inputDirectories) < 1:
        err('You must specify at least one input folder')
        sys.exit(EXIT_CMDFAILURE)
    return (settings, inputDirectories, outputDirectory)
def recursiveFileList(root):
    '''Return a FileList of all the files in a directory'''
    # Clean up the root path (and remove the trailing slash)
    root = os.path.normpath(root)
    paths = []
    for dirpath, dirnames, filenames in os.walk(root):
        # Collect every non-hidden file in the directory.
        for filename in filenames:
            if filename.startswith('.'):
                dbg('Ignoring file ' + os.path.join(dirpath, filename))
                continue
            paths.append(os.path.join(dirpath, filename))
    paths.sort()
    return FileList(root, paths)
def populateQueue(inputQueue, settings, inputFileLists, outputDirectory):
    """Turn every recognized audio file into a QueueEntry on inputQueue.

    Each entry carries one (setting, output path) pair per requested
    setting.  Exits the program if no audio files were found at all.
    """
    counter = 1  # 1-based entry numbers, used for progress display
    for fileList in inputFileLists:
        for filePath in fileList.absoluteFilePaths:
            audioFile = makeAudioFile(filePath)
            if audioFile:
                queueEntry = QueueEntry(counter, audioFile)
                counter += 1
                # Add a trailing slash to the rootPath
                rootStr = os.path.join(fileList.rootPath, '')
                # Path relative to the input root; preserves the source
                # directory structure under the output tree.
                relativeFilePath = filePath.split(rootStr, 1)[1]
                relativeFilePathWithoutExtension = os.path.splitext(
                    relativeFilePath)[0]
                rootName = os.path.basename(fileList.rootPath)
                for setting in settings:
                    # <output>/<setting folder>/<root name>/<relative path>
                    outputPath = os.path.join(outputDirectory,
                                              setting.folder, rootName,
                                              relativeFilePathWithoutExtension)
                    queueEntry.addEncoderAndOutputPath(setting, outputPath)
                inputQueue.put(queueEntry)
            else:
                # TODO: It might be a good idea to keep track of these and dump
                # them when transcoding has finished
                err('Could not make an AudioFile out of ' + filePath)
    if inputQueue.qsize() < 1:
        err('No audio files found')
        sys.exit(EXIT_SUCCESS)
def dumpShitList(shitList):
    """Print the path of every failed entry and return how many there were."""
    size = shitList.qsize()
    if size > 0:
        err('The following ' + str(size) + ' files could not be transcoded:')
        # Drain the queue without blocking; get_nowait() == get(block=False).
        while True:
            try:
                entry = shitList.get_nowait()
            except queue.Empty:
                break
            err(' ' + entry.inputAudioFile.path)
    return size
# ------------------------------------------------------------------------------
def worker(threadNum, inputQueue, queueSize, shitList):
    """Thread body: process QueueEntry objects until inputQueue is empty.

    Each entry is decoded once, then encoded once per requested setting.
    Entries that fail at any stage are pushed onto shitList for the
    final report.
    """
    while True:
        try:
            entry = inputQueue.get(block=False)
        except queue.Empty:
            # Queue drained -- this worker is done.
            dbg('Thread ' + str(threadNum) + ' finished')
            return
        prefix = '[{:> 6} / {:>6}]: '.format(entry.number, queueSize)
        msg(prefix + 'Transcoding ' + entry.inputAudioFile.name)
        decodedAudioFile = entry.inputAudioFile.decode()
        if decodedAudioFile is None:
            err(prefix + 'Decode failed')
            shitList.put(entry)
            continue
        for pair in entry.encoderAndOutputPaths:
            folderPath = os.path.dirname(pair.outputPath)
            try:
                dbg(prefix + 'Making directories for ' + folderPath)
                # exist_ok: several workers may create the same folder.
                os.makedirs(folderPath, exist_ok=True)
            except OSError as e:
                err(prefix + 'Could not make directories for "' + folderPath + '": ' + e.strerror)
                shitList.put(entry)
                break
            dbg(prefix + 'Encoding with ' + pair.setting.name)
            encodedAudioFile = pair.encoder.encode(decodedAudioFile,
                                                   pair.outputPath)
            if encodedAudioFile is None:
                err(prefix + 'Encoding with ' + pair.setting.name + ' failed')
                shitList.put(entry)
                break
        # Remove the decoded temp file whether or not every encode succeeded.
        entry.inputAudioFile.removeTemporaryFiles()
        dbg(prefix + 'Finished')
def spawnThreads(inputQueue, queueSize, shitList):
    """Spawn one worker thread per CPU core and block until all have finished."""
    threads = []
    for threadNum in range(1, numCores() + 1):
        dbg('Spawning thread ' + str(threadNum))
        thread = threading.Thread(target=worker,
                                  args=[threadNum, inputQueue, queueSize, shitList])
        threads.append(thread)
        thread.start()
    # The queue drives shutdown: each worker returns when the queue is
    # empty, so joining them all means every entry has been processed.
    for thread in threads:
        thread.join()
def main():
    """Drive the transcoder: parse args, build the work queue, run workers."""
    inputFileLists = [] # A list of FileList objects
    inputQueue = queue.Queue()  # entries waiting to be transcoded
    shitList = queue.Queue()    # entries that failed somewhere along the way
    settings, inputDirectories, outputDirectory = parseCommandLine()
    for s in settings:
        msg('Encoding with ' + s.name)
    for d in inputDirectories:
        msg('Searching ' + d)
    msg('Outputting to ' + outputDirectory)
    for d in inputDirectories:
        inputFileLists.append(recursiveFileList(d))
    populateQueue(inputQueue, settings, inputFileLists, outputDirectory)
    # Snapshot the size now; the workers drain the queue concurrently.
    queueSize = inputQueue.qsize()
    timer = Timer()
    sep()
    timer.start()
    spawnThreads(inputQueue, queueSize, shitList)
    timer.stop()
    sep()
    shitListSize = dumpShitList(shitList)
    msg('Encoded ' + str(queueSize - shitListSize) + ' files in ' + timer.stringDelta())
# ------------------------------------------------------------------------------
# Registry of the encoder settings selectable with -s <name>; must be defined
# after the encoder classes, hence the placeholder assignment near the top.
encoderSettings = [
    # NAME FOLDER EXTENSION ENCODER
    EncoderSetting('alac', 'alac', 'm4a', ALACAudioEncoder('')),
    EncoderSetting('flac', 'flac', 'flac', FLACAudioEncoder('--best --verify')),
    EncoderSetting('oggenc-q5', 'vorbis-q5', 'ogg', OggencAudioEncoder('-q 5')),
    EncoderSetting('lame-vbr2', 'mp3-vbr2', 'mp3', LAMEAudioEncoder('-m j -h --vbr-new -V 2 --id3v2-only --noreplaygain')),
    EncoderSetting('lame-cbr192', 'mp3-cbr192', 'mp3', LAMEAudioEncoder('-m j -h -b 192 --id3v2-only --noreplaygain')),
    EncoderSetting('lame-cbr256', 'mp3-cbr256', 'mp3', LAMEAudioEncoder('-m j -h -b 256 --id3v2-only --noreplaygain')),
    EncoderSetting('lame-standard', 'mp3-standard', 'mp3', LAMEAudioEncoder('--preset standard --id3v2-only --noreplaygain')),
    EncoderSetting('lame-extreme', 'mp3-extreme', 'mp3', LAMEAudioEncoder('--preset extreme --id3v2-only --noreplaygain')),
    EncoderSetting('lame-insane', 'mp3-insane', 'mp3', LAMEAudioEncoder('--preset insane --id3v2-only --noreplaygain')),
]
# ------------------------------------------------------------------------------
# Entry point when run as a script (nothing runs on plain import).
if __name__ == '__main__':
    main()
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import eventlet
import inspect
from oslo_config import cfg
from oslo_context import context as oslo_context
from oslo_log import log as logging
from oslo_utils import timeutils
from osprofiler import profiler
import six
from senlin.common import consts
from senlin.common import context
from senlin.common import exception as exc
from senlin.common.i18n import _
from senlin.common import schema
from senlin.common import utils
from senlin.drivers import base as driver_base
from senlin.engine import environment
from senlin.objects import credential as co
from senlin.objects import profile as po
LOG = logging.getLogger(__name__)
class Profile(object):
    """Base class for profiles."""

    # Version information for the profile type; empty in the base class
    # (presumably filled in by concrete profile types -- confirm).
    VERSIONS = {}

    # Top-level keys every profile spec must carry.
    KEYS = (
        TYPE, VERSION, PROPERTIES,
    ) = (
        'type', 'version', 'properties',
    )

    # Schema of the outer spec envelope; the PROPERTIES payload is checked
    # against the type-specific properties_schema below.
    spec_schema = {
        TYPE: schema.String(
            _('Name of the profile type.'),
            required=True,
        ),
        VERSION: schema.String(
            _('Version number of the profile type.'),
            required=True,
        ),
        PROPERTIES: schema.Map(
            _('Properties for the profile.'),
            required=True,
        )
    }

    properties_schema = {}  # empty here; concrete types supply their own
    OPERATIONS = {}         # empty here; concrete types supply their own
    def __new__(cls, name, spec, **kwargs):
        """Create a new profile of the appropriate class.

        :param name: The name for the profile.
        :param spec: A dictionary containing the spec for the profile.
        :param kwargs: Keyword arguments for profile creation.
        :returns: An instance of a specific sub-class of Profile.
        """
        type_name, version = schema.get_spec_version(spec)
        type_str = "-".join([type_name, version])
        if cls != Profile:
            # Instantiated through a concrete subclass already; use it.
            ProfileClass = cls
        else:
            # Dispatch on the 'type-version' string through the global
            # environment registry to find the concrete profile class.
            ProfileClass = environment.global_env().get_profile(type_str)
        return super(Profile, cls).__new__(ProfileClass)
    def __init__(self, name, spec, **kwargs):
        """Initialize a profile instance.

        :param name: A string that specifies the name for the profile.
        :param spec: A dictionary containing the detailed profile spec.
        :param kwargs: Keyword arguments for initializing the profile.
        :returns: An instance of a specific sub-class of Profile.
        """
        type_name, version = schema.get_spec_version(spec)
        self.type_name = type_name
        self.version = version
        type_str = "-".join([type_name, version])

        self.name = name
        self.spec = spec

        # Identity/persistence fields; None/derived until stored.
        self.id = kwargs.get('id', None)
        self.type = kwargs.get('type', type_str)

        # Ownership scoping.
        self.user = kwargs.get('user')
        self.project = kwargs.get('project')
        self.domain = kwargs.get('domain')

        self.metadata = kwargs.get('metadata', {})
        self.created_at = kwargs.get('created_at', None)
        self.updated_at = kwargs.get('updated_at', None)

        # Schema-wrapped views: the outer envelope, and the version-specific
        # PROPERTIES payload.
        self.spec_data = schema.Spec(self.spec_schema, self.spec)
        self.properties = schema.Spec(
            self.properties_schema,
            self.spec.get(self.PROPERTIES, {}),
            version)

        if not self.id:
            # new object needs a context dict
            self.context = self._init_context()
        else:
            self.context = kwargs.get('context')

        # Driver client caches (None until assigned -- presumably created
        # lazily elsewhere; confirm against the client accessors).
        self._computeclient = None
        self._networkclient = None
        self._orchestrationclient = None
        self._workflowclient = None
        self._block_storageclient = None
        self._glanceclient = None
@classmethod
def _from_object(cls, profile):
"""Construct a profile from profile object.
:param profile: a profile object that contains all required fields.
"""
kwargs = {
'id': profile.id,
'type': profile.type,
'context': profile.context,
'user': profile.user,
'project': profile.project,
'domain': profile.domain,
'metadata': profile.metadata,
'created_at': profile.created_at,
'updated_at': profile.updated_at,
}
return cls(profile.name, profile.spec, **kwargs)
    @classmethod
    def load(cls, ctx, profile=None, profile_id=None, project_safe=True):
        """Retrieve a profile object from database.

        :param ctx: Request context.
        :param profile: An already-fetched profile object; skips the DB read.
        :param profile_id: ID to fetch when *profile* is not given.
        :param project_safe: Restrict the lookup to the requesting project.
        :raises ResourceNotFound: when no matching record exists.
        """
        if profile is None:
            profile = po.Profile.get(ctx, profile_id,
                                     project_safe=project_safe)
            if profile is None:
                raise exc.ResourceNotFound(type='profile', id=profile_id)
        return cls._from_object(profile)
    @classmethod
    def create(cls, ctx, name, spec, metadata=None):
        """Create a profile object and validate it.

        :param ctx: The requesting context.
        :param name: The name for the profile object.
        :param spec: A dict containing the detailed spec.
        :param metadata: An optional dictionary specifying key-value pairs to
                         be associated with the profile.
        :returns: An instance of Profile.
        :raises InvalidSpec: when construction or validation fails.
        """
        if metadata is None:
            metadata = {}

        try:
            profile = cls(name, spec, metadata=metadata, user=ctx.user_id,
                          project=ctx.project_id)
            # True -> validate the type-specific properties too, not just
            # the outer spec envelope.
            profile.validate(True)
        except (exc.ResourceNotFound, exc.ESchema) as ex:
            error = _("Failed in creating profile %(name)s: %(error)s"
                      ) % {"name": name, "error": six.text_type(ex)}
            raise exc.InvalidSpec(message=error)

        profile.store(ctx)
        return profile
    @classmethod
    def delete(cls, ctx, profile_id):
        """Delete the profile record identified by *profile_id*."""
        po.Profile.delete(ctx, profile_id)
    def store(self, ctx):
        """Store the profile into database and return its ID."""
        timestamp = timeutils.utcnow(True)

        values = {
            'name': self.name,
            'type': self.type,
            'context': self.context,
            'spec': self.spec,
            'user': self.user,
            'project': self.project,
            'domain': self.domain,
            # NOTE: key is 'meta_data' (with underscore) while the attribute
            # is 'metadata'.
            'meta_data': self.metadata,
        }

        if self.id:
            # Existing record: update in place and bump updated_at.
            self.updated_at = timestamp
            values['updated_at'] = timestamp
            po.Profile.update(ctx, self.id, values)
        else:
            # First save: create the record and adopt its generated ID.
            self.created_at = timestamp
            values['created_at'] = timestamp
            profile = po.Profile.create(ctx, values)
            self.id = profile.id

        return self.id
@classmethod
@profiler.trace('Profile.create_object', hide_args=False)
def create_object(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_create(obj)
@classmethod
@profiler.trace('Profile.create_cluster_object', hide_args=False)
def create_cluster_object(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
try:
ret = profile.do_cluster_create(obj)
except NotImplementedError:
return None
return ret
@classmethod
@profiler.trace('Profile.delete_object', hide_args=False)
def delete_object(cls, ctx, obj, **params):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_delete(obj, **params)
@classmethod
@profiler.trace('Profile.delete_cluster_object', hide_args=False)
def delete_cluster_object(cls, ctx, obj, **params):
profile = cls.load(ctx, profile_id=obj.profile_id)
try:
ret = profile.do_cluster_delete(obj, **params)
except NotImplementedError:
return None
return ret
@classmethod
@profiler.trace('Profile.update_object', hide_args=False)
def update_object(cls, ctx, obj, new_profile_id=None, **params):
profile = cls.load(ctx, profile_id=obj.profile_id)
new_profile = None
if new_profile_id:
new_profile = cls.load(ctx, profile_id=new_profile_id)
return profile.do_update(obj, new_profile, **params)
    @classmethod
    @profiler.trace('Profile.get_details', hide_args=False)
    def get_details(cls, ctx, obj):
        """Retrieve details of the physical resource backing the object."""
        profile = cls.load(ctx, profile_id=obj.profile_id)
        return profile.do_get_details(obj)
    @classmethod
    @profiler.trace('Profile.adopt_node', hide_args=False)
    def adopt_node(cls, ctx, obj, type_name, overrides=None, snapshot=False):
        """Adopt a node.

        :param ctx: Request context.
        :param obj: A temporary node object.
        :param type_name: Profile type and version formatted as
            "<type>-<version>".
        :param overrides: An optional parameter that specifies the set of
            properties to be overridden.
        :param snapshot: A boolean flag indicating whether a snapshot should
            be created before adopting the node.
        :returns: A dictionary containing the profile spec created from the
            specific node, or a dictionary containing error message.
        """
        # NOTE(review): split("-") assumes exactly one dash in type_name; a
        # type or version containing '-' would mis-parse -- confirm inputs.
        parts = type_name.split("-")
        tmpspec = {"type": parts[0], "version": parts[1]}
        profile = cls("name", tmpspec)
        return profile.do_adopt(obj, overrides=overrides, snapshot=snapshot)
    @classmethod
    @profiler.trace('Profile.join_cluster', hide_args=False)
    def join_cluster(cls, ctx, obj, cluster_id):
        """Invoke the profile's join hook when a node joins a cluster."""
        profile = cls.load(ctx, profile_id=obj.profile_id)
        return profile.do_join(obj, cluster_id)
    @classmethod
    @profiler.trace('Profile.leave_cluster', hide_args=False)
    def leave_cluster(cls, ctx, obj):
        """Invoke the profile's leave hook when a node leaves a cluster."""
        profile = cls.load(ctx, profile_id=obj.profile_id)
        return profile.do_leave(obj)
    @classmethod
    @profiler.trace('Profile.check_object', hide_args=False)
    def check_object(cls, ctx, obj):
        """Check the physical resource backing the node object."""
        profile = cls.load(ctx, profile_id=obj.profile_id)
        return profile.do_check(obj)
@classmethod
@profiler.trace('Profile.check_object', hide_args=False)
def healthcheck_object(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_healthcheck(obj)
    @classmethod
    @profiler.trace('Profile.recover_object', hide_args=False)
    def recover_object(cls, ctx, obj, **options):
        """Recover the physical resource backing the node object."""
        profile = cls.load(ctx, profile_id=obj.profile_id)
        return profile.do_recover(obj, **options)
def validate(self, validate_props=False):
"""Validate the schema and the data provided."""
# general validation
self.spec_data.validate()
self.properties.validate()
ctx_dict = self.properties.get('context', {})
if ctx_dict:
argspec = inspect.getargspec(context.RequestContext.__init__)
valid_keys = argspec.args
bad_keys = [k for k in ctx_dict if k not in valid_keys]
if bad_keys:
msg = _("Some keys in 'context' are invalid: %s") % bad_keys
raise exc.ESchema(message=msg)
if validate_props:
self.do_validate(obj=self)
@classmethod
def get_schema(cls):
return dict((name, dict(schema))
for name, schema in cls.properties_schema.items())
@classmethod
def get_ops(cls):
return dict((name, dict(schema))
for name, schema in cls.OPERATIONS.items())
def _init_context(self):
profile_context = {}
if self.CONTEXT in self.properties:
profile_context = self.properties[self.CONTEXT] or {}
ctx_dict = context.get_service_credentials(**profile_context)
ctx_dict.pop('project_name', None)
ctx_dict.pop('project_domain_name', None)
return ctx_dict
    def _build_conn_params(self, user, project):
        """Build connection params for specific user and project.

        :param user: The ID of the user for which a trust will be used.
        :param project: The ID of the project for which a trust will be used.
        :returns: A dict containing the required parameters for connection
            creation.
        :raises exc.TrustNotFound: when no credential exists for the pair.
        """
        cred = co.Credential.get(oslo_context.get_current(), user, project)
        if cred is None:
            raise exc.TrustNotFound(trustor=user)
        trust_id = cred.cred['openstack']['trust']
        # This is supposed to be trust-based authentication
        params = copy.deepcopy(self.context)
        params['trust_id'] = trust_id
        return params
def compute(self, obj):
"""Construct compute client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._computeclient is not None:
return self._computeclient
params = self._build_conn_params(obj.user, obj.project)
self._computeclient = driver_base.SenlinDriver().compute(params)
return self._computeclient
    def glance(self, obj):
        """Construct glance client based on object.

        :param obj: Object for which the client is created. It is expected to
            be None when retrieving an existing client. When creating
            a client, it contains the user and project to be used.
        """
        # Return the cached client when one was already built.
        if self._glanceclient is not None:
            return self._glanceclient
        params = self._build_conn_params(obj.user, obj.project)
        self._glanceclient = driver_base.SenlinDriver().glance(params)
        return self._glanceclient
    def network(self, obj):
        """Construct network client based on object.

        :param obj: Object for which the client is created. It is expected to
            be None when retrieving an existing client. When creating
            a client, it contains the user and project to be used.
        """
        # Return the cached client when one was already built.
        if self._networkclient is not None:
            return self._networkclient
        params = self._build_conn_params(obj.user, obj.project)
        self._networkclient = driver_base.SenlinDriver().network(params)
        return self._networkclient
def orchestration(self, obj):
"""Construct orchestration client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._orchestrationclient is not None:
return self._orchestrationclient
params = self._build_conn_params(obj.user, obj.project)
oc = driver_base.SenlinDriver().orchestration(params)
self._orchestrationclient = oc
return oc
    def workflow(self, obj):
        """Construct workflow client based on object.

        :param obj: Object whose user/project are used when a new client
            must be built; ignored once a client is cached.
        """
        if self._workflowclient is not None:
            return self._workflowclient
        params = self._build_conn_params(obj.user, obj.project)
        self._workflowclient = driver_base.SenlinDriver().workflow(params)
        return self._workflowclient
    def block_storage(self, obj):
        """Construct cinder client based on object.

        :param obj: Object for which the client is created. It is expected to
            be None when retrieving an existing client. When creating
            a client, it contains the user and project to be used.
        """
        # Return the cached client when one was already built.
        if self._block_storageclient is not None:
            return self._block_storageclient
        params = self._build_conn_params(obj.user, obj.project)
        self._block_storageclient = driver_base.SenlinDriver().block_storage(
            params)
        return self._block_storageclient
    # The do_* methods below are driver hooks; the base class raises
    # NotImplementedError so callers can detect unsupported operations.
    def do_create(self, obj):
        """For subclass to override."""
        raise NotImplementedError

    def do_cluster_create(self, obj):
        """For subclass to override."""
        raise NotImplementedError

    def do_delete(self, obj, **params):
        """For subclass to override."""
        raise NotImplementedError
def do_cluster_delete(self, obj):
"""For subclass to override."""
raise NotImplementedError
    # Optional hooks: these log a warning and return a benign default when
    # the profile type does not specialize them.
    def do_update(self, obj, new_profile, **params):
        """For subclass to override."""
        LOG.warning("Update operation not supported.")
        return True

    def do_check(self, obj):
        """For subclass to override."""
        LOG.warning("Check operation not supported.")
        return True

    def do_healthcheck(self, obj):
        """Default healthcheck operation.

        This is provided as a fallback if a specific profile type does not
        override this method.

        :param obj: The node object to operate on.
        :return status: True indicates node is healthy, False indicates
            it is unhealthy.
        """
        # Fall back to the plain check operation.
        return self.do_check(obj)

    def do_get_details(self, obj):
        """For subclass to override."""
        LOG.warning("Get_details operation not supported.")
        return {}

    def do_adopt(self, obj, overrides=None, snapshot=False):
        """For subclass to override."""
        LOG.warning("Adopt operation not supported.")
        return {}

    def do_join(self, obj, cluster_id):
        """For subclass to override to perform extra operations."""
        LOG.warning("Join operation not specialized.")
        return True

    def do_leave(self, obj):
        """For subclass to override to perform extra operations."""
        LOG.warning("Leave operation not specialized.")
        return True
def do_recover(self, obj, **options):
"""Default recover operation.
This is provided as a fallback if a specific profile type does not
override this method.
:param obj: The node object to operate on.
:param options: Keyword arguments for the recover operation.
:return id: New id of the recovered resource or None if recovery
failed.
:return status: True indicates successful recovery, False indicates
failure.
"""
operation = options.get('operation', None)
force_recreate = options.get('force_recreate', None)
delete_timeout = options.get('delete_timeout', None)
if operation.upper() != consts.RECOVER_RECREATE:
LOG.error("Recover operation not supported: %s", operation)
return None, False
extra_params = options.get('operation_params', None)
fence_compute = False
if extra_params:
fence_compute = extra_params.get('fence_compute', False)
try:
self.do_delete(obj, force=fence_compute, timeout=delete_timeout)
except exc.EResourceDeletion as ex:
if force_recreate:
# log error and continue on to creating the node
LOG.warning('Failed to delete node during recovery action: %s',
ex)
else:
raise exc.EResourceOperation(op='recovering', type='node',
id=obj.id,
message=six.text_type(ex))
# pause to allow deleted resource to get reclaimed by nova
# this is needed to avoid a problem when the compute resources are
# at their quota limit. The deleted resource has to become available
# so that the new node can be created.
eventlet.sleep(cfg.CONF.batch_interval)
res = None
try:
res = self.do_create(obj)
except exc.EResourceCreation as ex:
raise exc.EResourceOperation(op='recovering', type='node',
id=obj.id, message=six.text_type(ex))
return res, True
    def do_validate(self, obj):
        """For subclass to override."""
        LOG.warning("Validate operation not supported.")
        return True

    def to_dict(self):
        """Return a dict representation of the profile for API output."""
        pb_dict = {
            'id': self.id,
            'name': self.name,
            'type': self.type,
            'user': self.user,
            'project': self.project,
            'domain': self.domain,
            'spec': self.spec,
            'metadata': self.metadata,
            'created_at': utils.isotime(self.created_at),
            'updated_at': utils.isotime(self.updated_at),
        }
        return pb_dict
def validate_for_update(self, new_profile):
non_updatables = []
for (k, v) in new_profile.properties.items():
if self.properties.get(k, None) != v:
if not self.properties_schema[k].updatable:
non_updatables.append(k)
if not non_updatables:
return True
msg = ", ".join(non_updatables)
LOG.error("The following properties are not updatable: %s.", msg)
return False
|
import codecs
import csv
import json
import os
import re
import sys
import tarfile
from itertools import islice
from biorun import convert
from biorun import utils
from biorun.libs import placlib as plac
from biorun.libs.sqlitedict import SqliteDict
JSON_DB_NAME = "taxdb.json"
SQLITE_DB_NAME = "taxdb.sqlite"
TAXDB_URL = "http://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz"
TAXDB_NAME = "taxdump.tar.gz"
join = os.path.join
# Create the full paths
TAXDB_NAME = join(utils.DATADIR, TAXDB_NAME)
SQLITE_DB = join(utils.DATADIR, SQLITE_DB_NAME)
JSON_DB = join(utils.DATADIR, JSON_DB_NAME)
# Keys into the database
GRAPH, BACK, TAXID = "GRAPH", "BACK", "TAXIDS"
# Indentation character
INDENT = ' '
# Fields separator
SEP = ', '
# Used during debugging only to speed up database builds.
# Keep it at None
LIMIT = None
CHUNK = 25000
logger = utils.logger
def download_prebuilt():
    """
    Downloads the prebuilt taxonomy databases from the remote site.
    """
    # No trailing slash: the f-strings below add their own separator
    # (the original produced ".../bio//taxdb.sqlite").
    url = "http://data.biostarhandbook.com/bio"
    url_sqlite = f"{url}/taxdb.sqlite"
    url_json = f"{url}/taxdb.json"
    utils.download(url=url_sqlite, dest_name=SQLITE_DB_NAME, cache=True)
    utils.download(url=url_json, dest_name=JSON_DB_NAME, cache=True)
def update_taxdump(url=TAXDB_URL, dest_name=TAXDB_NAME):
    """
    Downloads the NCBI taxdump archive to the data directory.

    :param url: source URL of the taxdump tarball.
    :param dest_name: local destination path.
    """
    utils.download(url=url, dest_name=dest_name)
def build_database(archive=TAXDB_NAME, limit=None):
    """
    Builds the taxonomy database from a taxdump archive.

    Downloads the latest taxdump, parses names.dmp and nodes.dmp, then
    saves the results into both the SQLite and the JSON stores.

    :param archive: path of the taxdump archive.
    :param limit: optional row limit (debug builds only).
    """
    print(f"*** building database from: {archive}")

    # The location of the archive.
    path = os.path.join(utils.DATADIR, archive)

    # Download the latest taxdump file.
    update_taxdump()

    # Check the file.
    if not os.path.isfile(path):
        utils.error("no taxdump file found")

    # Parse the names
    tax2data = parse_names(archive, limit=limit)

    # Parse the nodes and backpropagation.
    graph = parse_nodes(archive, tax2data=tax2data, limit=limit)

    # A shortcut to the function.
    def save_table(name, obj):
        utils.save_table(name=name, obj=obj, fname=SQLITE_DB)

    # Save the taxid definitions.
    save_table(TAXID, tax2data)

    # Save the graph.
    save_table(GRAPH, graph)

    print("*** saving the JSON model")

    # JSON_DB is already a full path under DATADIR; joining it onto DATADIR
    # again (as before) duplicated the directory for relative DATADIR values.
    json_path = JSON_DB

    # Save the JSON file as well; the with-block guarantees the handle is
    # closed even if serialization fails.
    store = dict(TAXID=tax2data, GRAPH=graph)
    with open(json_path, 'wt') as fp:
        json.dump(store, fp, indent=4)
def open_tarfile(archive, filename, limit=None, delimiter="\t"):
    """
    Returns the content of named file in a tarred archive.

    :param archive: path to the .tar.gz archive.
    :param filename: member name inside the archive (e.g. "names.dmp").
    :param limit: optional maximum number of rows to yield.
    :param delimiter: field delimiter passed to csv.reader.
    :returns: a lazy csv.reader over the decoded member lines.
    """
    # NOTE(review): the tar handle is intentionally not closed here -- the
    # returned reader is lazy and still pulls data from it; it is released
    # when the stream is garbage collected. Confirm this is acceptable.
    tar = tarfile.open(archive, "r:gz")
    mem = tar.getmember(filename)
    stream = tar.extractfile(mem)
    # Member bytes -> ascii text, truncated to `limit` rows when set.
    stream = map(lambda x: x.decode("ascii"), stream)
    stream = islice(stream, limit)
    stream = csv.reader(stream, delimiter=delimiter)
    return stream
def search_names(word, archive=TAXDB_NAME, name="names.dmp", limit=None):
    """
    Searches the names.dmp component of the taxdump for a pattern.

    :param word: search pattern (treated as a case-insensitive regex).
    :yields: (taxid, name) tuples for matching rows.
    """
    # Needs a taxdump to work.
    if not os.path.isfile(archive):
        utils.error("taxdump file not found (download and build it first)")

    # Open stream into the tarfile.
    stream = open_tarfile(archive=archive, filename=name, limit=limit)

    # The pattern may be regular expression.
    patt = re.compile(word, re.IGNORECASE)

    # Labels that will be searched.
    valid = {'scientific name', 'equivalent name', 'genbank common name'}

    def select(row):
        # names.dmp layout: taxid, name and label live in columns 0, 2, 6.
        taxid, name, label = row[0], row[2], row[6]
        return label in valid and patt.search(name)

    # Apply the selector.
    stream = filter(select, stream)

    for elems in stream:
        taxid, name, label = elems[0], elems[2], elems[6]
        yield taxid, name
def parse_names(archive, filename="names.dmp", limit=None):
    """
    Parses the names.dmp component of the taxdump.

    :returns: dict mapping taxid -> [sciname, rank, common_name, parent,
        assembly_count]; rank and parent are filled later by parse_nodes().
    """
    # Parse the tarfile.
    stream = open_tarfile(archive=archive, filename=filename, limit=limit)

    # Lookup tables.
    tax2data, name2tax = {}, {}

    print(f"*** processing: {filename}")

    # Process the names.dmp file.
    for row in stream:
        # The names.dmp file structure.
        taxid, name, label = row[0], row[2], row[6]

        # Assembly count if exists (TODO)
        count = 0

        # Populate only for scientific names.
        if label == 'scientific name':
            # 5 columns: sciname, rank, common name, parent, assembly count
            # Some information will be only known later, from the nodes.dmp
            tax2data[taxid] = [name, "", "", "", count]
        elif label == 'genbank common name':
            name2tax[name] = taxid

    # name2tax maps common name -> taxid, so iterate as (name, taxid).
    # The original iterated items() as (taxid, name), swapping the values,
    # so the common-name column was silently never filled in.
    for name, taxid in name2tax.items():
        if taxid in tax2data:
            tax2data[taxid][2] = name

    return tax2data
def parse_nodes(archive, tax2data, filename="nodes.dmp", limit=None):
    """
    Parses the nodes.dmp component of the taxdump.

    Builds the parent -> children graph and fills in the rank and parent
    columns of the tax2data table (mutated in place).
    """
    # Parse the NCBI taxump.
    stream = open_tarfile(archive=archive, filename=filename, limit=limit)

    # Data structures to fill.
    graph = {}

    print("*** processing: nodes.dmp")

    # Process the nodes.dmp file.
    for row in stream:
        # nodes.dmp file format.
        child, parent, rank = row[0], row[2], row[4]

        # Connect parent to all children
        graph.setdefault(parent, []).append(child)

        # Mutates existing datastructure with rank and parent info.
        if child in tax2data:
            tax2data[child][1] = rank
            tax2data[child][3] = parent

    return graph
def open_db(table, fname=SQLITE_DB, flag='c'):
    """
    Opens a connection to a data table.

    :param table: table name (e.g. TAXID or GRAPH).
    :param fname: sqlite file path.
    :param flag: SqliteDict open mode ('c' = create if missing).
    """
    # Values are stored as JSON blobs.
    conn = SqliteDict(fname, tablename=table, flag=flag, encode=json.dumps, decode=json.loads)
    return conn
queue = list()
def get_values(node, names):
    """Return the (sciname, rank, common_name, parent, count) tuple for a
    node, substituting placeholders when the node is unknown."""
    fallback = ("MISSING", "NO RANK", "", "", 0)
    sciname, rank, common, parent, count = names.get(node, fallback)
    return sciname, rank, common, parent, count
def node_formatter(node, names, depth):
    """
    Creates a long form representation of a node.

    Output is "<rank><SEP><taxid><SEP><sciname>[<SEP><common name>]"
    indented by INDENT * depth.
    """
    sciname, rank, name, parent, count = get_values(node, names)
    fields = [rank, node, sciname]
    # Append the common name only when it adds information.
    if name and name != sciname:
        fields.append(name)
    return INDENT * depth + SEP.join(fields)
def backprop(node, names, collect=None):
    """
    Collects the ancestors of a node by walking parent links backwards.

    :param node: taxid to start from.
    :param names: dict of taxid -> (sciname, rank, name, parent, count).
    :param collect: list that ancestor taxids are appended to. A fresh
        list is created when omitted; the original used a mutable default
        argument, which accumulated results across calls.
    :returns: the collect list (also mutated in place for existing callers).
    """
    if collect is None:
        collect = []
    if node in names:
        sciname, rank, name, parent, count = names[node]
        # Stop at self-parenting roots (the taxonomy root points to itself).
        if parent and parent != node:
            collect.append(parent)
            backprop(parent, names, collect)
    return collect
def print_lineage(taxid, names, flat=0):
    """
    Prints the lineage for a taxid.

    :param taxid: node to print the lineage for (must exist in names).
    :param flat: accepted for interface compatibility; not used here.
    """
    # Must be a valid taxid.
    if taxid not in names:
        msg = f"Invalid taxid: {taxid}"
        utils.error(msg)

    # Will back propagate to parents.
    collect = [taxid]
    backprop(taxid, names, collect=collect)

    # Going back to superkingdom only.
    collect = collect[:-1]

    # Start at the parent.
    collect = reversed(collect)

    # Format each node, indenting one level per ancestry step.
    for step, node in enumerate(collect):
        text = node_formatter(node, names=names, depth=step)
        print(text)
def get_data(preload=False, acc=False):
    """
    Returns the (names, graph) pair backing the taxonomy database.

    :param preload: when True, load the whole JSON model into memory;
        otherwise return lazy SqliteDict views.
    :param acc: NOTE(review): accepted but currently unused -- confirm
        whether accession support was meant to be wired in here.
    """
    if preload:
        if not os.path.isfile(JSON_DB):
            utils.error(f"taxonomy file not found (you must build it first): {JSON_DB}")
        store = json.load(open(JSON_DB))
        names = store[TAXID]
        graph = store[GRAPH]
    else:
        names = open_db(TAXID)
        graph = open_db(GRAPH)
    return names, graph
def print_stats(names, graph):
    """Print the node and parent counts for the taxonomy database."""
    node_size, graph_size = len(names), len(graph)
    msg = "TaxDB: nodes={:,d} parents={:,d}".format(node_size, graph_size)
    print(msg)
def search_taxa(word, preload=False):
    """
    Search the taxonomy names for a pattern and print matching nodes.
    """
    names, graph = get_data(preload=preload)
    # Interpret escape sequences typed on the command line.
    word = decode(word)
    print(f"# Searching taxonomy for: {word}")
    for taxid, name in search_names(word):
        text = node_formatter(taxid, names=names, depth=0)
        print(text)
def check_num(value):
    """Return True if *value* parses as an integer, False otherwise.

    NOTE(review): duplicates valid_int() and isnum() in this module;
    consider consolidating into a single helper.
    """
    try:
        int(value)
    except ValueError:
        return False
    return True
def print_database(names, graph):
    """Print every node in the database, one per line, unindented."""
    for taxid in names:
        print(node_formatter(taxid, names=names, depth=0))
def print_term(taxid, graph, names, maxdepth=0):
    """
    Prints a term when visited via DFS.

    Each node in the subtree rooted at taxid is printed indented by its
    depth; maxdepth (when nonzero) limits how deep the traversal goes.
    """
    def formatter(node, depth, **kwds):
        text = node_formatter(node, names=names, depth=depth)
        print(text)
    dfs_visitor(graph, taxid, visited={}, func=formatter, maxdepth=maxdepth)
def donothing(*args, **kwds):
    """
    Placeholder to perform no action.

    Used as the default visitor callback in dfs_visitor().
    """
    pass
def valid_int(text):
    """Return True if *text* parses as an integer, False otherwise.

    NOTE(review): duplicates check_num() and isnum() in this module;
    consider consolidating into a single helper.
    """
    try:
        int(text)
    except ValueError:
        return False
    return True
def dfs_visitor(graph, node, visited, depth=0, func=donothing, maxdepth=0):
    """
    Performs depth-first search and collects output into the visited dictionary keyed by depth.
    Calls func at every visit.

    :param graph: dict mapping a node to its list of children.
    :param visited: dict of node -> depth; doubles as the cycle guard.
    :param func: callback invoked as func(node=..., depth=..., visited=...).
    :param maxdepth: when nonzero, children at depth >= maxdepth are skipped.
    """
    # NOTE(review): recursion depth equals the deepest lineage visited;
    # assumed to stay well under Python's recursion limit for taxonomies.
    if node not in visited:
        visited[node] = depth
        func(node=node, depth=depth, visited=visited)
        for nbr in graph.get(node, []):
            nextdepth = depth + 1
            if maxdepth and nextdepth >= maxdepth:
                continue
            dfs_visitor(graph=graph, node=nbr, depth=nextdepth, visited=visited, func=func, maxdepth=maxdepth)
def filter_file(stream, terms, keep, remove, graph, colidx=0):
    """
    Filters a file to retain only the rows where a taxid is in a subtree.

    :param stream: input line stream; when None the first entry of terms
        is treated as a filename.
    :param keep: comma-separated taxids whose subtrees are retained.
    :param remove: comma-separated taxids whose subtrees are dropped.
    :param colidx: zero-based column holding the taxid.
    """
    if not stream:
        if len(terms) == 0:
            msg = f"filtering needs an input stream or a filename"
            utils.error(msg)
        stream = open(terms[0])

    # Collects all children of the taxids.
    keep_dict, remove_dict = {}, {}

    # Taxids to keep
    # NOTE(review): an empty keep/remove string still yields [''] here and
    # visits the '' node; harmless since the filters below are only applied
    # when the corresponding flag is truthy -- confirm intended.
    keeper = keep.split(",")

    # Fill the keeper dictionary.
    for term in keeper:
        dfs_visitor(graph=graph, node=term, visited=keep_dict)

    # Fill the remover dictionary.
    remover = remove.split(",")
    for term in remover:
        dfs_visitor(graph=graph, node=term, visited=remove_dict)

    # Read the stream.
    reader = csv.reader(stream, delimiter="\t")

    # Selection condition.
    def keep_func(row):
        taxid = row[colidx]
        return taxid in keep_dict

    def remove_func(row):
        taxid = row[colidx]
        return taxid not in remove_dict

    # What to keep.
    if keep:
        reader = filter(keep_func, reader)

    # What to remove.
    if remove:
        reader = filter(remove_func, reader)

    # Generate the output.
    writer = csv.writer(sys.stdout, delimiter="\t")
    writer.writerows(reader)
def parse_taxids(json):
    """
    Attempts to parse taxids from a json data.

    NOTE(review): relies on a name ``jsonrec`` that is not imported in this
    module -- calling this function as-is raises NameError; confirm the
    missing import upstream. The parameter also shadows the stdlib ``json``
    module within this function.
    """
    # Parses the taxids
    doubles = [jsonrec.find_taxid(rec) for rec in json] if json else [[]]
    # Flatten the list
    taxids = [elem for sublist in doubles for elem in sublist]
    return taxids
def decode(text):
    """Interpret escape sequences (\\t, \\n, ...) embedded in *text*."""
    return codecs.decode(text, "unicode_escape")
def isnum(x):
    """Return True if *x* parses as an integer, False otherwise.

    NOTE(review): duplicates check_num() and valid_int() in this module;
    consider consolidating into a single helper.
    """
    try:
        int(x)
    except ValueError:
        return False
    return True
def parse_stream(stream, field=1, delim="\t"):
    """
    Parses a stream for a column.

    :param stream: iterable of text lines.
    :param field: one-based column index to extract.
    :param delim: field delimiter.
    :returns: list of values from the requested column; comment lines,
        blank lines and rows that are too short are skipped.
    """
    # One based coordinate system.
    colidx = field - 1

    # Sanity check.
    assert colidx >= 0

    # Remove comments
    stream = filter(lambda x: not x.startswith('#'), stream)

    # Remove empty lines
    stream = filter(lambda x: x.strip(), stream)

    # Create a reader.
    reader = csv.reader(stream, delimiter=delim)

    # A row must have MORE than colidx fields to contain the column.
    # The original used >=, which let rows of exactly colidx fields
    # through and then crashed with IndexError on row[colidx].
    reader = filter(lambda row: len(row) > colidx, reader)

    return [row[colidx] for row in reader]
@plac.pos("terms", "taxids or search queries")
@plac.flg('build', "updates and builds a local database")
@plac.flg('preload', "loads entire database in memory")
@plac.flg('list_', "lists database content", abbrev='l')
@plac.opt('scinames', "scientific or common names in each line. ", abbrev="S")
@plac.flg('children', "include children when returning when parsing latin names", abbrev='C')
@plac.flg('lineage', "show the lineage for a taxon term", abbrev="L")
@plac.opt('indent', "the indentation depth (set to zero for flat)")
@plac.opt('sep', "separator (default is ', ')", abbrev='s')
@plac.flg('metadata', "downloads metadata for the taxon", abbrev='m')
@plac.flg('download', "downloads the database from the remote site", abbrev='G')
@plac.opt('depth', "how deep to visit a clade ", abbrev='d', type=int)
@plac.opt('keep', "clade to keep", abbrev='K')
@plac.opt('remove', "clade to remove", abbrev='R')
@plac.opt('field', "which column to read when filtering")
@plac.flg('verbose', "verbose mode, prints more messages")
@plac.flg('accessions', "Print the accessions number for each ")
def run(lineage=False, build=False, download=False, accessions=False, keep='', remove='', field=1,
        scinames='', children=False, list_=False, depth=0, metadata=False, preload=False, indent=2, sep='',
        verbose=False, *terms):
    """Command-line entry point dispatching to the taxonomy subcommands."""
    global SEP, INDENT, LIMIT

    # Input may come as a stream.
    if not terms and not sys.stdin.isatty():
        stream = sys.stdin
    else:
        stream = None

    # Indentation level
    INDENT = ' ' * indent

    # Separator string.
    SEP = decode(sep) if sep else ", "

    # Set the verbosity
    utils.set_verbosity(logger, level=int(verbose))

    # Download the prebuilt database.
    if download:
        download_prebuilt()

    # Downloads a new taxdump and builds a new taxonomy database.
    if build:
        build_database(limit=LIMIT)

    # Get the content of the database.
    names, graph = get_data(preload=preload, acc=accessions)

    # List the content of a database.
    if list_:
        print_database(names=names, graph=graph)
        return

    if scinames:
        # NOTE(review): neither search_file() nor `latin` is defined in this
        # module -- this branch raises NameError when exercised; confirm the
        # intended helper upstream.
        search_file(scinames, names=names, latin=latin, graph=graph, include=children)
        return

    # Filters a file by a column.
    if keep or remove:
        filter_file(stream=stream, terms=terms, keep=keep, remove=remove, graph=graph, colidx=field - 1)
        return

    # Input may come from a file or command line.
    if stream:
        terms = parse_stream(stream, field=1)

    # No valid terms found. Print database stats.
    if not terms:
        print_stats(names=names, graph=graph)
        return

    # These are the terms looked up in the database.
    words = []

    # Some terms may be valid data names.
    for term in terms:
        term = term.strip()
        if os.path.isfile(term):
            recs = convert.read_input(fname=term)
            recs = filter(convert.source_only(True), recs)
            for rec in recs:
                print (rec)
            # Attempts to extract the taxid from a genbank file.
        # Add to the terms.
        words.append(term)

    # Produce lineages
    if lineage:
        for term in words:
            print_lineage(term, names=names)
        return

    # Will check to mixed terms (valid taxids and search words mixed)
    # Truth vector to terms in names.
    valid = list(map(lambda x: x in names, words))
    any_valid = any(valid)
    all_valid = all(valid)

    # Mixed term condition.
    mixed_terms = any_valid and not all_valid

    # We don't allow mixed terms (produces different outputs).
    if mixed_terms:
        invalid = ", ".join(filter(lambda x: x not in names, words))
        msg = f"Unkown taxids: {invalid}"
        utils.error(msg)

    # Apply the approprate task to each term separately.
    for term in words:
        if all_valid:
            print_term(term, names=names, graph=graph, maxdepth=depth)
        else:
            search_taxa(term)
if __name__ == '__main__':
# Bony fish: 117565
# Betacoronavirus: 694002
# SARS-COV2: 2697049
# Jawless vertebrates: 1476529
# Vertebrata: 7742
plac.call(run)
|
<filename>PuzzleGame/env/Lib/site-packages/bangtal/game.py
from ctypes import *
from bangtal.singleton import *
import enum
class EventID(enum.Enum):
    """Event identifiers delivered through the Bangtal engine callbacks.

    The numeric values presumably mirror the native library's event
    codes -- do not renumber without confirming against the DLL.
    """
    ENTER_SCENE = 1
    LEAVE_SCENE = 2
    PICK_OBJECT = 3
    DROP_OBJECT = 4
    COMBINE_OBJECT = 5
    DISMANTLE_OBJECT = 6
    TIMER = 101
    KEYPAD = 102
    SOUND = 103
class MouseAction(enum.Enum):
    """Mouse gesture kinds reported by the engine's mouse callback."""
    CLICK = 0
    DRAG_UP = 1
    DRAG_DOWN = 2
    DRAG_LEFT = 3
    DRAG_RIGHT = 4
class GameOption(enum.Enum):
    """Toggleable UI options passed to set/getGameOption."""
    ROOM_TITLE = 1
    INVENTORY_BUTTON = 2
    MESSAGE_BOX_BUTTON = 3
class GameImpl:
    """Thin ctypes wrapper around the native "Bangtal" game engine DLL.

    Each method forwards its arguments to the corresponding exported
    ``_xxx`` symbol; float arguments are wrapped in c_float and booleans
    in c_bool as required by the C ABI. NOTE(review): windll is
    Windows-only, so this class presumably does not load on other
    platforms -- confirm.
    """
    # Handle to the loaded native library (set in __init__).
    _bt = None

    def __init__(self):
        self._bt = windll.LoadLibrary("Bangtal")

    def startGame(self, scene):
        self._bt._startGame(scene)

    def enterScene(self, scene):
        self._bt._enterScene(scene)

    def endGame(self):
        self._bt._endGame()

    def createScene(self, name, file):
        return self._bt._createScene(name, file)

    def setSceneImage(self, scene, file):
        self._bt._setSceneImage(scene, file)

    def setSceneLight(self, scene, light):
        self._bt._setSceneLight(scene, c_float(light))

    def createObject(self, file):
        return self._bt._createObject(file)

    def setObjectImage(self, object, file):
        self._bt._setObjectImage(object, file)

    def locateObject(self, object, scene, x, y):
        self._bt._locateObject(object, scene, x, y)

    def scaleObject(self, object, scale):
        self._bt._scaleObject(object, c_float(scale))

    def showObject(self, object):
        self._bt._showObject(object)

    def hideObject(self, object):
        self._bt._hideObject(object)

    def pickObject(self, object):
        self._bt._pickObject(object)

    def dropObject(self, object):
        self._bt._dropObject(object)

    def defineCombination(self, object1, object2, object3):
        self._bt._defineCombination(object1, object2, object3)

    def getHandObject(self):
        return self._bt._getHandObject()

    def showMessage(self, message):
        self._bt._showMessage(message)

    def showKeypad(self, password, object):
        self._bt._showKeypad(password, object)

    def showImageViewer(self, file):
        self._bt._showImageViewer(file)

    def showAudioPlayer(self, file):
        self._bt._showAudioPlayer(file)

    def showVideoPlayer(self, file):
        self._bt._showVideoPlayer(file)

    def createSound(self, file):
        return self._bt._createSound(file)

    def playSound(self, sound, loop):
        self._bt._playSound(sound, loop)

    def stopSound(self, sound):
        self._bt._stopSound(sound)

    def createTimer(self, seconds):
        return self._bt._createTimer(c_float(seconds))

    def setTimer(self, timer, seconds):
        self._bt._setTimer(timer, c_float(seconds))

    def increaseTimer(self, timer, seconds):
        self._bt._increaseTimer(timer, c_float(seconds))

    def decreaseTimer(self, timer, seconds):
        self._bt._decreaseTimer(timer, c_float(seconds))

    def getTimer(self, timer):
        # The native function returns a float; declare it so ctypes does
        # not truncate the result to int.
        self._bt._getTimer.restype = c_float
        return self._bt._getTimer(timer)

    def startTimer(self, timer):
        self._bt._startTimer(timer)

    def stopTimer(self, timer):
        self._bt._stopTimer(timer)

    def showTimer(self, timer):
        self._bt._showTimer(timer)

    # NOTE(review): unlike showTimer, hideTimer takes no timer argument --
    # this mirrors the native export's signature; confirm.
    def hideTimer(self):
        self._bt._hideTimer()

    def setSceneCallback(self, callback):
        self._bt._setSceneCallback(callback)

    def setObjectCallback(self, callback):
        self._bt._setObjectCallback(callback)

    def setMouseCallback(self, callback):
        self._bt._setMouseCallback(callback)

    def setTimerCallback(self, callback):
        self._bt._setTimerCallback(callback)

    def setSoundCallback(self, callback):
        self._bt._setSoundCallback(callback)

    def setKeyboardCallback(self, callback):
        self._bt._setKeyboardCallback(callback)

    def setGameOption(self, option, value):
        # option is a GameOption enum; the DLL expects the raw int value.
        self._bt._setGameOption(option.value, c_bool(value))

    def getGameOption(self, option):
        self._bt._getGameOption.restype = c_int32
        return bool(self._bt._getGameOption(option.value))
class GameServer(GameImpl, SingletonInstance):
    """Singleton facade over GameImpl; obtained via the SingletonInstance
    accessor rather than direct construction."""
    pass
|
<reponame>sarnold/chiptools<filename>chiptools/wrappers/synthesisers/ise.py
import os
import logging
import datetime
import shutil
import traceback
import re
import shlex
from chiptools.common.filetypes import FileType
from chiptools.common import exceptions
from chiptools.common.exceptions import FileNotFoundError
from chiptools.wrappers import synthesiser
log = logging.getLogger(__name__)
# Options file to be used by XFLOW
XST_MIXED_OPT = """
FLOWTYPE = FPGA_SYNTHESIS;
Program xst
-ifn <design>_xst.scr; # input XST script file
-ofn <design>_xst.log; # output XST log file
-intstyle xflow; # Message Reporting Style: ise, xflow, or silent
ParamFile: <design>_xst.scr
"run";
"-ifn <synthdesign>"; # Input/Project File Name
"-ifmt mixed"; # Input Format
"-ofn <design>"; # Output File Name
"-ofmt ngc"; # Output File Format
"-top <design>"; # Top Design Name
"-generics %(generics)s";
"-p <partname>"; # Target Device
End ParamFile
End Program xst
"""
class Ise(synthesiser.Synthesiser):
"""
A ISE Synthesiser instance can be used to synthesise the files in the
given Project using the XFLOW utility or individual Xst, Map, Par,
Ngdbuild, Bitgen and Promgen tools provided in a base Xilinx ISE
installation. The ISE synthesis flow can be set to either *'manual'* flow
where the individual ISE binaries are called in sequence or *'xflow'*
where the XFLOW utility is called (effectively the same thing).
To use the ISE class it must be instanced with a Project and Options
object passed as arguments, the *'synthesise'* method may then be called
to initiate the synthesis flow.
In addition to running the synthesis flow, the ISE Synthesiser instance
also uses a Reporter instance to filter the synthesis log messages for
important information relating to the build.
When complete, the output files from synthesis will be stored in an
archive bearing the name of the entity that was synthesised and a unique
timestamp.
"""
name = 'ise'
executables = [
'xwebtalk',
'promgen',
'xst',
'map',
'par',
'ngdbuild',
'bitgen',
'xflow',
]
    def __init__(self, project, user_paths, mode='manual'):
        """
        Create a new ISE Synthesiser instance using the supplied Project and
        Options objects with the optional string parameter *mode* set to
        either 'manual' or 'xflow' to determine which ISE tool flow to use
        during synthesis.
        """
        super(Ise, self).__init__(project, self.executables, user_paths)
        self.mode = mode
        # Absolute paths to each ISE executable under the resolved tool path.
        self.xwebtalk = os.path.join(self.path, 'xwebtalk')
        self.promgen = os.path.join(self.path, 'promgen')
        self.xst = os.path.join(self.path, 'xst')
        self.map = os.path.join(self.path, 'map')
        self.par = os.path.join(self.path, 'par')
        self.ngdbuild = os.path.join(self.path, 'ngdbuild')
        self.bitgen = os.path.join(self.path, 'bitgen')
        self.xflow = os.path.join(self.path, 'xflow')
@synthesiser.throws_synthesis_exception
def makeProject(self, projectFilePath, fileFormat='mixed'):
"""
Generate a Xilinx ISE project file listing source files with their
filetypes and libraries.
ISE requires a project file to be written using the following format:
.. code-block: xml
<hdl_language> <compilation_library> <source_file>
Where *hdl_language* specifies whether the designated HDL source file
is written in VHDL or Verilog, *compilation_library* specifies the
library where the HDL is compiled and *source_file* specifies the path
to the source file.
This method generates an appropriate file from the project data that
has been loaded into the ISE Synthesiser instance.
"""
log.info('Creating project file for ISE...')
projectFileString = ''
fileSet = self.project.get_synthesis_fileset()
for libName, fileList in fileSet.items():
for fileObject in fileList:
# We could leave it to the synthesis tool to report missing
# files, but handling them here means we can abort the process
# early and notify the user.
if os.path.isfile(fileObject.path):
if fileObject.fileType == FileType.VHDL:
if fileFormat == 'mixed':
projectFileString += 'vhdl '
elif fileObject.fileType == FileType.Verilog:
if fileFormat == 'mixed':
projectFileString += 'verilog '
elif fileObject.fileType == FileType.SystemVerilog:
if fileFormat == 'mixed':
projectFileString += 'verilog '
elif fileObject.fileType == FileType.NGCNetlist:
base = os.path.dirname(projectFilePath)
newPath = os.path.join(
base, os.path.basename(fileObject.path)
)
if os.path.exists(newPath):
log.warning(
'File already exists: '
+ str(newPath)
+ ' and will be overwritten by: '
+ str(fileObject.path)
)
# Copy the NGC into the local directory
shutil.copyfile(fileObject.path, newPath)
continue
else:
raise exceptions.SynthesisException(
'Unknown file type for synthesis tool: '
+ fileObject.fileType
)
projectFileString += fileObject.library + ' '
projectFileString += fileObject.path + '\n'
else:
raise FileNotFoundError(fileObject.path)
# Write out the synthesis project file
log.debug('Writing: ' + projectFilePath)
with open(projectFilePath, 'w') as f:
f.write(projectFileString)
log.info('...done')
@synthesiser.throws_synthesis_exception
def synthesise(self, library, entity, fpga_part=None):
    """
    Synthesise the target entity in the given library for the currently
    loaded project. The following steps are performed during synthesis:

    * Create synthesis directories
    * Generate an ISE project file
    * Generate an ISE UCF constraints file
    * Invoke XFLOW or the flow tools individually with appropriate command
      line arguments
    * Generate reports
    * Archive the outputs of the synthesis flow

    :param library: library containing the top-level entity.
    :param entity: name of the top-level entity to synthesise; also used
        to derive project, constraints and output file names.
    :param fpga_part: optional FPGA part name; when None the part
        configured in the project is used.
    """
    super(Ise, self).synthesise(library, entity, fpga_part)
    # make a temporary working directory for the synth tool
    import tempfile
    startTime = datetime.datetime.now()
    # WebTalk can leave files open in the synthesis directory, which makes
    # TemporaryDirectory cleanup fail on some platforms.
    log.info(
        'Turning Xilinx WebTalk off as it may prevent the removal of '
        + 'temporary directories'
    )
    try:
        self.ise_webtalk_off()
    except Exception:
        # Best-effort: a failure here only degrades cleanup, so log and
        # continue rather than aborting the synthesis run.
        log.debug(traceback.format_exc())
        log.warning(
            'Could not disable WebTalk, '
            + 'you may encounter PermissionErrors '
            + 'during temporary directory removal'
        )
    with tempfile.TemporaryDirectory(
        dir=self.project.get_synthesis_directory()
    ) as workingDirectory:
        log.info(
            'Created temporary synthesis directory: ' + workingDirectory
        )
        # Unique run name of the form <entity>_synth_<ddmmyy_HHMMSS>.
        synthName = (
            entity + '_synth_' + startTime.strftime('%d%m%y_%H%M%S')
        )
        archiveName = synthName + '.tar'
        synthesisDirectory = os.path.join(workingDirectory, synthName)
        os.makedirs(synthesisDirectory)
        if fpga_part is None:
            fpga_part = self.project.get_fpga_part()
        generics = self.project.get_generics().items()
        # Format the generics as an XST-style brace-delimited list:
        # {name=value name=value ...}
        generics = (
            '{' + ' '.join(k + '=' + str(v) for k, v in generics) + '}'
        )
        projectFilePath = os.path.join(synthesisDirectory, entity + '.prj')
        exportDirectory = os.path.join(synthesisDirectory, 'output')
        reportDirectory = os.path.join(synthesisDirectory, 'reports')
        # Add user constraints and other source files
        self.addConstraints(entity, synthesisDirectory)
        self.makeProject(projectFilePath)
        if self.mode == 'xflow':
            try:
                # Run the flow
                self.ise_xflow(
                    projectFilePath,
                    fpga_part,
                    entity,
                    generics,
                    synthesisDirectory,
                    reportDirectory,
                    exportDirectory,
                )
                self.generate_programming_files(entity, synthesisDirectory)
            except Exception:
                # Archive the outputs so the failing run can be inspected,
                # then re-raise for the caller.
                log.error(
                    'Synthesis error, storing output in error directory...'
                )
                self.storeOutputs(workingDirectory, 'ERROR_' + archiveName)
                raise
        elif self.mode == 'manual':
            try:
                # Run the flow
                self.ise_manual_flow(
                    projectFilePath,
                    fpga_part,
                    entity,
                    generics,
                    synthesisDirectory,
                    reportDirectory,
                    exportDirectory,
                )
                self.generate_programming_files(entity, synthesisDirectory)
            except Exception:
                # Archive the outputs
                log.error(
                    'Synthesis error, storing output in error directory...'
                )
                self.storeOutputs(workingDirectory, 'ERROR_' + archiveName)
                raise
        else:
            raise exceptions.SynthesisException(
                'Invalid flow type: ' + self.mode
            )
        # Check the report
        reporter_fn = self.project.get_reporter()
        try:
            if reporter_fn is not None:
                reporter_fn(synthesisDirectory)
        except Exception:
            # A faulty user-supplied reporter must not fail an otherwise
            # successful synthesis run.
            log.error(
                'The post-synthesis reporter script caused an error:\n'
                + traceback.format_exc()
            )
        # Archive the outputs
        log.info('Synthesis completed, saving output to archive...')
        self.storeOutputs(workingDirectory, archiveName)
        log.info('...done')
@synthesiser.throws_synthesis_exception
def generate_programming_files(self, entity, working_directory):
    """
    Generate programming files from the bitfile produced by the synthesis
    run. An MCS file is always generated; additional formats are produced
    for every 'args_ise_promgen_<format>' configuration item present in
    the project, using the arguments that item supplies when promgen is
    invoked for that format.
    """
    # Collect every PROMGEN-related argument key for this tool.
    prefix = 'args_{0}_promgen'.format(self.name)
    promgen_keys = [
        key
        for key in self.project.get_all_tool_argument_keys(self.name)
        if key.startswith(prefix)
    ]
    for key in promgen_keys:
        fmt = key.split('_')[-1]
        # The bare 'args_<name>_promgen' key carries default promgen
        # arguments rather than naming an output format; skip it.
        if not fmt or fmt == 'promgen':
            continue
        log.info(
            'Generating PROM file using user defined '
            'arguments from configuration item: {0}'.format(key)
        )
        # Run promgen using the format recovered from the config key.
        self.ise_make_prom_file(
            entity + '.bit',
            entity + '.' + fmt,
            working_directory,
            mode=fmt,
        )
    # Always ensure that an MCS file is created.
    if prefix + '_mcs' not in promgen_keys:
        self.ise_make_prom_file(
            entity + '.bit', entity + '.mcs', working_directory, mode='mcs'
        )
@synthesiser.throws_synthesis_exception
def ise_webtalk_off(self):
    """
    Disable Xilinx WebTalk by invoking the *xwebtalk* binary with the
    *-user off* switch.
    """
    synthesisDirectory = self.project.get_synthesis_directory()
    webtalkArgs = ['-user', 'off']
    Ise._call(
        self.xwebtalk,
        webtalkArgs,
        cwd=synthesisDirectory,
        quiet=False,
    )
@synthesiser.throws_synthesis_exception
def ise_promgen(self, fin, fout, working_directory, args=None):
    """
    Call the *promgen* binary, which accepts the following arguments:

    Usage: promgen [-b] [-spi] [-p mcs|exo|tek|hex|bin|ieee1532|ufp] [-o
    <outfile> {<outfile>}] [-s <size> {<size>}] [-x <xilinx_prom>
    {<xilinx_prom>}] [-c [<hexbyte>]] [-l] [-w] [-bpi_dc serial|parallel]
    [-intstyle ise|xflow|silent] [-t <templatefile[.pft]>] [-z
    [<version:0,3>]] [-i <version:0,3>] [-data_width 8|16|32]
    [-config_mode selectmap8|selectmap16|selectmap32] {-ver <version:0,3>
    <file> {<file>}} {-u <hexaddr> <file> {<file>}} {-d <hexaddr> <file>
    {<file>}} {-n <file> {<file>}} {-bd <file> [start <hexaddr>] [tag
    <tagname> {<tagname>}]} {-bm <file>} {-data_file up|down <hexaddr>
    <file> {<file>}} [-r <promfile>]

    * *fin* is passed to the *<file>* input parameter
    * *fout* is passed to the *-o* input parameter
    * *working_directory* is the working directory where the tool is
      invoked
    * *args* optionally supplies the promgen argument list; when None the
      'promgen' tool arguments configured in the project are used.
    """
    # Get additional tool arguments for this flow stage if this method was
    # not called with existing arguments.
    if args is None:
        args = self.project.get_tool_arguments(self.name, 'promgen')
        # Sibling stages receive their configured arguments as a string;
        # split into an argv-style list so the switch tests and list
        # extensions below work (previously a string here would crash on
        # the first 'args += [...]').
        if isinstance(args, str):
            args = shlex.split(args)
    # Work on a copy so the caller's (or the project's) list is never
    # mutated in place.
    args = list(args or [])
    # Allow the user to override PROM loading of bitfiles; default to
    # upward loading from address 0 when no loading switch was supplied.
    if not any(k in args for k in ('-r', '-u', '-d', '-ver')):
        args += ['-u', '0', fin]
    # Default to MCS if the user did not specify an output format. Only
    # the '-p' switch selects a format; a bare 'mcs' token elsewhere in
    # the argument list is a value, not a format selection (the previous
    # check also matched that and wrongly suppressed the default).
    if '-p' not in args:
        args += ['-p', 'mcs']
    # Always overwrite existing files
    if '-w' not in args:
        args += ['-w']
    # Allow the user to override the output file name
    if '-o' not in args:
        args += ['-o', fout]
    Ise._call(self.promgen, args, cwd=working_directory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_make_prom_file(self, fin, fout, working_directory, mode='mcs'):
    """
    Generate a programming file using the promgen tool. Users can supply
    arguments to this flow stage with *args_ise_promgen_<mode>*.

    * *fin*: input bitfile name
    * *fout*: output programming file name
    * *working_directory*: directory where promgen is invoked
    * *mode*: promgen output format, passed to the '-p' switch when the
      user arguments do not already select a format.
    """
    # Get additional tool arguments for this flow stage (may be None).
    args = self.project.get_tool_arguments(
        self.name, 'promgen_{0}'.format(mode)
    )
    # Split the configured argument string into an argv-style list,
    # honouring shell quoting. (Replaces the obscure
    # "['', args][args is not None]" selection idiom.)
    args = shlex.split(args if args is not None else '')
    # Select the requested output format unless the user already supplied
    # a '-p' switch. A bare '<mode>' token in the argument list is a
    # value, not a format selection, so it is deliberately not checked.
    if '-p' not in args:
        args += ['-p', mode]
    self.ise_promgen(fin, fout, working_directory, args)
@synthesiser.throws_synthesis_exception
def ise_xst(self, part, entity, generics, working_directory):
    """
    Generate an XST settings file and call the *XST* binary.

    * *part*: FPGA part name, written to the '-p' setting
    * *entity*: top-level entity name; also used for file naming
    * *generics*: formatted generics string for the '-generics' setting
    * *working_directory*: directory where XST is invoked
    """
    # Get additional tool arguments for this flow stage
    xstargs = self.project.get_tool_arguments(self.name, 'xst')
    # Format the args as XST expects: one '-switch value' per line.
    xstargs = re.sub(' -', '\n-', xstargs)
    # Build the XST script. Only the known fields are substituted; the
    # user-supplied arguments are appended verbatim. (Previously the user
    # arguments were concatenated *before* %-substitution, so any literal
    # '%' character in them broke the formatting step.)
    xst_scr = (
        'run\n'
        '-ifn {entity}.prj\n'
        '-ofn {entity}.ngc\n'
        '-ofmt NGC\n'
        '-p {part}\n'
        '-top {entity}\n'
        '-generics {generics}\n'.format(
            entity=entity,
            part=part,
            generics=generics,
        )
        + xstargs
        + '\n'
    )
    # Write the XST script file next to the other synthesis inputs.
    with open(os.path.join(working_directory, entity + '.xst'), 'w') as f:
        f.write(xst_scr)
    args = ['-ifn', entity + '.xst']
    args += ['-ofn', entity + '.log']
    Ise._call(self.xst, args, cwd=working_directory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_map(self, part, entity, working_directory):
    """
    Call the *MAP* binary to map the logical design onto device resources:

        map [-h] [-p partname] (infile[.ngd]) [-o (outfile[.ncd])]

    https://www.xilinx.com/support/documentation/sw_manuals/xilinx14_1/devref.pdf

    * *part* is passed to the *-p* input parameter
    * *entity* is used to generate input and output file names
    * *working_directory* is the working directory where the tool is
      invoked
    """
    # User-supplied arguments for this stage (may be None).
    userArgs = self.project.get_tool_arguments(self.name, 'map')
    callArgs = shlex.split(userArgs if userArgs is not None else '')
    callArgs += [
        '-p', part,                 # part name
        '-o', entity + '_map.ncd',  # output file
        entity + '.ngd',            # input design
        entity + '.pcf',            # PCF output name
    ]
    Ise._call(self.map, callArgs, cwd=working_directory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_par(self, entity, working_directory):
    """
    Call the *PAR* binary to place and route the mapped design:

        par [options] <infile[.ncd]> <outfile> [<constraintsfile[.pcf]>]

    (See the Xilinx command line tools reference for the full option
    list: -ol/-pl/-rl std|high, -xe, -mt, -t, -p, -k, -r, -w,
    -smartguide, -x, -nopad, -power, -activityfile, -ntd, -intstyle,
    -ise, -filter.)

    * *entity* is used to generate input and output file names
    * *working_directory* is the working directory where the tool is
      invoked
    """
    # User-supplied arguments for this stage (may be None).
    userArgs = self.project.get_tool_arguments(self.name, 'par')
    callArgs = shlex.split(userArgs if userArgs is not None else '')
    callArgs += [
        entity + '_map.ncd',  # input: mapped design
        entity + '.ncd',      # output: routed design
        entity + '.pcf',      # physical constraints file (auto generated)
    ]
    Ise._call(self.par, callArgs, cwd=working_directory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_ngdbuild(self, part, entity, working_directory):
    """
    Call the *NGDBUILD* binary to translate the synthesised netlist into
    an NGD file:

        ngdbuild [-p <partname>] {-sd <source_dir>} {-l <library>}
                 [-uc <ucf_file[.ucf]>] [...] <design_name>
                 [<ngd_file[.ngd]>]

    * *entity* is used to generate input and output file names
    * *-sd* is set to *working_directory*
    * *-p* is set to *part*
    * *working_directory* is the working directory where the tool is
      invoked
    """
    # User-supplied arguments for this stage (may be None).
    userArgs = self.project.get_tool_arguments(self.name, 'ngdbuild')
    callArgs = shlex.split(userArgs if userArgs is not None else '')
    callArgs += [
        '-uc', entity + '.ucf',    # user constraints
        '-sd', working_directory,  # search directory
        '-p', part,                # part name
        entity + '.ngc',           # input design file
        entity + '.ngd',           # output NGD file
    ]
    Ise._call(self.ngdbuild, callArgs, cwd=working_directory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_bitgen(self, part, entity, working_directory):
    """
    Call the *BITGEN* binary to produce a bitstream from the routed
    design:

        bitgen [options] <infile[.ncd]> [<outfile>] [<pcffile[.pcf]>]

    * *entity* is used to generate input and output file names
    * *working_directory* is the working directory where the tool is
      invoked
    * *part* is accepted for signature parity with the other flow stages
      and is not passed to the tool.
    """
    # User-supplied arguments for this stage (may be None).
    userArgs = self.project.get_tool_arguments(self.name, 'bitgen')
    callArgs = shlex.split(userArgs if userArgs is not None else '')
    callArgs += [
        entity + '.ncd',  # input: routed design
        entity + '.bit',  # output: bitstream
    ]
    Ise._call(self.bitgen, callArgs, cwd=working_directory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_xflow(
    self,
    projectFilePath,
    part,
    entity,
    generics,
    workingDirectory,
    reportDirectory,
    exportDirectory,
):
    """
    Call the *XFLOW* binary to run synthesis, implementation and
    configuration as a single automated flow:

        xflow [-p partname] [flow type] [options file[.opt]]
              [xflow options] design_name

    XFLOW flow types include: -config (bitstream), -ecn (formal
    verification), -fit (CPLD fitting), -fsim (functional simulation),
    -implement (routed NCD), -sta (static timing analysis), -synth
    (synthesis) and -tsim (timing simulation).

    This wrapper runs -synth with an auto-generated options file carrying
    the project generics, -implement with the stock 'balanced.opt' and
    -config with the stock 'bitgen.opt'.
    """
    # Additional arguments are not supported for XFLOW, the XST flow
    # should be used if more control of the stages is required.
    if len(self.project.get_tool_arguments(self.name, 'xflow')) > 0:
        log.warning(
            'The ISE wrapper does not allow additional arguments'
            + ' to be passed to XFLOW. Use the XST flow if fine control'
            + ' of the synthesis stages is required.'
        )
    # Write the auto-generated synthesis options file, injecting the
    # formatted generics string into the template.
    optionsPath = os.path.join(workingDirectory, 'xst_custom.opt')
    with open(optionsPath, 'w') as optionsFile:
        optionsFile.write(XST_MIXED_OPT % dict(generics=generics))
    # Invoke the flow.
    callArgs = [
        '-p', part,
        '-synth', 'xst_custom.opt',
        '-implement', 'balanced.opt',
        '-config', 'bitgen.opt',
        '-wd', workingDirectory,
        '-ed', exportDirectory,
        '-rd', reportDirectory,
        projectFilePath,
    ]
    Ise._call(self.xflow, callArgs, cwd=workingDirectory, quiet=False)
@synthesiser.throws_synthesis_exception
def ise_manual_flow(
    self,
    projectFilePath,
    part,
    entity,
    generics,
    workingDirectory,
    reportDirectory,
    exportDirectory,
):
    """
    Execute the manual ISE tool flow in the following order:

    #. XST
    #. NGDBUILD
    #. MAP
    #. PAR
    #. BITGEN

    Refer to the individual documentation for these tools for more
    information.

    NOTE(review): *projectFilePath*, *reportDirectory* and
    *exportDirectory* are accepted for signature parity with
    :meth:`ise_xflow` but are not used by this flow.
    """
    # XST > NGDBUILD > MAP > PAR > BitGen > PromGen
    self.ise_xst(part, entity, generics, workingDirectory)
    self.ise_ngdbuild(part, entity, workingDirectory)
    self.ise_map(part, entity, workingDirectory)
    self.ise_par(entity, workingDirectory)
    self.ise_bitgen(part, entity, workingDirectory)
@synthesiser.throws_synthesis_exception
def addConstraints(self, entity, synthesisDirectory):
    """
    Load the user constraints file path from the Project instance and
    generate a UCF file in the supplied *synthesisDirectory* directory
    where the synthesis tools are invoked.

    All UCF constraints targeting the 'ise' flow (or no specific flow)
    are concatenated, in project order, into a single '<entity>.ucf'
    file. Nothing is written when no matching constraints exist.
    """
    # Add user constraints and other source files
    constraintsFiles = self.project.get_constraints()
    constraintsData = ''
    # Paths already handled, so the same file is never merged twice.
    filesProcessed = []
    for fileObject in constraintsFiles:
        # Avoid duplicates
        if fileObject.path not in filesProcessed:
            # Only constraints targeting this flow (or flow-agnostic
            # ones) are considered.
            if fileObject.flow == 'ise' or fileObject.flow is None:
                if fileObject.fileType == FileType.UCF:
                    # Copy the UCF data into the string var
                    with open(fileObject.path, 'r') as constraintsFile:
                        constraintsData += constraintsFile.read()
                    log.info(
                        'Added constraints file: ' + fileObject.path
                    )
                filesProcessed.append(fileObject.path)
    # Write the string var to a single file if we have data
    if len(constraintsData) != 0:
        newPath = os.path.join(synthesisDirectory, entity + '.ucf')
        with open(newPath, 'w') as outFile:
            outFile.write(constraintsData)
        log.info('Wrote: ' + newPath)
|
<reponame>eax64/apacheconfig
#
# This file is part of apacheconfig software.
#
# Copyright (c) 2018, <NAME> <<EMAIL>>
# License: https://github.com/etingof/apacheconfig/LICENSE.rst
#
import os
import sys
from apacheconfig import *
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
mock = unittest.mock
except AttributeError:
import mock
class LoaderTestCase(unittest.TestCase):
    """
    Behavioural tests for ApacheConfigLoader: option handling, block
    merging, variable interpolation, include resolution and plug-in
    hooks.
    """

    def testWholeConfig(self):
        text = """\
# a
a = b
<a block>
a = b
</a>
a b
<a a block>
c "d d"
</a>
# a
"""
        ApacheConfigLexer = make_lexer()
        ApacheConfigParser = make_parser()
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()))
        config = loader.loads(text)
        self.assertEqual(config, {'a': ['b', {'block': {'a': 'b'}},
                                        'b', {'a block': {'c': 'd d'}}]})

    def testForceArray(self):
        text = """\
b = [1]
"""
        options = {
            'forcearray': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'b': ['1']})

    def testDuplicateBlocksUnmerged(self):
        text = """\
<a>
b = 1
</a>
<a>
b = 2
</a>
"""
        ApacheConfigLexer = make_lexer()
        ApacheConfigParser = make_parser()
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()))
        config = loader.loads(text)
        self.assertEqual(config, {'a': [{'b': '1'}, {'b': '2'}]})

    def testDuplicateBlocksMerged(self):
        text = """\
<a>
b = 1
</a>
<a>
b = 2
</a>
"""
        options = {
            'mergeduplicateblocks': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': {'b': ['1', '2']}})

    def testDuplicateOptionsAllowed(self):
        text = """\
a = 1
a = 2
"""
        options = {
            'allowmultioptions': True
        }
        # Fixed: pass the options to the lexer and parser as well, for
        # consistency with every other test in this class.
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': ['1', '2']})

    def testDuplicateOptionsDenied(self):
        text = """\
a = 1
a = 2
"""
        options = {
            'allowmultioptions': False
        }
        # Fixed: pass the options to the lexer and parser as well, for
        # consistency with every other test in this class.
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        self.assertRaises(ApacheConfigError, loader.loads, text)

    def testDuplicateOptionsOverriden(self):
        text = """\
a = 1
a = 2
"""
        options = {
            'mergeduplicateoptions': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': '2'})

    def testDefaultConfig(self):
        text = """\
a = 1
b = 2
"""
        options = {
            'defaultconfig': {
                'b': '4',
                'c': '3'
            },
            'mergeduplicateoptions': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': '1', 'b': ['4', '2'], 'c': '3'})

    def testNamedBlocks(self):
        text = """\
<a b>
c = 1
</a b>
"""
        ApacheConfigLexer = make_lexer()
        ApacheConfigParser = make_parser()
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()))
        config = loader.loads(text)
        self.assertEqual(config, {'a': {'b': {'c': '1'}}})

    def testAutoTrue(self):
        text = """\
a 1
a on
a true
b 0
b off
b false
"""
        options = {
            'autotrue': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': ['1', '1', '1'], 'b': ['0', '0', '0']})

    def testFlagBits(self):
        text = """\
mode = CLEAR | UNSECURE
"""
        options = {
            'flagbits': {
                'mode': {
                    'CLEAR': 1,
                    'STRONG': 1,
                    'UNSECURE': '32bit'
                }
            }
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'mode': {'CLEAR': 1, 'STRONG': None, 'UNSECURE': '32bit'}})

    @mock.patch('os.path.exists')
    def testConfigPath(self, path_exists_mock):
        text = """\
<<include t.conf>>
"""
        options = {
            'configpath': ['a', 'b']
        }
        path_exists_mock.return_value = False
        with make_loader(**options) as loader:
            self.assertRaises(ApacheConfigError, loader.loads, text)
        expected_probes = ['a/t.conf', 'b/t.conf', './t.conf']
        actual_probes = [x[1][0] for x in path_exists_mock.mock_calls
                         if len(x[1]) and x[1][0] in expected_probes]
        self.assertEqual(expected_probes, actual_probes)

    @mock.patch('os.path.exists')
    def testProgramPath(self, path_exists_mock):
        text = """\
<<include t.conf>>
"""
        options = {
            'programpath': 'a/b'
        }
        path_exists_mock.return_value = False
        with make_loader(**options) as loader:
            self.assertRaises(ApacheConfigError, loader.loads, text)
        expected_probes = ['a/b/t.conf']
        actual_probes = [x[1][0] for x in path_exists_mock.mock_calls
                         if len(x[1]) and x[1][0] in expected_probes]
        self.assertEqual(expected_probes, actual_probes)

    @mock.patch('os.path.exists')
    def testIncludeRelative(self, path_exists_mock):
        text = """\
<<include t.conf>>
"""
        options = {
            'includerelative': True,
            'configroot': 'a'
        }
        path_exists_mock.return_value = False
        with make_loader(**options) as loader:
            self.assertRaises(ApacheConfigError, loader.loads, text)
        expected_probes = ['a/t.conf']
        actual_probes = [x[1][0] for x in path_exists_mock.mock_calls
                         if len(x[1]) and x[1][0] in expected_probes]
        self.assertEqual(expected_probes, actual_probes)

    def testIncludeDirectories(self):
        text = """\
<<include xxx>>
"""
        options = {
            'includedirectories': True
        }
        with make_loader(**options) as loader:
            with mock.patch('os.path.exists') as path_exists_mock:
                with mock.patch('os.path.isdir') as path_isdir_mock:
                    with mock.patch('os.listdir') as listdir_mock:
                        path_exists_mock.side_effect = lambda x: [True, False]
                        path_isdir_mock.side_effect = lambda x: [True, False]
                        listdir_mock.return_value = []
                        config = loader.loads(text)
        self.assertEqual(config, {})

    def testInterpolateVars(self):
        text = """\
a = 1
b = $a
c = ${b}
e 1
<aa>
d = ${c}
e = 2
f "${e} + 2"
g = '${e}'
</aa>
"""
        options = {
            'interpolatevars': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': '1',
                                  'b': '1',
                                  'c': '1',
                                  'e': '1',
                                  'aa': {'d': '1',
                                         'e': '2',
                                         'f': '2 + 2',
                                         'g': '${e}'}})

    def testInterpolateVarsSingleQuote(self):
        text = """\
a = 1
b = '${a}'
"""
        options = {
            'allowsinglequoteinterpolation': True
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': '1',
                                  'b': '1'})

    def testInterpolateVarsFailOnUndefined(self):
        text = """\
b = ${a}
"""
        options = {
            'interpolatevars': True,
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        self.assertRaises(ApacheConfigError, loader.loads, text)

    def testInterpolateVarsIgnoreUndefined(self):
        text = """\
b = '${a}'
"""
        options = {
            'interpolatevars': True,
            'strictvars': False
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'b': '${a}'})

    def testInterpolateEnv(self):
        text = """\
b = $a
c = ${b}
e 1
<aa>
d = ${c}
e = 2
f "${e} + 2"
g = '${e}'
</aa>
"""
        options = {
            'interpolateenv': True
        }
        os.environ['a'] = '1'
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'b': '1',
                                  'c': '1',
                                  'e': '1',
                                  'aa': {'d': '1',
                                         'e': '2',
                                         'f': '2 + 2',
                                         'g': '${e}'}})

    def testHookPreOpen(self):
        def pre_open(filename, basedir):
            return 'blah' in filename, basedir, filename

        options = {
            'plug': {
                'pre_open': pre_open
            }
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.load('halb.conf')
        self.assertEqual(config, {})
        self.assertRaises(ApacheConfigError, loader.load, 'blah.conf')

    def testHookPreRead(self):
        text = """\
blah 1
"""

        def pre_read(filepath, text):
            return 'blah' in text, filepath, 'a 1\n'

        options = {
            'plug': {
                'pre_read': pre_read
            }
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': '1'})

    def testHookPreParse(self):
        text = """\
a 1
b = 2
"""

        def pre_parse_value(option, value):
            return option == 'a', option, value + '1'

        options = {
            'plug': {
                'pre_parse_value': pre_parse_value
            }
        }
        ApacheConfigLexer = make_lexer(**options)
        ApacheConfigParser = make_parser(**options)
        loader = ApacheConfigLoader(ApacheConfigParser(ApacheConfigLexer()), **options)
        config = loader.loads(text)
        self.assertEqual(config, {'a': '11'})
# Collect every TestCase defined in this module so the suite can be run
# either directly (below) or by an external test runner importing `suite`.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
|
<reponame>ettoreferranti/walkingpad
#!/usr/bin/env python3
from bleak import BleakScanner, discover
from ph4_walkingpad.pad import Controller, WalkingPad
import logging
import asyncio
# Configure root logging with library defaults and use an INFO-level
# module logger for all treadmill status messages.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Treadmill:
    """
    Asynchronous wrapper around a ph4_walkingpad Controller for a single
    WalkingPad device, addressed by its BLE address.
    """

    # Class-level defaults; __init__ overwrites both per instance.
    address = None
    controller = None
    # Delay (seconds) inserted after every command; presumably gives the
    # pad's BLE stack time to process — TODO confirm against the
    # ph4_walkingpad documentation.
    minimal_cmd_space = 0.69

    def __init__(self, address):
        self.address = address
        self.controller = Controller()

    @staticmethod
    async def scan(name):
        """
        Scan for BLE peripherals, log every device found, and return the
        first device whose advertised name contains *name*
        (case-insensitively), or None when nothing matches.
        """
        devices_dict = {}
        devices_list = []
        walking_belt_candidates = []
        logger.info("Scanning for peripherals...")
        devices = await discover()
        for index, device in enumerate(devices):
            # BLE advertisements may carry no name at all; normalise to ''
            # so the matching below cannot raise on None.
            device_name = device.name or ''
            # 'uuids' may be absent from the metadata; .get avoids a
            # KeyError that previously aborted the whole scan.
            uuids = device.metadata.get("uuids")
            # Print the devices discovered
            info_str = ', '.join(["[%2d]" % index, str(device.address),
                                  str(device_name), str(uuids)])
            logger.info("Device: %s" % info_str)
            # Put devices information into list
            devices_dict[device.address] = [device_name, uuids]
            devices_list.append(device.address)
            if name in device_name.lower():
                walking_belt_candidates.append(device)
        if walking_belt_candidates:
            return walking_belt_candidates[0]
        return None

    async def connect(self):
        """Connect the controller to the configured BLE address."""
        logger.info(f"Connecting to {self.address}")
        await self.controller.run(self.address)
        await asyncio.sleep(self.minimal_cmd_space)

    async def disconnect(self):
        """Disconnect the controller from the device."""
        logger.info(f"Disconnecting from {self.address}")
        await self.controller.disconnect()
        await asyncio.sleep(self.minimal_cmd_space)

    async def read_history(self):
        """Request the stored history records from the pad."""
        logger.info(f"Reading history from {self.address}")
        await self.controller.ask_hist(0)
        await asyncio.sleep(self.minimal_cmd_space)

    async def read_stats(self):
        """
        Query the pad and return a dict with 'steps', 'distance' (km),
        'time', 'speed' (km/h), 'mode' and a symbolic 'belt' state.
        """
        logger.info(f"Reading status from {self.address}")
        await self.controller.ask_stats()
        await asyncio.sleep(self.minimal_cmd_space)
        stats = self.controller.last_status
        mode = stats.manual_mode
        # Map the raw belt state code to a symbolic name.
        # NOTE(review): codes 2-4 fall through and are returned as raw
        # integers — confirm against the WalkingPad protocol.
        belt_state = stats.belt_state
        if belt_state == 5:
            belt_state = "standby"
        elif belt_state == 0:
            belt_state = "idle"
        elif belt_state == 1:
            belt_state = "running"
        elif belt_state >= 6:
            belt_state = "starting"
        # Assumes dist is reported in 10 m units and speed in 0.1 km/h
        # units — TODO confirm against ph4_walkingpad.
        distance_in_km = stats.dist / 100.0
        speed_in_km = stats.speed / 10.0
        latest_status = {}
        latest_status['steps'] = stats.steps
        latest_status['distance'] = distance_in_km
        latest_status['time'] = stats.time
        latest_status['speed'] = speed_in_km
        latest_status['mode'] = mode
        latest_status['belt'] = belt_state
        return latest_status

    async def start_walking(self):
        """Start the belt."""
        logger.info("Starting to walk")
        await self.controller.start_belt()
        await asyncio.sleep(self.minimal_cmd_space)

    async def stop_walking(self):
        """Stop the belt."""
        # Fixed copy-paste bug: previously logged "Starting to walk".
        logger.info("Stopping walking")
        await self.controller.stop_belt()
        await asyncio.sleep(self.minimal_cmd_space)

    async def set_speed(self, speed):
        """Set the belt speed in km/h (sent in 0.1 km/h device units)."""
        logger.info(f"Setting speed to {speed}")
        await self.controller.change_speed(int(speed * 10.0))
        await asyncio.sleep(self.minimal_cmd_space)

    async def switch_mode(self, mode):
        """Switch the pad's operating mode (e.g. manual/automatic)."""
        logger.info(f"Setting mode to {mode}")
        await self.controller.switch_mode(mode)
        await asyncio.sleep(self.minimal_cmd_space)
|
<gh_stars>0
# coding=utf-8
from copy import copy
from mov_sdk.mov_api import MovApi
from bmc_sdk.log_service import log_service_manager
from util import *
# from config import strategy_config, account_config
class SDKImpl(object):
    """
    Thin wrapper over the MOV exchange REST API (MovApi) that converts the
    raw JSON responses into plain Python values and OrderData objects.
    API errors are logged via check_error rather than raised.
    """

    def __init__(self, _guid, _private_key):
        self.guid = _guid
        self.private_key = _private_key
        self.api = MovApi(self.guid, self.private_key)

    def get_price(self, symbol):
        # Return (best_ask, best_bid) as floats, or (None, None) when the
        # depth query fails or either side of the book is empty.
        data = self.api.get_depth(symbol)
        if not self.check_error(data, "query_depth"):
            asks = data["data"]["asks"]
            bids = data["data"]["bids"]
            # Best ask is the lowest price, best bid the highest.
            # NOTE(review): entries appear to be [price, size] string
            # pairs, so this sort is lexicographic — confirm the API
            # response format.
            asks.sort()
            bids.sort(reverse=True)
            if len(asks) > 0 and len(bids) > 0:
                return float(asks[0][0]), float(bids[0][0])
        return None, None

    def get_exchange(self):
        '''
        Return per-symbol tick sizes derived from the exchange info
        endpoint.

        :return: dict mapping lower-case "base_quote" symbol to
            {"price_tick": float, "volume_tick": float}; empty on error.
        '''
        ret = {}
        data = self.api.get_exchange_info()
        if not self.check_error(data, "query_exchange"):
            data = data["data"]
            for d in data:
                symbol = (d["base_asset"]["symbol"] + "_" + d["quote_asset"]["symbol"]).lower()
                # Tick sizes come from the advertised decimal counts.
                price_tick = 1.0 / pow(10, d["price_decimal"])
                volume_tick = 1.0 / pow(10, d["amount_decimal"])
                ret[symbol] = {"price_tick": price_tick, "volume_tick": volume_tick}
        return ret

    def get_account(self):
        '''
        Note: the account endpoint only exposes *available* balance data!

        :return: dict mapping asset symbol to available balance (float);
            empty on error.
        '''
        ret = {}
        data = self.api.get_balance()
        if not self.check_error(data, "query_balance"):
            balances = data["data"]["balances"]
            for dic in balances:
                alias = dic["asset"]["symbol"]
                ret[alias] = float(dic["balance"])
        return ret

    def send_order(self, symbol, side, price, volume):
        # Submit an order and return an OrderData describing it, or None
        # implicitly when the request fails (errors are logged, not
        # raised).
        try:
            data_arr = self.api.send_order(symbol, side, price, volume)
            if data_arr:
                # Only the last response in the batch is inspected.
                data = data_arr[-1]
                if not self.check_error(data, "query_send_order"):
                    orders = data["data"]["orders"]
                    if len(orders) > 0:
                        d = orders[0]
                        sys_order_id = str(d["order_id"])
                        order = OrderData()
                        order.symbol = symbol
                        order.direction = side
                        order.order_id = str(sys_order_id)
                        order.price = price
                        order.volume = volume
                        order.traded = 0
                        order.status = Status.SUBMITTING.value  # order submission state
                        order.order_time = get_str_dt_use_timestamp(d["order_timestamp"], mill=1)
                        return order
        except Exception as ex:
            log_service_manager.write_log("[Info] send_order error ex:{}".format(ex))

    def cancel_order(self, order_id):
        # Cancel a single order by its exchange-assigned id; returns the
        # raw API response.
        log_service_manager.write_log("[Info] cancel:{}".format(order_id))
        return self.api.cancel_order(order_id)

    def query_open_orders(self, symbol):
        # Return the currently open orders for *symbol* as OrderData
        # objects (empty list on error).
        data = self.api.query_open_orders(symbol)
        ret = []
        if not self.check_error(data, "query_open_orders"):
            data = data["data"]
            for d in data:
                order = OrderData()
                order.symbol = d["symbol"]
                order.order_id = str(d["order_id"])
                order.direction = d["side"]
                order.price = float(d["open_price"])
                order.volume = float(d["amount"])
                order.traded = float(d["filled_amount"])
                order.status = STATUS_MOV2VT[d["status"]]
                order.order_time = get_str_dt_use_timestamp(d["order_timestamp"], mill=1)
                ret.append(order)
        return ret

    def query_list_orders(self, order_id_list):
        # Look up specific orders by id; like query_open_orders but also
        # fills in the cancel/update time for each order.
        ret = []
        data = self.api.query_list_orders(order_id_list)
        if not self.check_error(data, "query_list_orders"):
            data = data["data"]
            for d in data:
                order = OrderData()
                order.symbol = d["symbol"]
                order.order_id = str(d["order_id"])
                order.direction = d["side"]
                order.price = float(d["open_price"])
                order.volume = float(d["amount"])
                order.traded = float(d["filled_amount"])
                order.status = STATUS_MOV2VT[d["status"]]
                order.order_time = get_str_dt_use_timestamp(d["order_timestamp"], mill=1)
                order.cancel_time = get_str_dt_use_timestamp(d["update_timestamp"], mill=1)
                ret.append(order)
        return ret

    def check_error(self, data, func=""):
        # Return False when the API response indicates success (code 200);
        # otherwise log the error code/message and return True.
        if int(data["code"]) == 200:
            return False
        error_code = data["code"]
        error_msg = data["msg"]
        log_service_manager.write_log(
            "{} query_failed, code:{},information:{}".format(str(func), str(error_code), str(error_msg)))
        return True
class MovMakerStrategy(object):
    """Two-sided grid/market-making strategy for a MOV spot pair.

    Maintains ladders of resting buy ("long") and sell ("short") orders
    around the mid price, tracks fills to keep a volume-weighted average
    entry price per side, and works a single take-profit ("cover") order
    against each side's accumulated inventory.
    """

    def __init__(self, _account_config, _config):
        # Account / signing info consumed by the SDK wrapper.
        self.account_config = _account_config
        self.guid = _account_config.get("guid", "")
        self.private_key = _account_config.get("private_key", "")
        self.impl = SDKImpl(self.guid, self.private_key)
        self.config = _config
        # "symbol_pair" like "TARGET/BASE", e.g. "BTM/USDT".
        self.target_symbol, self.base_symbol = self.config["symbol_pair"].split('/')
        # Usable balances (updated in update_account, scaled by percent-use).
        self.exchange_info = {"pos_base_symbol": 0, "pos_target_symbol": 0}
        # order_id -> OrderData for resting ladder orders we placed.
        self.put_order_dict = {}
        # At most one take-profit order per side.
        self.buy_cover_order = None
        self.sell_cover_order = None
        self.avg_price_long = self.config["long_config"]["avg_price"]
        self.position_long = self.config["long_config"]["now_position"]
        self.avg_price_short = self.config["short_config"]["avg_price"]
        self.position_short = self.config["short_config"]["now_position"]
        # Fraction of a fill kept after fees (1 - fee_rate).
        self.cover_rate = 1 - self.config["exchange_info"]["fee_rate"]
        self.ask = 0
        self.bid = 0

    def update_account(self):
        """Refresh usable balances, scaled by the configured percent-use factors."""
        try:
            balance_dic = self.impl.get_account()
            self.exchange_info["pos_target_symbol"] = balance_dic[self.target_symbol] * self.config["exchange_info"][
                "pos_target_symbol_percent_use"]
            self.exchange_info["pos_base_symbol"] = balance_dic[self.base_symbol] * self.config["exchange_info"][
                "pos_base_symbol_percent_use"]
        except Exception as ex:
            log_service_manager.write_log("[Error] MovMakerStrategy,update_account ex:{}".format(ex))

    def update_exchange(self):
        """Refresh price/volume tick sizes for our pair from the exchange."""
        try:
            exchange_dic = self.impl.get_exchange()
            dic = exchange_dic.get(self.config["symbol_pair"], {})
            if dic:
                self.exchange_info["price_tick"] = dic["price_tick"]
                self.exchange_info["volume_tick"] = dic["volume_tick"]
        except Exception as ex:
            log_service_manager.write_log("[Error] MovMakerStrategy,update_exchange ex:{}".format(ex))

    def get_now_has_order_ids(self):
        """Return ids of every order we own: ladder orders plus live covers."""
        ret = list(self.put_order_dict.keys())
        if self.buy_cover_order:
            ret.append(self.buy_cover_order.order_id)
        if self.sell_cover_order:
            ret.append(self.sell_cover_order.order_id)
        return ret

    def get_price_list(self, direction):
        """Prices of resting ladder orders on one side, best-first.

        "sell" prices ascend (closest ask first); "buy" prices descend
        (closest bid first).
        """
        items = self.put_order_dict.items()
        order_ids = [x[0] for x in items]  # NOTE(review): unused
        orders = [x[1] for x in items]
        price_lists = [order.price for order in orders if order.direction == direction]
        if direction == "sell":
            price_lists.sort()
        else:
            price_lists.sort(reverse=True)
        return price_lists

    def cancel_not_belong_orders(self):
        """Cancel exchange-side open orders that we are not tracking."""
        now_order_ids = self.get_now_has_order_ids()
        orders = self.impl.query_open_orders(self.config["symbol_pair"])
        for order in orders:
            if order.order_id not in now_order_ids:
                log_service_manager.write_log("cancel:{}".format(order.order_id))
                self.impl.cancel_order(order.order_id)

    def put_long_orders(self):
        """Lay out the ladder of buy orders below the mid price.

        Skipped entirely while a long position is open (its cover order is
        working instead).
        """
        if self.position_long:
            return
        start_price = (self.bid + self.ask) / 2.0
        # Rung sizes are percentages of the usable base balance, valued at bid.
        start_volume = self.config["long_config"]["start_volume"] * self.exchange_info[
            "pos_base_symbol"] / self.bid / 100.0
        if not start_volume > 0:
            return
        inc_price = self.config["long_config"]["inc_spread"] * self.bid / 100.0
        inc_volume = self.config["long_config"]["inc_volume"] * self.exchange_info["pos_base_symbol"] / self.bid / 100.0
        left_base_amount = self.exchange_info["pos_base_symbol"]
        start_price -= inc_price
        log_service_manager.write_log(
            "put_long_orders,start_price:{},start_volume:{},inc_price:{},inc_volume:{}".format(start_price,
                                                                                               start_volume, inc_price,
                                                                                               inc_volume))
        order_list = []
        ind = 0
        now_price_list = self.get_price_list("buy")
        len_all = len(now_price_list)
        left_num = self.config["long_config"]["put_order_num"] - len_all
        for i in range(self.config["long_config"]["put_order_num"]):
            # Place a rung only while quota remains and we keep ~5% headroom;
            # the inner condition keeps >= inc_price distance from the rungs
            # already resting around this price level.
            if left_num > 0 and start_price * start_volume < left_base_amount * 0.95:
                if (len_all == 0) or (ind == 0 and start_price - inc_price >= now_price_list[0]) \
                        or (ind > 0 and ind + 1 == len_all and now_price_list[ind - 1] - inc_price >= start_price) \
                        or (ind > 0 and ind + 1 < len_all and now_price_list[ind - 1] - inc_price >= start_price
                            and start_price - inc_price >= now_price_list[ind]):
                    use_volume = get_round_order_price(start_volume, self.exchange_info["volume_tick"])
                    use_price = get_round_order_price(start_price, self.exchange_info["price_tick"])
                    order_list.append(("buy", use_price, use_volume))
                    left_base_amount -= use_volume * use_price
                    left_num -= 1
                start_volume += inc_volume
                start_price -= inc_price
            else:
                break
            # Skip past resting rungs that are now above the next price.
            while ind < len_all and start_price < now_price_list[ind]:
                ind += 1
        self.send_order_list(order_list)

    def put_short_orders(self):
        """Lay out the ladder of sell orders above the mid price (mirror of
        put_long_orders; sizes are in target-asset units)."""
        if self.position_short:
            return
        start_price = (self.bid + self.ask) / 2.0
        start_volume = self.config["short_config"]["start_volume"] * self.exchange_info["pos_target_symbol"] / 100.0
        if not start_volume > 0:
            return
        inc_volume = self.config["short_config"]["inc_volume"] * self.exchange_info["pos_target_symbol"] / 100.0
        inc_price = self.config["short_config"]["inc_spread"] * self.bid / 100.0
        left_target_amount = self.exchange_info["pos_target_symbol"]
        start_price += inc_price
        log_service_manager.write_log(
            "put_short_orders,start_price:{},start_volume:{},inc_price:{},inc_volume:{}".format(start_price,
                                                                                                start_volume, inc_price,
                                                                                                inc_volume))
        order_list = []
        ind = 0
        now_price_list = self.get_price_list("sell")
        len_all = len(now_price_list)
        left_num = self.config["short_config"]["put_order_num"] - len_all
        for i in range(self.config["short_config"]["put_order_num"]):
            if left_num > 0 and start_volume < left_target_amount * 0.95:
                # NOTE(review): the last clause indexes now_price_list[ind + 1]
                # while the long-side mirror uses now_price_list[ind] — confirm
                # this asymmetry is intended and cannot index out of range.
                if (len_all == 0) or (ind == 0 and start_price + inc_price <= now_price_list[0]) \
                        or (ind > 0 and ind + 1 == len_all and now_price_list[ind - 1] + inc_price >= start_price) \
                        or (ind > 0 and ind + 1 < len_all and now_price_list[ind - 1] + inc_price >= start_price
                            and start_price + inc_price <= now_price_list[ind + 1]):
                    use_volume = get_round_order_price(start_volume, self.exchange_info["volume_tick"])
                    use_price = get_round_order_price(start_price, self.exchange_info["price_tick"])
                    order_list.append(("sell", use_price, use_volume))
                    left_target_amount -= use_volume
                    left_num -= 1
                start_volume += inc_volume
                start_price += inc_price
            else:
                break
            while ind < len_all and start_price > now_price_list[ind]:
                ind += 1
        self.send_order_list(order_list)

    def send_order_list(self, order_list):
        """Send (side, price, volume) tuples; track them and freeze balances."""
        for side, price, volume in order_list:
            order = self.impl.send_order(self.config["symbol_pair"], side, price, volume)
            if order:
                self.put_order_dict[order.order_id] = copy(order)
                # Deduct the frozen amount from our local usable balance.
                if side == "buy":
                    self.exchange_info["pos_base_symbol"] -= order.price * order.volume
                else:
                    self.exchange_info["pos_target_symbol"] -= order.volume

    def put_orders(self):
        """Refresh both sides of the ladder."""
        self.put_long_orders()
        self.put_short_orders()

    def compute_avg_price(self, new_trade_price, new_trade_volume, new_trade_direction):
        """Fold a new fill into the side's volume-weighted average price."""
        if new_trade_direction == "buy":
            self.avg_price_long = (self.avg_price_long * self.position_long + new_trade_price * new_trade_volume) / (
                    self.position_long + new_trade_volume)
            self.position_long += new_trade_volume
        else:
            self.avg_price_short = (self.avg_price_short * self.position_short + new_trade_price * new_trade_volume) / (
                    self.position_short + new_trade_volume)
            self.position_short += new_trade_volume
        log_service_manager.write_log(
            "[compute_avg_price] [long:{},{}] [short:{},{}]".format(self.avg_price_long, self.position_long,
                                                                    self.avg_price_short,
                                                                    self.position_short))

    def cover_orders(self):
        """Poll our orders, book fills, and (re)place take-profit orders.

        NOTE(review): several accounting lines below look inconsistent with
        the frozen-balance bookkeeping done in send_order_list; they are
        flagged inline — verify against the exchange semantics.
        """
        now_order_ids = self.get_now_has_order_ids()
        orders = self.impl.query_list_orders(now_order_ids)
        all_new_traded_long = 0
        all_new_traded_short = 0
        for order in orders:
            bef_order = self.put_order_dict.get(order.order_id, None)
            if bef_order:
                new_traded = order.traded - bef_order.traded
                self.put_order_dict[order.order_id] = copy(order)
                if not order.is_active():
                    # Order finished: release the still-frozen remainder.
                    self.put_order_dict.pop(order.order_id)
                    new_return_frozen_volume = order.volume - order.traded
                    if order.direction == "buy":
                        self.exchange_info["pos_base_symbol"] += new_return_frozen_volume * order.price
                    else:
                        self.exchange_info["pos_target_symbol"] += new_return_frozen_volume
                if new_traded > 0:
                    self.compute_avg_price(order.price, new_traded, order.direction)
                    if order.direction == "buy":
                        all_new_traded_long += new_traded
                        # NOTE(review): the full order value was deducted when
                        # the order was sent; deducting the filled part again
                        # looks double-counted — confirm.
                        self.exchange_info["pos_base_symbol"] -= new_traded * order.price
                        # NOTE(review): (1 - cover_rate) equals fee_rate, so
                        # this credits only the fee-sized amount — confirm.
                        self.exchange_info["pos_target_symbol"] += new_traded * (1 - self.cover_rate)
                    else:
                        all_new_traded_short += new_traded
                        self.exchange_info["pos_base_symbol"] += new_traded * order.price * (1 - self.cover_rate)
                        self.exchange_info["pos_target_symbol"] -= new_traded
            if self.buy_cover_order and order.order_id == self.buy_cover_order.order_id:
                new_traded = order.traded - self.buy_cover_order.traded
                if new_traded > 0:
                    # NOTE(review): the buy cover is sized from position_short
                    # (see below) yet its fills reduce position_long — confirm
                    # which side this cover is meant to close.
                    self.position_long -= new_traded
                    self.exchange_info["pos_base_symbol"] -= new_traded * self.buy_cover_order.price
                    self.exchange_info["pos_target_symbol"] += new_traded * (1 - self.cover_rate)
                self.buy_cover_order = copy(order)
                if not order.is_active():
                    new_return_frozen_volume = self.buy_cover_order.volume - self.buy_cover_order.traded
                    self.exchange_info["pos_base_symbol"] += new_return_frozen_volume * self.buy_cover_order.price
                    self.buy_cover_order = None
                    self.put_long_orders()
            if self.sell_cover_order and order.order_id == self.sell_cover_order.order_id:
                new_traded = order.traded - self.sell_cover_order.traded
                if new_traded > 0:
                    self.position_long -= new_traded
                    self.exchange_info["pos_base_symbol"] += new_traded * self.sell_cover_order.price * (
                            1 - self.cover_rate)
                    self.exchange_info["pos_target_symbol"] -= new_traded
                self.sell_cover_order = copy(order)
                if not order.is_active():
                    new_return_frozen_volume = self.sell_cover_order.volume - self.sell_cover_order.traded
                    self.exchange_info["pos_target_symbol"] += new_return_frozen_volume
                    self.sell_cover_order = None
                    self.put_short_orders()
        if all_new_traded_long > 0:
            # New buys filled: refresh the sell-side take-profit order.
            if self.sell_cover_order:
                self.impl.cancel_order(self.sell_cover_order.order_id)
            # NOTE(review): prices the sell cover from avg_price_short and logs
            # the short-side fields while sizing from position_long — the
            # long/short fields look cross-wired here; confirm.
            price = self.avg_price_short * (1 - self.config["short_config"]["profit_spread"] / 100.0)
            self.sell_cover_order = self.impl.send_order(self.config["symbol_pair"], "sell", price,
                                                         abs(self.position_long))
            log_service_manager.write_log(
                "[cover_orders] [short:{},{}]".format(self.avg_price_short, self.position_short))
        if all_new_traded_short > 0:
            if self.buy_cover_order:
                self.impl.cancel_order(self.buy_cover_order.order_id)
            # NOTE(review): same long/short cross-wiring as above.
            price = self.avg_price_long * (1 + self.config["long_config"]["profit_spread"] / 100.0)
            self.buy_cover_order = self.impl.send_order(self.config["symbol_pair"], "buy", price,
                                                        abs(self.position_short))
            log_service_manager.write_log("[cover_orders] [long:{},{}]".format(self.avg_price_long, self.position_long))

    def run(self):
        """Main loop: refresh metadata once, then poll fills/quotes forever.

        NOTE(review): the loop has no sleep, so it busy-polls the API.
        """
        self.update_exchange()
        self.update_account()
        count = 0
        while True:
            try:
                # Every 6th iteration, sweep stray orders we don't track.
                if count % 6 == 0:
                    self.cancel_not_belong_orders()
                self.ask, self.bid = self.impl.get_price(self.config["symbol_pair"])
                if self.ask and self.bid:
                    # log_service_manager.write_log("[Info] cover_orders")
                    self.cover_orders()
                    # log_service_manager.write_log("[Info] put_orders")
                    self.put_orders()
                count += 1
            except Exception as ex:
                log_service_manager.write_log("[Error] MovMakerStrategy,run ex:{}".format(ex))
# Script entry point; account_config / strategy_config are expected to be
# defined earlier in this module (not visible in this chunk).
if __name__ == "__main__":
    s = MovMakerStrategy(account_config, strategy_config)
    s.run()
|
<reponame>dyf-2316/Comment_Sentiment_Analysis
# -*- coding:utf-8 -*-
# @Time: 2020/7/11 10:31 PM
# @Author: dyf-2316
# @FileName: getData.py
# @Software: PyCharm
# @Project: Comment_Sentiment_Analysis
# @Description: get data from html/json
import re
from Logger import Logger
from config import *
from WebCrawler.getPageSource import get_page_html, get_json_data
mylogger = Logger("gatData").logger
def get_search_url(keywords):
    """Build the product-search page URL for the given keyword(s).

    :param keywords: (str) keyword(s) to search for
    :return: (str) search-page URL with the keyword(s) substituted in
    """
    url = SEARCH_URL.format(keywords)
    # Logger messages must be fully formatted strings (unlike print).
    mylogger.debug("获取搜索界面URL -- {}".format(url))
    return url
# SKUs already crawled in earlier runs; get_product_id() filters these out so
# the same product is not fetched twice.
product_already_get = ['100002076057', '47775809660', '51961736081', '51961736083', '51961736082',
                       '100003407023', '100011323840', '100002738154', '36937884339', '26692636341',
                       '34482228435', '67271641787', '6723160', '60683595850', '60683595849',
                       '100003407025', '50166525673', '100006961267', '29197375716', '25749487078']
def get_product_id(url, product_num=DEFAULT_PRODUCT_NUM):
    """Extract product IDs (the ``data-sku`` attributes) from a search page.

    :param url: (str) search-page URL
    :param product_num: (int) number of products wanted; currently unused —
        every match on the page is returned
    :return: (list) product IDs that have not been crawled before
    """
    html = get_page_html(url)
    results = re.findall('<li .*?data-sku=\"(.*?)\"', html, re.S)
    # Bug fix: the original called results.remove() while iterating the same
    # list, which skips the element following each removal; filter into a new
    # list instead.
    results = [sku for sku in results if sku not in product_already_get]
    mylogger.debug("获取商品ID -- {}".format(results))
    return results
def get_product_data(product_id):
    """Fetch a product's page and price JSON and build its info dict.

    Scrapes ``good_id`` (the "货号"/article number) and ``brand`` from the
    product HTML, then reads the price from a separate JSON endpoint.

    :param product_id: (str) product ID
    :return: (dict | None) {"good_id", "brand", "price"}, or None (logged)
        when the page lacks the expected fields
    """
    try:
        product_url = PRODUCT_URL.format(product_id)
        mylogger.debug("获取商品基本信息页面URL -- {}".format(product_url))
        product_html = get_page_html(product_url)
        # [0] raises IndexError when the pattern finds nothing — handled below.
        good_id = (re.findall('>货号:(.*?)</li>', product_html, re.S))[0]
        brand = (re.findall("id=\"parameter-brand.*?title=\'(.*?)\'>品牌", product_html, re.S))[0]
        # Price lives behind a separate JSON API, keyed 'p'.
        price_url = PRICE_URL.format(product_id)
        json_data = get_json_data(price_url)
        price = json_data['p']
        product_data = {
            "good_id": good_id,
            "brand": brand,
            "price": price
        }
        mylogger.debug("获取商品基本信息字典 -- {}".format(product_data))
        return product_data
    except IndexError:
        # Missing article number or brand on the page: skip this product.
        mylogger.error("商品基本信息空缺,跳过 -- {}".format(product_id))
        return None
def get_comment_num(product_id):
    """Return the total number of comments for a product.

    Reads the comment-summary JSON (page 1, score filter 0) and extracts
    the overall comment count.

    :param product_id: (str) product ID
    :return: (int) total comment count
    """
    url = COMMENT_URL.format(product_id, '1', '0')
    mylogger.debug("获取商品评论页面URL:{}".format(url))
    data = get_json_data(url)
    total = data['productCommentSummary']['commentCount']
    mylogger.debug("获取商品总评论数 -- number:{}".format(total))
    return total
def get_comment_data(product_id, score, page, product):
    """Fetch one page of comments and merge them with the product's info.

    :param product_id: (str) product ID
    :param score: (int) score filter for the comment endpoint
    :param page: (int) comment page number
    :param product: (dict) product info from get_product_data()
    :return: (list[dict]) one dict per comment, carrying the product's
        good_id/brand/price plus the comment's time, score and text
    """
    comment_url = COMMENT_URL.format(product_id, score, page)
    mylogger.debug("获取商品评论页面URL -- {}".format(comment_url))
    json_data = get_json_data(comment_url)
    # Idiom fix: iterate the comments directly instead of indexing via
    # range(len(...)); a comprehension builds the merged records.
    comment_data = [
        {
            "good_id": product['good_id'],
            "brand": product['brand'],
            "price": product['price'],
            "creationTime": item['creationTime'],
            "score": item['score'],
            "comment": item['content']
        }
        for item in json_data['comments']
    ]
    mylogger.debug("获取商品评论数据 -- counts:{}".format(len(comment_data)))
    return comment_data
|
<filename>tests/color_system_test.py
# -*- coding: utf-8 -*-
import json
import requests
import unittest
from pycolorname.color_system import ColorSystem
from pycolorname.utilities import make_temp
class ColorSystemTest(unittest.TestCase):
    """Unit tests for pycolorname.color_system.ColorSystem."""

    def setUp(self):
        self.uut = ColorSystem()

    def test_dict(self):
        """ColorSystem supports the mapping protocol (set/len/keys/clear/update)."""
        self.uut['test_key'] = 'test_val'
        self.assertEqual(len(self.uut), 1)
        self.assertEqual(list(self.uut.keys()), ['test_key'])
        self.assertEqual(list(self.uut.values()), ['test_val'])
        self.assertEqual(list(self.uut.items())[0], ('test_key', 'test_val'))
        self.assertIn('test_key', self.uut)
        self.uut.clear()
        self.assertEqual(len(self.uut), 0)
        self.assertNotIn('test_key', self.uut)
        self.uut.update({"test_key": "test_val"})
        self.assertEqual(list(self.uut.items())[0], ('test_key', 'test_val'))

    def test_load(self):
        """load() populates from refresh(), persists to file, and reads back."""
        old_refresh = self.uut.refresh
        test_data = {"name": "color"}
        try:
            self.uut.refresh = lambda: test_data
            with make_temp() as filename:
                self.uut.load(filename)
                # Test if data loaded into class
                self.assertEqual(dict(self.uut), test_data)
                # Test if data saved into file
                with open(filename) as fp:
                    json_data = json.load(fp)
                self.assertEqual(json_data, test_data)
                # Test if data is being read from file
                self.uut.clear()
                self.uut.refresh = lambda: {}
                self.uut.load(filename)
                # Bug fix: this previously re-asserted json_data == test_data
                # (already checked above), leaving the read-back path untested;
                # assert the instance was repopulated from the file instead.
                self.assertEqual(dict(self.uut), test_data)
                # Test if load() respects refresh param and clears old data
                test_data = {"name2": "color2"}
                self.uut.refresh = lambda: test_data
                self.uut.load(filename, refresh=True)
                self.assertEqual(dict(self.uut), test_data)
        finally:
            self.uut.refresh = old_refresh

    def test_request(self):
        """request() delegates to requests.request and parses the response."""
        test_url = "http://a_url_to_test_with.org"

        def mock_request(url):
            self.assertEqual(url, test_url)

            class MockResponse:
                text = "<title>Test title</title>"
            return MockResponse()

        old_request = requests.request
        try:
            requests.request = mock_request
            bs = self.uut.request(test_url)
            self.assertEqual(bs.title.text, 'Test title')
        finally:
            requests.request = old_request

    def test_hex_to_rgb(self):
        """hex_to_rgb handles 3- and 6-digit forms, with or without '#'."""
        self.assertEqual(self.uut.hex_to_rgb("#000"), (0, 0, 0))
        self.assertEqual(self.uut.hex_to_rgb("#010101"), (1, 1, 1))
        self.assertEqual(self.uut.hex_to_rgb("#aaa"), (170, 170, 170))
        self.assertEqual(self.uut.hex_to_rgb("fff"), (255, 255, 255))
        with self.assertRaises(ValueError):
            self.uut.hex_to_rgb("a")

    def test_find_closest(self):
        """find_closest returns the nearest named color by RGB distance."""
        self.uut.update({"a": (0, 0, 0),
                         "b": (100, 100, 100),
                         "c": (200, 200, 200)})
        name, color = self.uut.find_closest((0, 0, 0))
        self.assertEqual(name, "a")
        self.assertEqual(color, (0, 0, 0))
        name, color = self.uut.find_closest((210, 210, 210))
        self.assertEqual(name, "c")
        self.assertEqual(color, (200, 200, 200))
|
# Copyright 2012,2013 <NAME>
# Copyright 2012,2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A software OpenFlow switch
"""
"""
TODO
----
* Don't reply to HELLOs -- just send one on connect
* Pass raw OFP packet to rx handlers as well as parsed
* Once previous is done, use raw OFP for error data when appropriate
* Check self.features to see if various features/actions are enabled,
and act appropriately if they're not (rather than just doing them).
* Virtual ports currently have no config/state, but probably should.
* Provide a way to rebuild, e.g., the action handler table when the
features object is adjusted.
"""
from pox.lib.util import assert_type, initHelper, dpid_to_str
from pox.lib.revent import Event, EventMixin
from pox.lib.recoco import Timer
from pox.openflow.libopenflow_01 import *
import pox.openflow.libopenflow_01 as of
from pox.openflow.util import make_type_to_unpacker_table
from pox.openflow.flow_table import FlowTable, TableEntry
from pox.lib.packet import *
import logging
import struct
import time
# Multicast address used for STP 802.1D (the bridge group address); rx_packet
# treats frames to this address specially per the port's NO_RECV/NO_RECV_STP
# config flags.
_STP_MAC = EthAddr('01:80:c2:00:00:00')
class DpPacketOut (Event):
  """
  Event raised when a dataplane packet is sent out a port

  - node: the switch (or node) that emitted the packet
  - packet: a parsed ethernet instance (raw bytes are rejected)
  - port: the port the packet went out of
  """
  def __init__ (self, node, packet, port):
    # The packet must be a parsed ethernet instance, never None.
    assert assert_type("packet", packet, ethernet, none_ok=False)
    self.node = node
    self.packet = packet
    self.port = port
    self.switch = node # For backwards compatability
class SoftwareSwitchBase (object):
def __init__ (self, dpid, name=None, ports=4, miss_send_len=128,
              max_buffers=100, max_entries=0x7fFFffFF, features=None):
  """
  Initialize switch
   - ports is a list of ofp_phy_ports or a number of ports
   - miss_send_len is number of bytes to send to controller on table miss
   - max_buffers is number of buffered packets to store
   - max_entries is max flows entries per table
   - features is an optional SwitchFeatures; a default set is built otherwise
  """
  if name is None: name = dpid_to_str(dpid)
  self.name = name
  self.dpid = dpid
  # A bare port count means "generate that many sequential default ports".
  if isinstance(ports, int):
    ports = [self.generate_port(i) for i in range(1, ports+1)]
  self.max_buffers = max_buffers
  self.max_entries = max_entries
  self.miss_send_len = miss_send_len
  self.config_flags = 0
  self._has_sent_hello = False
  self.table = FlowTable()
  self.table.addListeners(self)
  # Counters reported in table stats replies.
  self._lookup_count = 0
  self._matched_count = 0
  self.log = logging.getLogger(self.name)
  self._connection = None
  # buffer for packets during packet_in
  self._packet_buffer = []
  # Map port_no -> openflow.pylibopenflow_01.ofp_phy_ports
  self.ports = {}
  self.port_stats = {}
  for port in ports:
    self.add_port(port)
  if features is not None:
    self.features = features
  else:
    # Set up default features
    self.features = SwitchFeatures()
    self.features.cap_flow_stats = True
    self.features.cap_table_stats = True
    self.features.cap_port_stats = True
    #self.features.cap_stp = True
    #self.features.cap_ip_reasm = True
    #self.features.cap_queue_stats = True
    #self.features.cap_arp_match_ip = True
    self.features.act_output = True
    self.features.act_enqueue = True
    self.features.act_strip_vlan = True
    self.features.act_set_vlan_vid = True
    self.features.act_set_vlan_pcp = True
    self.features.act_set_dl_dst = True
    self.features.act_set_dl_src = True
    self.features.act_set_nw_dst = True
    self.features.act_set_nw_src = True
    self.features.act_set_nw_tos = True
    self.features.act_set_tp_dst = True
    self.features.act_set_tp_src = True
    #self.features.act_vendor = True
  # Set up handlers for incoming OpenFlow messages
  # That is, self.ofp_handlers[OFPT_FOO] = self._rx_foo
  # (iteritems: this module targets Python 2)
  self.ofp_handlers = {}
  for value,name in ofp_type_map.iteritems():
    name = name.split("OFPT_",1)[-1].lower()
    h = getattr(self, "_rx_" + name, None)
    if not h: continue
    # Only controller-to-switch message types should have _rx_ handlers.
    assert of._message_type_to_class[value]._from_controller, name
    self.ofp_handlers[value] = h
  # Set up handlers for actions
  # That is, self.action_handlers[OFPAT_FOO] = self._action_foo
  #TODO: Refactor this with above
  self.action_handlers = {}
  for value,name in ofp_action_type_map.iteritems():
    name = name.split("OFPAT_",1)[-1].lower()
    h = getattr(self, "_action_" + name, None)
    if not h: continue
    # Skip actions explicitly disabled in the feature set.
    if getattr(self.features, "act_" + name) is False: continue
    self.action_handlers[value] = h
  # Set up handlers for stats handlers
  # That is, self.stats_handlers[OFPST_FOO] = self._stats_foo
  #TODO: Refactor this with above
  self.stats_handlers = {}
  for value,name in ofp_stats_type_map.iteritems():
    name = name.split("OFPST_",1)[-1].lower()
    h = getattr(self, "_stats_" + name, None)
    if not h: continue
    self.stats_handlers[value] = h
  # Set up handlers for flow mod handlers
  # That is, self.flow_mod_handlers[OFPFC_FOO] = self._flow_mod_foo
  #TODO: Refactor this with above
  self.flow_mod_handlers = {}
  for name,value in ofp_flow_mod_command_rev_map.iteritems():
    name = name.split("OFPFC_",1)[-1].lower()
    h = getattr(self, "_flow_mod_" + name, None)
    if not h: continue
    self.flow_mod_handlers[value] = h
def _gen_port_name (self, port_no):
  # Default port name, e.g. "0000000000ab.1": first 12 hex digits of the
  # dpid (dashes stripped) plus the port number.
  return "%s.%s"%(dpid_to_str(self.dpid, True).replace('-','')[:12], port_no)
def _gen_ethaddr (self, port_no):
  # Locally administered MAC: 02 | dpid-derived bits | low 16 bits of port.
  # NOTE(review): the dpid is taken mod 0x00FFff (~16 bits) although the
  # "%06x" field has room for 24 bits — confirm the mask is intended.
  return EthAddr("02%06x%04x" % (self.dpid % 0x00FFff, port_no % 0xffFF))
def generate_port (self, port_no, name = None, ethaddr = None):
  """
  Build an ofp_phy_port for the given port number.

  - name defaults to a dpid-derived string (see _gen_port_name)
  - ethaddr defaults to a dpid/port-derived locally administered MAC
  Config/features are filled in somewhat arbitrarily (10Mb half duplex,
  STP disabled).
  """
  # (Removed an unused local `dpid = self.dpid` from the original.)
  p = ofp_phy_port()
  p.port_no = port_no
  if ethaddr is None:
    p.hw_addr = self._gen_ethaddr(p.port_no)
  else:
    p.hw_addr = EthAddr(ethaddr)
  if name is None:
    p.name = self._gen_port_name(p.port_no)
  else:
    p.name = name
  # Fill in features sort of arbitrarily
  p.config = OFPPC_NO_STP
  p.curr = OFPPF_10MB_HD
  p.advertised = OFPPF_10MB_HD
  p.supported = OFPPF_10MB_HD
  p.peer = OFPPF_10MB_HD
  return p
@property
def _time (self):
  """
  Get the current time (seconds since the epoch).

  This should be used for, e.g., calculating timeouts.  It currently isn't
  used everywhere it should be.

  Override this to change time behavior (e.g., for simulated time in tests).
  """
  return time.time()
def _handle_FlowTableModification (self, event):
  """
  Handle flow table modification events

  Sends ofp_flow_removed to the controller for each removed entry that
  requested notification (OFPFF_SEND_FLOW_REM) and is not emergency.
  """
  # Currently, we only use this for sending flow_removed messages
  if not event.removed: return
  if event.reason in (OFPRR_IDLE_TIMEOUT,OFPRR_HARD_TIMEOUT,OFPRR_DELETE):
    # These reasons may lead to a flow_removed
    count = 0
    for entry in event.removed:
      if entry.flags & OFPFF_SEND_FLOW_REM and not entry.flags & OFPFF_EMERG:
        # Flow wants removal notification -- send it
        fr = entry.to_flow_removed(self._time, reason=event.reason)
        self.send(fr)
        count += 1
    self.log.debug("%d flows removed (%d removal notifications)",
                   len(event.removed), count)
def rx_message (self, connection, msg):
  """
  Dispatch an incoming OpenFlow message to its _rx_* handler.

  Raises RuntimeError when no handler is registered for the message type.
  """
  msg_type = msg.header_type
  handler = self.ofp_handlers.get(msg_type)
  if handler is None:
    raise RuntimeError("No handler for ofp_type %s(%d)"
                       % (ofp_type_map.get(msg_type), msg_type))
  self.log.debug("Got %s with XID %s", ofp_type_map.get(msg_type), msg.xid)
  handler(msg, connection=connection)
def set_connection (self, connection):
  """
  Set this switch's connection.

  Resets the hello state and registers rx_message as the connection's
  message handler.
  """
  self._has_sent_hello = False
  connection.set_message_handler(self.rx_message)
  self._connection = connection
def send (self, message, connection = None):
  """
  Send a message to this switch's communication partner

  Uses the switch's stored connection unless *connection* is given; if
  there is no usable connection, the message is dropped with a debug log.
  """
  conn = self._connection if connection is None else connection
  if not conn:
    self.log.debug("Asked to send message %s, but not connected", message)
    return
  conn.send(message)
def _rx_hello (self, ofp, connection):
  # Respond to a controller HELLO with our own (sent at most once).
  #FIXME: This isn't really how hello is supposed to work -- we're supposed
  #       to send it immediately on connection.  See _send_hello().
  self.send_hello()
def _rx_echo_request (self, ofp, connection):
  """
  Handles echo requests

  The reply echoes both the request's XID and its body, per the spec.
  """
  msg = ofp_echo_reply(xid=ofp.xid, body=ofp.body)
  self.send(msg)
def _rx_features_request (self, ofp, connection):
  """
  Handles feature requests

  Replies with our dpid, buffer count, a single flow table, and the
  capability/action bits derived from self.features.
  """
  self.log.debug("Send features reply")
  msg = ofp_features_reply(datapath_id = self.dpid,
                           xid = ofp.xid,
                           n_buffers = self.max_buffers,
                           n_tables = 1,
                           capabilities = self.features.capability_bits,
                           actions = self.features.action_bits,
                           ports = self.ports.values())
  self.send(msg)
def _rx_flow_mod (self, ofp, connection):
  """
  Handles flow mods

  Dispatches on ofp.command via self.flow_mod_handlers; unknown commands get
  an OFPET_FLOW_MOD_FAILED / OFPFMFC_BAD_COMMAND error reply.  If the
  flow_mod references a buffered packet, its actions are also applied to
  that packet.
  """
  self.log.debug("Flow mod details: %s", ofp.show())
  handler = self.flow_mod_handlers.get(ofp.command)
  if handler is None:
    # Bug fix: `command` was an undefined name here, so an unimplemented
    # command raised NameError instead of logging; use ofp.command.
    self.log.warn("Command not implemented: %s" % (ofp.command,))
    self.send_error(type=OFPET_FLOW_MOD_FAILED, code=OFPFMFC_BAD_COMMAND,
                    ofp=ofp, connection=connection)
    return
  handler(flow_mod=ofp, connection=connection, table=self.table)
  if ofp.buffer_id is not None:
    self._process_actions_for_packet_from_buffer(ofp.actions, ofp.buffer_id,
                                                 ofp)
def _rx_packet_out (self, packet_out, connection):
  """
  Handles packet_outs

  The packet to send comes either inline (data) or from the packet buffer
  (buffer_id); having neither is an error we only log.
  """
  self.log.debug("Packet out details: %s", packet_out.show())
  if packet_out.data:
    self._process_actions_for_packet(packet_out.actions, packet_out.data,
                                     packet_out.in_port, packet_out)
  elif packet_out.buffer_id is not None:
    self._process_actions_for_packet_from_buffer(packet_out.actions,
                                                 packet_out.buffer_id,
                                                 packet_out)
  else:
    self.log.warn("packet_out: No data and no buffer_id -- "
                  "don't know what to send")
def _rx_echo_reply (self, ofp, connection):
  # We don't track our own outstanding echo requests, so nothing to do here.
  pass
def _rx_barrier_request (self, ofp, connection):
  # Messages are processed in order as they arrive, so a barrier is
  # trivially satisfied -- just acknowledge it.
  msg = ofp_barrier_reply(xid = ofp.xid)
  self.send(msg)
def _rx_get_config_request (self, ofp, connection):
  # Report our current miss_send_len and config flags (see _rx_set_config).
  msg = ofp_get_config_reply(xid = ofp.xid)
  msg.miss_send_len = self.miss_send_len
  msg.flags = self.config_flags
  self.log.debug("Sending switch config reply %s", msg)
  self.send(msg)
def _rx_stats_request (self, ofp, connection):
  """
  Dispatch a stats request to the matching _stats_* handler.

  Unimplemented stats types get an OFPET_BAD_REQUEST error.  A handler that
  returns a body gets it wrapped in an ofp_stats_reply and sent; a handler
  returning None produces no reply here (presumably it replied itself --
  confirm against the individual handlers).
  """
  handler = self.stats_handlers.get(ofp.type)
  if handler is None:
    self.log.warning("Stats type %s not implemented", ofp.type)
    self.send_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_STAT,
                    ofp=ofp, connection=connection)
    return
  body = handler(ofp, connection=connection)
  if body is not None:
    reply = ofp_stats_reply(xid=ofp.xid, type=ofp.type, body=body)
    self.log.debug("Sending stats reply %s", reply)
    self.send(reply)
def _rx_set_config (self, config, connection):
  # Adopt the controller's config; set_config is not acknowledged.
  self.miss_send_len = config.miss_send_len
  self.config_flags = config.flags
def _rx_port_mod (self, port_mod, connection):
  """
  Handles port_mod messages.

  Validates the port number and hardware address, then applies each config
  bit selected by the mask via _set_port_config_bit, logging the outcome.
  """
  port_no = port_mod.port_no
  if port_no not in self.ports:
    self.send_error(type=OFPET_PORT_MOD_FAILED, code=OFPPMFC_BAD_PORT,
                    ofp=port_mod, connection=connection)
    return
  port = self.ports[port_no]
  # The hw_addr must match the port's, per the spec, to guard against
  # modifying the wrong port.
  if port.hw_addr != port_mod.hw_addr:
    self.send_error(type=OFPET_PORT_MOD_FAILED, code=OFPPMFC_BAD_HW_ADDR,
                    ofp=port_mod, connection=connection)
    return
  mask = port_mod.mask
  # Walk every bit position; only masked bits are applied.
  for bit in range(32):
    bit = 1 << bit
    if mask & bit:
      handled,r = self._set_port_config_bit(port, bit, port_mod.config & bit)
      if not handled:
        self.log.warn("Unsupported port config flag: %08x", bit)
        continue
      if r is not None:
        # r is either a message string or a truthy/falsy "new value".
        msg = "Port %s: " % (port.port_no,)
        if isinstance(r, str):
          msg += r
        else:
          msg += ofp_port_config_map.get(bit, "config bit %x" % (bit,))
          msg += " set to "
          msg += "true" if r else "false"
        self.log.debug(msg)
def _rx_vendor (self, vendor, connection):
  """Reject vendor extension messages with OFPBRC_BAD_VENDOR."""
  # We don't support vendor extensions, so send an OFP_ERROR, per
  # page 42 of spec
  self.send_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_VENDOR,
                  ofp=vendor, connection=connection)
def _rx_queue_get_config_request (self, ofp, connection):
  """
  Handles an OFPT_QUEUE_GET_CONFIG_REQUEST message.

  We implement no queues, so the reply always carries an empty queue list.
  """
  reply = ofp_queue_get_config_reply(xid=ofp.xid, port=ofp.port, queues=[])
  self.log.debug("Sending queue get config reply %s", reply)
  self.send(reply)
def send_hello (self, force = False):
  """
  Send hello (once)

  Subsequent calls are no-ops unless force is True.
  """
  #FIXME: This is wrong -- we should just send when connecting.
  if self._has_sent_hello and not force: return
  self._has_sent_hello = True
  self.log.debug("Sent hello")
  msg = ofp_hello(xid=0)
  self.send(msg)
def send_packet_in (self, in_port, buffer_id=None, packet=b'', reason=None,
                    data_length=None):
  """
  Send PacketIn

  - in_port: port the packet arrived on
  - buffer_id: id of the buffered copy, or None if unbuffered
  - packet: packet bytes, or an object with .pack()
  - reason: an OFPR_xxx value (defaults to OFPR_NO_MATCH)
  - data_length: max bytes of packet data to include (miss_send_len)
  """
  if hasattr(packet, 'pack'):
    packet = packet.pack()
  assert assert_type("packet", packet, bytes)
  self.log.debug("Send PacketIn")
  if reason is None:
    reason = OFPR_NO_MATCH
  if data_length is not None and len(packet) > data_length:
    # Only truncate when the packet is buffered; an unbuffered packet_in
    # must carry the whole packet since the controller can't fetch the rest.
    if buffer_id is not None:
      packet = packet[:data_length]
  msg = ofp_packet_in(xid = 0, in_port = in_port, buffer_id = buffer_id,
                      reason = reason, data = packet)
  self.send(msg)
def send_port_status (self, port, reason):
  """
  Send port status

  port is an ofp_phy_port
  reason is one of OFPPR_xxx
  """
  assert assert_type("port", port, ofp_phy_port, none_ok=False)
  assert reason in ofp_port_reason_rev_map.values()
  msg = ofp_port_status(desc=port, reason=reason)
  self.send(msg)
def send_error (self, type, code, ofp=None, data=None, connection=None):
  """
  Send an error

  If you pass ofp, it will be used as the source of the error's XID and
  data.
  You can override the data by also specifying data.
  """
  err = ofp_error(type=type, code=code)
  if ofp:
    err.xid = ofp.xid
    err.data = ofp.pack()
  else:
    err.xid = 0
  # Explicit data always wins over the packed ofp.
  if data is not None:
    err.data = data
  self.send(err, connection = connection)
def rx_packet (self, packet, in_port, packet_data = None):
  """
  process a dataplane packet

  packet: an instance of ethernet
  in_port: the integer port number
  packet_data: packed version of packet if available

  Applies per-port receive filters and the fragment-handling config, updates
  port stats, then either executes a matching flow entry's actions or
  buffers the packet and sends a packet_in to the controller.
  """
  assert assert_type("packet", packet, ethernet, none_ok=False)
  assert assert_type("in_port", in_port, int, none_ok=False)
  port = self.ports.get(in_port)
  if port is None:
    self.log.warn("Got packet on missing port %i", in_port)
    return
  is_stp = packet.dst == _STP_MAC
  if (port.config & OFPPC_NO_RECV) and not is_stp:
    # Drop all except STP
    return
  if (port.config & OFPPC_NO_RECV_STP) and is_stp:
    # Drop STP
    return
  # Fragment handling per the switch config (OFPC_FRAG_*).
  if self.config_flags & OFPC_FRAG_MASK:
    ipp = packet.find(ipv4)
    if ipp:
      if (ipp.flags & ipv4.MF_FLAG) or ipp.frag != 0:
        frag_mode = self.config_flags & OFPC_FRAG_MASK
        if frag_mode == OFPC_FRAG_DROP:
          # Drop fragment
          return
        elif frag_mode == OFPC_FRAG_REASM:
          if self.features.cap_ip_reasm:
            #TODO: Implement fragment reassembly
            self.log.info("Can't reassemble fragment: not implemented")
        else:
          self.log.warn("Illegal fragment processing mode: %i", frag_mode)
  self.port_stats[in_port].rx_packets += 1
  if packet_data is not None:
    self.port_stats[in_port].rx_bytes += len(packet_data)
  else:
    self.port_stats[in_port].rx_bytes += len(packet.pack()) # Expensive
  self._lookup_count += 1
  entry = self.table.entry_for_packet(packet, in_port)
  if entry is not None:
    # Table hit: run the entry's actions on the packet.
    self._matched_count += 1
    entry.touch_packet(len(packet))
    self._process_actions_for_packet(entry.actions, packet, in_port)
  else:
    # no matching entry
    if port.config & OFPPC_NO_PACKET_IN:
      return
    # Buffer the packet and hand (a prefix of) it to the controller.
    buffer_id = self._buffer_packet(packet, in_port)
    if packet_data is None:
      packet_data = packet.pack()
    self.send_packet_in(in_port, buffer_id, packet_data,
                        reason=OFPR_NO_MATCH, data_length=self.miss_send_len)
def delete_port (self, port):
  """
  Removes a port

  Sends a port_status message to the controller

  port: either an ofp_phy_port currently in self.ports, or a port number

  Returns the removed phy_port

  Raises RuntimeError if the port does not exist.
  """
  try:
    port_no = port.port_no
  except AttributeError:
    # Caller passed a port number rather than a port object.
    port_no = port
    port = None
  if port_no not in self.ports:
    # Previously, passing an unknown port *number* raised a bare KeyError
    # from inside an over-broad except clause; raise the intended error.
    raise RuntimeError("Can't remove nonexistent port " + str(port_no))
  if port is None:
    port = self.ports[port_no]
  else:
    # The object passed in must actually be the registered port.
    assert self.ports[port_no] is port
  self.send_port_status(port, OFPPR_DELETE)
  del self.ports[port_no]
  return port
def add_port (self, port):
  """
  Adds a port

  Sends a port_status message to the controller

  port: either an ofp_phy_port, or a port number (in which case a port
        object is generated via generate_port())

  Raises RuntimeError if the port number is already in use.
  """
  try:
    port_no = port.port_no
  except AttributeError:
    # Caller passed a port number; build an actual port object for it.
    # (Was a bare except; only attribute access can fail in the try.)
    port_no = port
    port = self.generate_port(port_no, self.dpid)
  if port_no in self.ports:
    raise RuntimeError("Port %s already exists" % (port_no,))
  self.ports[port_no] = port
  self.port_stats[port.port_no] = ofp_port_stats(port_no=port.port_no)
  self.send_port_status(port, OFPPR_ADD)
def _set_port_config_bit (self, port, bit, value):
  """
  Set a port config bit

  This is called in response to port_mods. It is passed the ofp_phy_port,
  the bit/mask, and the value of the bit (i.e., 0 if the flag is to be
  unset, or the same value as bit if it is to be set).

  The return value is a tuple (handled, msg).
  If bit is handled, then handled will be True, else False.
  if msg is a string, it will be used as part of a log message.
  If msg is None, there will be no log message.
  If msg is anything else "truthy", an "enabled" log message is generated.
  If msg is anything else "falsy", a "disabled" log message is generated.
  msg is only used when handled is True.
  """
  if bit == OFPPC_NO_STP:
    if value == 0:
      # we also might send OFPBRC_EPERM if trying to disable this bit
      self.log.warn("Port %s: Can't enable 802.1D STP", port.port_no)
    # Claim the bit as handled either way (STP itself is unsupported).
    return (True, None)

  if bit not in (OFPPC_PORT_DOWN, OFPPC_NO_STP, OFPPC_NO_RECV, OFPPC_NO_RECV_STP,
                 OFPPC_NO_FLOOD, OFPPC_NO_FWD, OFPPC_NO_PACKET_IN):
    return (False, None)

  if port.set_config(value, bit):
    if bit == OFPPC_PORT_DOWN:
      # Note (<NAME>): Although the spec is not clear about it,
      # we will assume that config.OFPPC_PORT_DOWN implies
      # state.OFPPS_LINK_DOWN. This is consistent with Open vSwitch.

      #TODO: for now, we assume that there is always physical link present
      # and that the link state depends only on the configuration.
      old_state = port.state & OFPPS_LINK_DOWN
      port.state = port.state & ~OFPPS_LINK_DOWN
      if port.config & OFPPC_PORT_DOWN:
        port.state = port.state | OFPPS_LINK_DOWN
      new_state = port.state & OFPPS_LINK_DOWN
      if old_state != new_state:
        self.send_port_status(port, OFPPR_MODIFY)

    # Do default log message.
    return (True, value)

  # No change -- no log message.
  return (True, None)
def _output_packet_physical (self, packet, port_no):
  """
  send a packet out a single physical port

  Default implementation only logs the send; subclasses override this to
  actually deliver the packet. Called by the more general _output_packet().
  """
  rendered = str(packet)
  self.log.info("Sending packet %s out port %s", rendered, port_no)
def _output_packet (self, packet, out_port, in_port, max_len=None):
  """
  send a packet out some port

  This handles virtual ports and does validation.

  packet: instance of ethernet
  out_port, in_port: the integer port number
  max_len: maximum packet payload length to send to controller
  """
  assert assert_type("packet", packet, ethernet, none_ok=False)

  def real_send (port_no, allow_in_port=False):
    # Send out one physical port, enforcing port config/state checks.
    if isinstance(port_no, ofp_phy_port):
      port_no = port_no.port_no
    if port_no == in_port and not allow_in_port:
      self.log.warn("Dropping packet sent on port %i: Input port", port_no)
      return
    if port_no not in self.ports:
      self.log.warn("Dropping packet sent on port %i: Invalid port", port_no)
      return
    if self.ports[port_no].config & OFPPC_NO_FWD:
      self.log.warn("Dropping packet sent on port %i: Forwarding disabled",
                    port_no)
      return
    if self.ports[port_no].config & OFPPC_PORT_DOWN:
      self.log.warn("Dropping packet sent on port %i: Port down", port_no)
      return
    if self.ports[port_no].state & OFPPS_LINK_DOWN:
      self.log.debug("Dropping packet sent on port %i: Link down", port_no)
      return
    self.port_stats[port_no].tx_packets += 1
    self.port_stats[port_no].tx_bytes += len(packet.pack()) #FIXME: Expensive
    self._output_packet_physical(packet, port_no)

  if out_port < OFPP_MAX:
    real_send(out_port)
  elif out_port == OFPP_IN_PORT:
    real_send(in_port, allow_in_port=True)
  elif out_port == OFPP_FLOOD:
    # All ports except the input port and NO_FLOOD ports.
    # (.items() instead of the Python-2-only .iteritems())
    for no,port in self.ports.items():
      if no == in_port: continue
      if port.config & OFPPC_NO_FLOOD: continue
      real_send(port)
  elif out_port == OFPP_ALL:
    for no,port in self.ports.items():
      if no == in_port: continue
      real_send(port)
  elif out_port == OFPP_CONTROLLER:
    buffer_id = self._buffer_packet(packet, in_port)
    # Should we honor OFPPC_NO_PACKET_IN here?
    self.send_packet_in(in_port, buffer_id, packet, reason=OFPR_ACTION,
                        data_length=max_len)
  elif out_port == OFPP_TABLE:
    # Do we disable send-to-controller when performing this?
    # (Currently, there's the possibility that a table miss from this
    # will result in a send-to-controller which may send back to table...)
    self.rx_packet(packet, in_port)
  else:
    self.log.warn("Unsupported virtual output port: %d", out_port)
def _buffer_packet (self, packet, in_port=None):
  """
  Buffer packet and return buffer ID

  Buffer IDs are 1-based (so 0 is never a valid ID).
  If no buffer is available, return None.
  """
  # Do we have an empty slot?
  for (i, value) in enumerate(self._packet_buffer):
    if value is None:
      # Yes -- use it
      self._packet_buffer[i] = (packet, in_port)
      return i + 1
  # No -- create a new slot
  if len(self._packet_buffer) >= self.max_buffers:
    # No buffers available!
    return None
  self._packet_buffer.append( (packet, in_port) )
  return len(self._packet_buffer)
def _process_actions_for_packet_from_buffer (self, actions, buffer_id,
                                             ofp=None):
  """
  output and release a packet from the buffer

  actions: list of actions to apply to the buffered packet
  buffer_id: 1-based ID returned by _buffer_packet()
  ofp is the message which triggered this processing, if any (used for error
  generation)
  """
  # Convert the external 1-based ID to the internal list index.
  buffer_id = buffer_id - 1
  if (buffer_id >= len(self._packet_buffer)) or (buffer_id < 0):
    self.log.warn("Invalid output buffer id: %d", buffer_id + 1)
    return
  if self._packet_buffer[buffer_id] is None:
    self.log.warn("Buffer %d has already been flushed", buffer_id + 1)
    return
  (packet, in_port) = self._packet_buffer[buffer_id]
  self._process_actions_for_packet(actions, packet, in_port, ofp)
  # Release the slot so it can be reused.
  self._packet_buffer[buffer_id] = None
def _process_actions_for_packet (self, actions, packet, in_port, ofp=None):
  """
  process the output actions for a packet

  actions: iterable of ofp_action_* instances applied in order
  packet: ethernet instance or packed bytes (unpacked if necessary)
  in_port: integer port the packet arrived on
  ofp is the message which triggered this processing, if any (used for error
  generation)
  """
  assert assert_type("packet", packet, (ethernet, bytes), none_ok=False)
  if not isinstance(packet, ethernet):
    packet = ethernet.unpack(packet)

  for action in actions:
    #if action.type is ofp_action_resubmit:
    #  self.rx_packet(packet, in_port)
    #  return
    h = self.action_handlers.get(action.type)
    if h is None:
      # Lazy logger formatting instead of eager %-interpolation.
      self.log.warn("Unknown action type: %x", action.type)
      self.send_error(type=OFPET_BAD_ACTION, code=OFPBAC_BAD_TYPE, ofp=ofp)
      return
    # Handlers may return a modified or replacement packet.
    packet = h(action, packet, in_port)
def _flow_mod_add (self, flow_mod, connection, table):
  """
  Process an OFPFC_ADD flow mod sent to the switch.

  Emergency entries are not supported and are always rejected.
  """
  match = flow_mod.match
  priority = flow_mod.priority

  if flow_mod.flags & OFPFF_EMERG:
    if flow_mod.idle_timeout != 0 or flow_mod.hard_timeout != 0:
      # Emergency flow mod has non-zero timeouts. Do not add.
      self.log.warn("Rejecting emergency flow with nonzero timeout")
      self.send_error(type=OFPET_FLOW_MOD_FAILED,
                      code=OFPFMFC_BAD_EMERG_TIMEOUT,
                      ofp=flow_mod, connection=connection)
      return
    if flow_mod.flags & OFPFF_SEND_FLOW_REM:
      # Emergency flows can't send removal messages, so we might want to
      # reject this early. Sadly, there's no error code for this, so we just
      # abuse EPERM. If we eventually support Nicira extended error codes,
      # we should use one here.
      self.log.warn("Rejecting emergency flow with flow removal flag")
      self.send_error(type=OFPET_FLOW_MOD_FAILED,
                      code=OFPFMFC_EPERM,
                      ofp=flow_mod, connection=connection)
      return
    #NOTE: An error is sent anyways because the current implementation does
    #      not support emergency entries.
    self.log.warn("Rejecting emergency flow (not supported)")
    self.send_error(type=OFPET_FLOW_MOD_FAILED,
                    code=OFPFMFC_ALL_TABLES_FULL,
                    ofp=flow_mod, connection=connection)
    return

  new_entry = TableEntry.from_flow_mod(flow_mod)

  if flow_mod.flags & OFPFF_CHECK_OVERLAP:
    if table.check_for_overlapping_entry(new_entry):
      # Another entry overlaps. Do not add.
      self.send_error(type=OFPET_FLOW_MOD_FAILED, code=OFPFMFC_OVERLAP,
                      ofp=flow_mod, connection=connection)
      return

  if flow_mod.command == OFPFC_ADD:
    # Exactly matching entries have to be removed if OFPFC_ADD
    table.remove_matching_entries(match, priority=priority, strict=True)

  if len(table) >= self.max_entries:
    # Flow table is full. Respond with error message.
    self.send_error(type=OFPET_FLOW_MOD_FAILED,
                    code=OFPFMFC_ALL_TABLES_FULL,
                    ofp=flow_mod, connection=connection)
    return

  table.add_entry(new_entry)
def _flow_mod_modify (self, flow_mod, connection, table, strict=False):
  """
  Process an OFPFC_MODIFY flow mod sent to the switch.

  Replaces the action list of every matching table entry; when nothing
  matches, the modify behaves exactly like an add.
  """
  match = flow_mod.match
  priority = flow_mod.priority

  # Collect every entry this flow_mod applies to...
  targets = [entry for entry in table.entries
             if entry.is_matched_by(match, priority=priority, strict=strict)]
  # ...and update their actions in place.
  for entry in targets:
    entry.actions = flow_mod.actions

  if not targets:
    # No matching entry found -- modify acts as an add.
    self._flow_mod_add(flow_mod, connection, table)
def _flow_mod_modify_strict (self, flow_mod, connection, table):
  """
  Process an OFPFC_MODIFY_STRICT flow mod sent to the switch.

  Same as OFPFC_MODIFY, but matching entries must match exactly.
  """
  self._flow_mod_modify(flow_mod, connection, table, strict=True)
def _flow_mod_delete (self, flow_mod, connection, table, strict=False):
  """
  Process an OFPFC_DELETE flow mod sent to the switch.

  Entries may additionally be filtered by output port; OFPP_NONE means
  no port filtering.
  """
  port_filter = flow_mod.out_port
  if port_filter == OFPP_NONE:
    # Don't filter on output port
    port_filter = None
  table.remove_matching_entries(flow_mod.match,
                                priority=flow_mod.priority,
                                strict=strict,
                                out_port=port_filter,
                                reason=OFPRR_DELETE)
def _flow_mod_delete_strict (self, flow_mod, connection, table):
  """
  Process an OFPFC_DELETE_STRICT flow mod sent to the switch.

  Same as OFPFC_DELETE, but matching entries must match exactly.
  """
  self._flow_mod_delete(flow_mod, connection, table, strict=True)
def _action_output (self, action, packet, in_port):
  # Delegate to the general output path, which validates the port and
  # understands virtual ports (FLOOD, CONTROLLER, TABLE, ...).
  self._output_packet(packet, action.port, in_port, action.max_len)
  return packet
def _action_set_vlan_vid (self, action, packet, in_port):
  # Make sure the frame carries a VLAN header, then set its VID.
  if not isinstance(packet.payload, vlan):
    header = vlan()
    header.eth_type = packet.type
    header.payload = packet.payload
    packet.type = ethernet.VLAN_TYPE
    packet.payload = header
  packet.payload.id = action.vlan_vid
  return packet
def _action_set_vlan_pcp (self, action, packet, in_port):
  # Ensure a VLAN header is present, then set its priority (PCP) bits.
  if not isinstance(packet.payload, vlan):
    vl = vlan()
    vl.payload = packet.payload
    vl.eth_type = packet.type
    packet.payload = vl
    packet.type = ethernet.VLAN_TYPE
  packet.payload.pcp = action.vlan_pcp
  return packet
def _action_strip_vlan (self, action, packet, in_port):
  # Pop the VLAN header if present (no-op for untagged frames).
  tag = packet.payload
  if isinstance(tag, vlan):
    packet.type = tag.eth_type
    packet.payload = tag.payload
  return packet
def _action_set_dl_src (self, action, packet, in_port):
  # Rewrite the Ethernet source address.
  packet.src = action.dl_addr
  return packet
def _action_set_dl_dst (self, action, packet, in_port):
  # Rewrite the Ethernet destination address.
  packet.dst = action.dl_addr
  return packet
def _action_set_nw_src (self, action, packet, in_port):
  # Rewrite the IPv4 source address (skipping over a VLAN header if any).
  payload = packet.payload
  if isinstance(payload, vlan):
    payload = payload.payload
  if isinstance(payload, ipv4):
    payload.srcip = action.nw_addr
  return packet
def _action_set_nw_dst (self, action, packet, in_port):
  # Rewrite the IPv4 destination address (skipping a VLAN header if any).
  nw = packet.payload
  if isinstance(nw, vlan):
    nw = nw.payload
  if isinstance(nw, ipv4):
    nw.dstip = action.nw_addr
  return packet
def _action_set_nw_tos (self, action, packet, in_port):
  # Rewrite the IPv4 ToS byte (skipping a VLAN header if any).
  nw = packet.payload
  if isinstance(nw, vlan):
    nw = nw.payload
  if isinstance(nw, ipv4):
    nw.tos = action.nw_tos
  return packet
def _action_set_tp_src (self, action, packet, in_port):
  # Rewrite the TCP/UDP source port (IPv4 TCP/UDP only).
  inner = packet.payload
  if isinstance(inner, vlan):
    inner = inner.payload
  if isinstance(inner, ipv4):
    segment = inner.payload
    if isinstance(segment, (udp, tcp)):
      segment.srcport = action.tp_port
  return packet
def _action_set_tp_dst (self, action, packet, in_port):
  # Rewrite the TCP/UDP destination port (IPv4 TCP/UDP only).
  nw = packet.payload
  if isinstance(nw, vlan):
    nw = nw.payload
  if isinstance(nw, ipv4):
    tp = nw.payload
    if isinstance(tp, udp) or isinstance(tp, tcp):
      tp.dstport = action.tp_port
  return packet
def _action_enqueue (self, action, packet, in_port):
  # Queues are unsupported; degrade to a plain output action.
  self.log.warn("Enqueue not supported. Performing regular output.")
  # NOTE(review): the output port is read from action.tp_port -- confirm
  # this matches the ofp_action_enqueue field naming in the OpenFlow lib.
  self._output_packet(packet, action.tp_port, in_port)
  return packet
# def _action_push_mpls_tag (self, action, packet, in_port):
# bottom_of_stack = isinstance(packet.next, mpls)
# packet.next = mpls(prev = packet.pack())
# if bottom_of_stack:
# packet.next.s = 1
# packet.type = action.ethertype
# return packet
# def _action_pop_mpls_tag (self, action, packet, in_port):
# if not isinstance(packet.next, mpls):
# return packet
# if not isinstance(packet.next.next, str):
# packet.next.next = packet.next.next.pack()
# if action.ethertype in ethernet.type_parsers:
# packet.next = ethernet.type_parsers[action.ethertype](packet.next.next)
# else:
# packet.next = packet.next.next
# packet.ethertype = action.ethertype
# return packet
# def _action_set_mpls_label (self, action, packet, in_port):
# if not isinstance(packet.next, mpls):
# mock = ofp_action_push_mpls()
# packet = push_mpls_tag(mock, packet)
# packet.next.label = action.mpls_label
# return packet
# def _action_set_mpls_tc (self, action, packet, in_port):
# if not isinstance(packet.next, mpls):
# mock = ofp_action_push_mpls()
# packet = push_mpls_tag(mock, packet)
# packet.next.tc = action.mpls_tc
# return packet
# def _action_set_mpls_ttl (self, action, packet, in_port):
# if not isinstance(packet.next, mpls):
# mock = ofp_action_push_mpls()
# packet = push_mpls_tag(mock, packet)
# packet.next.ttl = action.mpls_ttl
# return packet
# def _action_dec_mpls_ttl (self, action, packet, in_port):
# if not isinstance(packet.next, mpls):
# return packet
# packet.next.ttl = packet.next.ttl - 1
# return packet
def _stats_desc (self, ofp, connection):
  """
  Handle an OFPST_DESC stats request

  Platform/version details come from pox.core when it is importable;
  otherwise generic placeholders are returned.
  """
  try:
    from pox.core import core
    return ofp_desc_stats(mfr_desc="POX",
                          hw_desc=core._get_platform_info(),
                          sw_desc=core.version_string,
                          serial_num=str(self.dpid),
                          dp_desc=type(self).__name__)
  except Exception:
    # Narrowed from a bare except (which would also swallow SystemExit /
    # KeyboardInterrupt). pox.core may be unavailable or incomplete when
    # running outside POX proper; fall back to placeholders.
    return ofp_desc_stats(mfr_desc="POX",
                          hw_desc="Unknown",
                          sw_desc="Unknown",
                          serial_num=str(self.dpid),
                          dp_desc=type(self).__name__)
def _stats_flow (self, ofp, connection):
  # Only table 0 exists; any other specific table has no flows.
  if ofp.body.table_id not in (TABLE_ALL, 0):
    return []
  port_filter = ofp.body.out_port
  if port_filter == OFPP_NONE:
    # Don't filter on output port
    port_filter = None
  return self.table.flow_stats(ofp.body.match, port_filter)
def _stats_aggregate (self, ofp, connection):
  # Only table 0 exists; any other specific table has no flows.
  if ofp.body.table_id not in (TABLE_ALL, 0):
    return [] # No flows for other tables
  out_port = ofp.body.out_port
  if out_port == OFPP_NONE: out_port = None # Don't filter
  return self.table.aggregate_stats(ofp.body.match, out_port)
def _stats_table (self, ofp, connection):
  """
  Handle an OFPST_TABLE stats request (this switch has one table, id 0)
  """
  # Some of these may come from the actual table(s) in the future...
  r = ofp_table_stats()
  r.table_id = 0
  r.name = "Default"
  r.wildcards = OFPFW_ALL
  r.max_entries = self.max_entries
  r.active_count = len(self.table)
  r.lookup_count = self._lookup_count
  r.matched_count = self._matched_count
  return r
def _stats_port (self, ofp, connection):
  # A specific port returns its single stats entry; OFPP_NONE returns all.
  req = ofp.body
  if req.port_no != OFPP_NONE:
    return self.port_stats[req.port_no]
  return self.port_stats.values()
def _stats_queue (self, ofp, connection):
  """
  Handle an OFPST_QUEUE stats request

  We don't support queues whatsoever so either send an empty list or send
  an OFP_ERROR if an actual queue is requested.
  """
  req = ofp.body
  #if req.port_no != OFPP_ALL:
  #  self.send_error(type=OFPET_QUEUE_OP_FAILED, code=OFPQOFC_BAD_PORT,
  #                  ofp=ofp, connection=connection)
  # Note: We don't care about this case for now, even if port_no is bogus.
  if req.queue_id == OFPQ_ALL:
    return []
  else:
    self.send_error(type=OFPET_QUEUE_OP_FAILED, code=OFPQOFC_BAD_QUEUE,
                    ofp=ofp, connection=connection)
def __repr__ (self):
  # e.g. SoftwareSwitch(dpid=00-00-00-00-00-01, num_ports=4)
  cls_name = type(self).__name__
  dpid_str = dpid_to_str(self.dpid)
  return "%s(dpid=%s, num_ports=%d)" % (cls_name, dpid_str, len(self.ports))
class SoftwareSwitch (SoftwareSwitchBase, EventMixin):
  # Event-raising flavor of the software switch: packets sent out a physical
  # port are delivered to listeners as DpPacketOut events.
  _eventMixin_events = set([DpPacketOut])

  def _output_packet_physical (self, packet, port_no):
    """
    send a packet out a single physical port

    This is called by the more general _output_packet().
    """
    self.raiseEvent(DpPacketOut(self, packet, self.ports[port_no]))
class ExpireMixin (object):
  """
  Adds expiration to a switch

  Inherit *before* switch base.
  """
  # Seconds between expiration sweeps; 0/None disables expiration.
  _expire_period = 2

  def __init__ (self, *args, **kw):
    # 'expire_period' may be overridden per-instance via keyword argument.
    expire_period = kw.pop('expire_period', self._expire_period)
    super(ExpireMixin,self).__init__(*args, **kw)
    if not expire_period:
      # Disable
      return
    # Periodically remove timed-out table entries.
    self._expire_timer = Timer(expire_period,
                               self.table.remove_expired_entries,
                               recurring=True)
class OFConnection (object):
  """
  A codec for OpenFlow messages.

  Decodes and encodes OpenFlow messages (ofp_message) into byte arrays.
  Wraps an io_worker that does the actual io work, and calls a
  receiver_callback function when a new message has arrived.
  """

  # Unlike of_01.Connection, this is persistent (at least until we implement
  # a proper recoco Connection Listener loop)
  # Globally unique identifier for the Connection instance
  ID = 0

  # See _error_handler for information the meanings of these
  ERR_BAD_VERSION = 1
  ERR_NO_UNPACKER = 2
  ERR_BAD_LENGTH = 3
  ERR_EXCEPTION = 4

  # These methods are called externally by IOWorker
  def msg (self, m):
    self.log.debug("%s %s", str(self), str(m))
  def err (self, m):
    self.log.error("%s %s", str(self), str(m))
  def info (self, m):
    self.log.info("%s %s", str(self), str(m))

  def __init__ (self, io_worker):
    # io_worker performs the raw socket I/O; we install read() as its
    # receive handler so we get called whenever data arrives.
    self.starting = True # No data yet
    self.io_worker = io_worker
    self.io_worker.rx_handler = self.read
    self.controller_id = io_worker.socket.getpeername()
    OFConnection.ID += 1
    self.ID = OFConnection.ID
    self.log = logging.getLogger("ControllerConnection(id=%d)" % (self.ID,))
    self.unpackers = make_type_to_unpacker_table()
    # Callback invoked as on_message_received(self, msg) per decoded message.
    self.on_message_received = None

  def set_message_handler (self, handler):
    self.on_message_received = handler

  def send (self, data):
    """
    Send raw data to the controller.

    Generally, data is a bytes object. If not, we check if it has a pack()
    method and call it (hoping the result will be a bytes object). This
    way, you can just pass one of the OpenFlow objects from the OpenFlow
    library to it and get the expected result, for example.
    """
    if type(data) is not bytes:
      if hasattr(data, 'pack'):
        data = data.pack()
    self.io_worker.send(data)

  def read (self, io_worker):
    """
    Decode and dispatch as many complete OpenFlow messages as are buffered.

    Returns True; errors are routed through _error_handler, which decides
    whether to keep processing.
    """
    #FIXME: Do we need to pass io_worker here?
    while True:
      message = io_worker.peek()
      if len(message) < 4:
        # Not even a full OpenFlow header yet.
        break

      # Parse head of OpenFlow message by hand
      # NOTE(review): ord() on indexed elements assumes Python 2 str
      # buffers; on Python 3 bytes, indexing already yields ints.
      ofp_version = ord(message[0])
      ofp_type = ord(message[1])

      if ofp_version != OFP_VERSION:
        info = ofp_version
        r = self._error_handler(self.ERR_BAD_VERSION, info)
        if r is False: break
        continue

      message_length = ord(message[2]) << 8 | ord(message[3])
      if message_length > len(message):
        # Whole message hasn't arrived yet.
        break

      if ofp_type >= 0 and ofp_type < len(self.unpackers):
        unpacker = self.unpackers[ofp_type]
      else:
        unpacker = None
      if unpacker is None:
        info = (ofp_type, message_length)
        r = self._error_handler(self.ERR_NO_UNPACKER, info)
        if r is False: break
        io_worker.consume_receive_buf(message_length)
        continue

      new_offset, msg_obj = self.unpackers[ofp_type](message, 0)
      if new_offset != message_length:
        info = (msg_obj, message_length, new_offset)
        r = self._error_handler(self.ERR_BAD_LENGTH, info)
        if r is False: break
        # Assume sender was right and we should skip what it told us to.
        io_worker.consume_receive_buf(message_length)
        continue

      io_worker.consume_receive_buf(message_length)
      self.starting = False

      if self.on_message_received is None:
        raise RuntimeError("on_message_receieved hasn't been set yet!")

      try:
        self.on_message_received(self, msg_obj)
      except Exception as e:
        info = (e, message[:message_length], msg_obj)
        r = self._error_handler(self.ERR_EXCEPTION, info)
        if r is False: break
        continue

    return True

  def _error_handler (self, reason, info):
    """
    Called when read() has an error

    reason is one of OFConnection.ERR_X
    info depends on reason:
    ERR_BAD_VERSION: claimed version number
    ERR_NO_UNPACKER: (claimed message type, claimed length)
    ERR_BAD_LENGTH: (unpacked message, claimed length, unpacked length)
    ERR_EXCEPTION: (exception, raw message, unpacked message)

    Return False to halt processing of subsequent data (makes sense to
    do this if you called connection.close() here, for example).
    """
    if reason == OFConnection.ERR_BAD_VERSION:
      ofp_version = info
      self.log.warn('Unsupported OpenFlow version 0x%02x', info)
      if self.starting:
        # Only send a HELLO_FAILED if this is the first thing we've seen.
        message = self.io_worker.peek()
        err = ofp_error(type=OFPET_HELLO_FAILED, code=OFPHFC_INCOMPATIBLE)
        #err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_VERSION)
        err.xid = self._extract_message_xid(message)
        err.data = 'Version unsupported'
        self.send(err)
      self.close()
      return False
    elif reason == OFConnection.ERR_NO_UNPACKER:
      ofp_type, message_length = info
      self.log.warn('Unsupported OpenFlow message type 0x%02x', ofp_type)
      message = self.io_worker.peek()
      err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_TYPE)
      err.xid = self._extract_message_xid(message)
      err.data = message[:message_length]
      self.send(err)
    elif reason == OFConnection.ERR_BAD_LENGTH:
      msg_obj, message_length, new_offset = info
      t = type(msg_obj).__name__
      self.log.error('Different idea of message length for %s '
                     '(us:%s them:%s)' % (t, new_offset, message_length))
      message = self.io_worker.peek()
      err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_LEN)
      err.xid = self._extract_message_xid(message)
      err.data = message[:message_length]
      self.send(err)
    elif reason == OFConnection.ERR_EXCEPTION:
      ex, raw_message, msg_obj = info
      t = type(ex).__name__
      self.log.exception('Exception handling %s' % (t,))
    else:
      # Unknown reason -- give up on this connection entirely.
      self.log.error("Unhandled error")
      self.close()
      return False

  def _extract_message_xid (self, message):
    """
    Extract and return the xid of an openflow message.
    """
    xid = 0
    # message_length is parsed alongside the xid but currently unused.
    if len(message) >= 8:
      #xid = struct.unpack_from('!L', message, 4)[0]
      message_length, xid = struct.unpack_from('!HL', message, 2)
    elif len(message) >= 4:
      message_length = ord(message[2]) << 8 | ord(message[3])
    else:
      message_length = len(message)
    return xid

  def close (self):
    self.io_worker.shutdown()

  def get_controller_id (self):
    """
    Return a tuple of the controller's (address, port) we are connected to
    """
    return self.controller_id

  def __str__ (self):
    return "[Con " + str(self.ID) + "]"
class SwitchFeatures (object):
  """
  Stores switch features

  Keeps settings for switch capabilities and supported actions.
  Automatically has attributes of the form ".act_foo" for all OFPAT_FOO,
  and ".cap_foo" for all OFPC_FOO (as gathered from libopenflow).
  """
  def __init__ (self, **kw):
    """
    Initialize every capability/action flag to False, then apply kw
    overrides (e.g. cap_flow_stats=True) via initHelper.
    """
    self._cap_info = {}
    # .items() instead of the Python-2-only .iteritems()
    for val,name in ofp_capabilities_map.items():
      name = name[5:].lower() # strip OFPC_
      name = "cap_" + name
      setattr(self, name, False)
      self._cap_info[name] = val

    self._act_info = {}
    for val,name in ofp_action_type_map.items():
      name = name[6:].lower() # strip OFPAT_
      name = "act_" + name
      setattr(self, name, False)
      self._act_info[name] = val

    # From here on, __setattr__ rejects unknown attribute names.
    self._locked = True

    initHelper(self, kw)

  def __setattr__ (self, attr, value):
    # Once locked, only already-known attributes may be set -- catches
    # typos in feature names early instead of silently ignoring them.
    if getattr(self, '_locked', False):
      if not hasattr(self, attr):
        raise AttributeError("No such attribute as '%s'" % (attr,))
    return super(SwitchFeatures,self).__setattr__(attr, value)

  @property
  def capability_bits (self):
    """
    Value used in features reply
    """
    # Capability map values are already bitmask values.
    return sum( (v if getattr(self, k) else 0)
                for k,v in self._cap_info.items() )

  @property
  def action_bits (self):
    """
    Value used in features reply
    """
    # Action map values are bit *positions*, so shift into a mask.
    return sum( (1<<v if getattr(self, k) else 0)
                for k,v in self._act_info.items() )

  def __str__ (self):
    # Comma-separated list of all enabled capability/action flag names.
    l = list(k for k in self._cap_info if getattr(self, k))
    l += list(k for k in self._act_info if getattr(self, k))
    return ",".join(l)
|
<gh_stars>10-100
# Copyright (C) 2015 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rest_framework import status
class BaseServiceException(Exception):
    """Configuration, or core service logic has had an error.

    This is an internal-only exception and should only be thrown when an
    error occurs that the user shouldn't see.

    If thrown during the course of an API call it will be caught and
    returned to the user as a ServiceUnavailable error with a 503 response.
    """

    # Used by __str__ when no explicit message is supplied.
    # (Fixed typos: "A internal ... occured" -> "An internal ... occurred")
    default_message = "An internal service error has occurred."

    def __init__(self, message=None):
        self.message = message

    def __str__(self):
        # Fall back to the class default when no message was given.
        return self.message or self.default_message
class InvalidActionClass(BaseServiceException):
    """Raised when registering an action class of the wrong type."""

    default_message = "Cannot register action not built off the BaseAction class."
class InvalidActionSerializer(BaseServiceException):
    """Raised when an action's serializer is not a DRF serializer."""

    default_message = "Action serializer must be a valid DRF serializer."
class InvalidTaskClass(BaseServiceException):
    """Raised when registering a task class of the wrong type."""

    # Fixed: the previous message was copy-pasted from
    # InvalidActionSerializer and talked about serializers.
    default_message = "Cannot register task not built off the BaseTask class."
class InvalidAPIClass(BaseServiceException):
    """Raised when registering an API class of the wrong type."""

    # Fixed: the previous message described task registration (it belongs
    # to InvalidTaskClass); this exception guards DelegateAPI registration.
    default_message = "Cannot register API not built off the BaseDelegateAPI class."
class DelegateAPINotRegistered(BaseServiceException):
    """Raised when setting up a DelegateAPI that was never registered."""

    default_message = "Failed to setup DelegateAPI that has not been registered."
class TaskNotRegistered(BaseServiceException):
    """Raised when setting up a Task that was never registered."""

    default_message = "Failed to setup Task that has not been registered."
class ActionNotRegistered(BaseServiceException):
    """Raised when setting up an Action that was never registered."""

    default_message = "Failed to setup Action that has not been registered."
class SerializerMissingException(BaseServiceException):
    """Raised when a configured serializer cannot be found."""

    default_message = "Serializer configured but it does not exist."
class ConfigurationException(BaseServiceException):
    """Raised for missing or invalid service configuration values."""

    default_message = "Missing or incorrect configuration value."
class BaseAPIException(Exception):
    """An Task error occurred.

    Base for errors surfaced to API clients; status_code determines the
    HTTP response code.
    """

    # Default HTTP status; subclasses override as appropriate.
    status_code = status.HTTP_400_BAD_REQUEST

    def __init__(self, message=None, internal_message=None):
        # message: user-facing text; falls back to the class default.
        # internal_message: extra context intended for logs only.
        # NOTE(review): default_message is expected to be defined by
        # subclasses -- constructing this base class with no message
        # would raise AttributeError. Confirm intended.
        if message:
            self.message = message
        else:
            self.message = self.default_message
        self.internal_message = internal_message

    def __str__(self):
        # Prefix the user-facing message with internal context when present.
        message = ""
        if self.internal_message:
            message = "%s - " % self.internal_message
        message += str(self.message)
        return message
class NotFound(BaseAPIException):
    """Generic 404 for missing resources."""

    status_code = status.HTTP_404_NOT_FOUND
    default_message = "Not found."
class TaskNotFound(NotFound):
    """404 specifically for a missing Task."""

    # status_code is the same as the NotFound parent's; restated here
    # for explicitness.
    status_code = status.HTTP_404_NOT_FOUND
    default_message = "Task not found."
class ServiceUnavailable(BaseAPIException):
    """503 returned when internal service errors occur."""

    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_message = "Service temporarily unavailable, try again later."
class TaskSerializersInvalid(BaseAPIException):
    """400 returned when Task input data fails serializer validation."""

    default_message = "Data passed to the Task was invalid."
class TaskDuplicateFound(BaseAPIException):
    """409 returned when an equivalent Task already exists."""

    default_message = "This Task already exists."
    status_code = status.HTTP_409_CONFLICT
class BaseTaskException(BaseAPIException):
    """An Task error occurred.

    Like BaseAPIException, but bound to a specific task whose type and
    uuid are included in the string form.
    """

    default_message = "An Task error occurred."
    status_code = status.HTTP_400_BAD_REQUEST

    def __init__(self, task, message=None, internal_message=None):
        # task: the task instance this error relates to.
        super(BaseTaskException, self).__init__(message, internal_message)
        self.task = task

    def __str__(self):
        # Prefix with "task_type (uuid) - " for log readability.
        message = "%s (%s) - " % (self.task.task_type, self.task.uuid)
        message += super(BaseTaskException, self).__str__()
        return message
class TaskTokenSerializersInvalid(BaseTaskException):
    """400 returned when Task token data fails validation."""

    default_message = "Data passed for the Task token was invalid."
class TaskActionsInvalid(BaseTaskException):
    """400 returned when one or more of a Task's actions is invalid."""

    default_message = "One or more of the Task actions was invalid."
class TaskStateInvalid(BaseTaskException):
    """400 returned when an action is attempted on a task whose state
    does not permit it."""

    # Fixed garbled grammar ("Action does is not possible ...").
    default_message = "Action is not possible on task in current state."
class TaskActionsFailed(BaseTaskException):
    """For use when Task processing fails and we want to wrap that."""

    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_message = "Service temporarily unavailable, try again later."
|
# Create your views here.
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import gettext as _
from openbook_circles.serializers import CreateCircleSerializer, GetCirclesCircleSerializer, DeleteCircleSerializer, \
UpdateCircleSerializer, CircleNameCheckSerializer, GetCircleCircleSerializer
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.responses import ApiMessageResponse
from openbook_common.utils.helpers import normalise_request_data, nomalize_usernames_in_request_data
class Circles(APIView):
    """Endpoints for listing and creating the authenticated user's circles."""

    permission_classes = (IsAuthenticated, IsNotSuspended)

    def put(self, request):
        """Create a new circle (name + color) for the requesting user."""
        serializer = CreateCircleSerializer(data=request.data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        name = data.get('name')
        color = data.get('color')
        user = request.user
        with transaction.atomic():
            circle = user.create_circle(name=name, color=color)
        response_serializer = GetCirclesCircleSerializer(circle, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_201_CREATED)

    def get(self, request):
        """List the requesting user's circles, newest first."""
        user = request.user
        circles = user.circles.order_by('-id')
        response_serializer = GetCirclesCircleSerializer(circles, many=True, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_200_OK)
class CircleItem(APIView):
    """Endpoints for retrieving, deleting, and updating a single circle."""

    permission_classes = (IsAuthenticated, IsNotSuspended)

    def get(self, request, circle_id):
        """Retrieve one of the requesting user's circles by id."""
        user = request.user
        circle = user.get_circle_with_id(circle_id)
        response_serializer = GetCircleCircleSerializer(circle, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_200_OK)

    def delete(self, request, circle_id):
        """Delete the circle after validating the id."""
        user = request.user
        serializer = DeleteCircleSerializer(data={'circle_id': circle_id})
        serializer.is_valid(raise_exception=True)
        with transaction.atomic():
            user.delete_circle_with_id(circle_id)
        return Response(status=status.HTTP_200_OK)

    def patch(self, request, circle_id):
        """Partially update the circle's name, color, and/or member usernames."""
        request_data = normalise_request_data(request.data)
        # The circle id comes from the URL, not the body.
        request_data['circle_id'] = circle_id
        nomalize_usernames_in_request_data(request_data)
        serializer = UpdateCircleSerializer(data=request_data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        circle_id = data.get('circle_id')
        color = data.get('color')
        usernames = data.get('usernames')
        name = data.get('name')
        user = request.user
        with transaction.atomic():
            circle = user.update_circle_with_id(circle_id, color=color, usernames=usernames, name=name)
        response_serializer = GetCircleCircleSerializer(circle, context={"request": request})
        return Response(response_serializer.data, status=status.HTTP_200_OK)
class CircleNameCheck(APIView):
    """
    The API to check if a circleName is both valid and not taken.
    """
    serializer_class = CircleNameCheckSerializer
    permission_classes = (IsAuthenticated, IsNotSuspended)

    def post(self, request):
        """Return 202 if the name is available for this user, 400 otherwise."""
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        name = serializer.validated_data.get('name')
        user = request.user
        # Availability is per-user; other users may already use this name.
        if not user.has_circle_with_name(name):
            return ApiMessageResponse(_('Circle name available'), status=status.HTTP_202_ACCEPTED)
        return ApiMessageResponse(_('Circle name not available'), status=status.HTTP_400_BAD_REQUEST)
|
<filename>src/rival_regions_wrapper/api.py
"""
Rival Regions API methods
"""
import time
from functools import wraps

from rival_regions_wrapper import LOGGER
from rival_regions_wrapper.cookie_handler import CookieHandler
from rival_regions_wrapper.exceptions import (
    SessionExpireException,
    NoLogginException,
)
def session_handler(func):
    """Decorator that retries the wrapped call after re-authenticating
    when the Rival Regions session has expired or the connection dropped.

    The wrapped callable's first positional argument must be the middleware
    instance (it provides .username and .authenticate()).
    """

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the bare wrapper previously discarded.
    @wraps(func)
    def wrapper(*args, **kwargs):
        instance = args[0]
        return try_run(instance, func, *args, **kwargs)

    def try_run(instance, func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (
            SessionExpireException,
            ConnectionError,
            ConnectionResetError,
        ):
            # Stale session cookie: discard it, log in again, and retry.
            CookieHandler.remove_cookie(instance.username)
            instance.authenticate()
            return try_run(instance, func, *args, **kwargs)
        except NoLogginException:
            # Never logged in yet: authenticate and retry.
            instance.authenticate()
            return try_run(instance, func, *args, **kwargs)

    return wrapper
def check_response(response):
    """Raise SessionExpireException when the response indicates a dead session.

    Accepts either a raw string or a requests-style response object exposing
    a ``.text`` attribute.
    """
    body = response if isinstance(response, str) else response.text
    session_dead = (
        "Session expired, please, reload the page" in body
        or 'window.location="https://rivalregions.com";' in body
    )
    if session_dead:
        raise SessionExpireException()
@session_handler
def get(middleware, path, add_var_c=False):
    """Perform a GET request against rivalregions.com and return the body text.

    :param middleware: client exposing ``authentication_handler`` and ``username``
    :param path: request path, with or without a single leading slash
    :param add_var_c: when True, send the session's ``c`` token as a query param
    :raises NoLogginException: when no authenticated session exists
    """
    if path[0] == "/":
        path = path[1:]
    params = {"c": middleware.authentication_handler.var_c} if add_var_c else {}
    LOGGER.info(
        '"%s": GET: "%s" var_c: %s', middleware.username, path, add_var_c
    )
    session = middleware.authentication_handler.session
    if not session:
        raise NoLogginException()
    response = session.get(
        url="https://rivalregions.com/{}".format(path), params=params
    )
    check_response(response)
    return response.text
@session_handler
def post(middleware, path, data=None):
    """Perform a POST request against rivalregions.com and return the body text.

    :param middleware: client exposing ``authentication_handler`` and ``username``
    :param path: request path, with or without a single leading slash
    :param data: optional form payload; the session token ``c`` is always added
    :raises NoLogginException: when no authenticated session exists
    """
    if path[0] == "/":
        path = path[1:]
    # Copy the payload so the caller's dict is not mutated by adding "c".
    data = dict(data) if data else {}
    data["c"] = middleware.authentication_handler.var_c
    LOGGER.info('"%s": POST: "%s"', middleware.username, path)
    if middleware.authentication_handler.session:
        response = middleware.authentication_handler.session.post(
            "https://rivalregions.com/{}".format(path), data=data
        )
        check_response(response)
    else:
        raise NoLogginException()
    return response.text
@session_handler
def conference_message(middleware, conference_id, message):
    """Send a (possibly long) message to a conference chat via the browser UI.

    The message is split into chunks of at most ~900 characters, broken on
    word and newline boundaries, and each chunk is typed and sent as a
    separate chat message.
    """
    browser = middleware.authentication_handler.get_browser()
    try:
        browser.go_to(
            "https://rivalregions.com/#slide/conference/{}".format(
                conference_id
            )
        )
        browser.refresh()
        # Give the chat widget time to load before typing.
        time.sleep(2)
        character_count = 0      # running length of the pending chunk
        tmp_messages = []        # completed lines queued for the next send
        for sentence in message.split("\n"):
            sentence_character_count = 0
            tmp_sentence = []    # words of the current (possibly oversized) line
            for word in sentence.split(" "):
                sentence_character_count += len(word) + 1
                if sentence_character_count >= 899:
                    # A single line alone exceeds the limit: flush everything
                    # accumulated so far plus the partial line.
                    message = "{}\n{}".format(
                        "\n".join(tmp_messages), " ".join(tmp_sentence)
                    )
                    LOGGER.info(
                        '"%s": CONF "%s": next message length: %s',
                        middleware.username,
                        conference_id,
                        len(message),
                    )
                    browser.type(message, id="message")
                    browser.click(id="chat_send")
                    sentence_character_count = 0
                    tmp_sentence = []
                    character_count = 0
                    tmp_messages = []
                tmp_sentence.append(word)
            sentence = " ".join(tmp_sentence)
            character_count += len(sentence) + 1
            if character_count >= 900:
                # Adding this line would overflow the chunk: send the queue first.
                message = "\n".join(tmp_messages)
                LOGGER.info(
                    '"%s": CONF "%s": next message length: %s',
                    middleware.username,
                    conference_id,
                    len(message),
                )
                browser.type(message, id="message")
                browser.click(id="chat_send")
                character_count = 0
                tmp_messages = []
            tmp_messages.append(sentence)
        if tmp_messages:
            # Send whatever remains after the loop.
            message = "\n".join(tmp_messages)
            LOGGER.info(
                '"%s": CONF "%s": next message length: %s',
                middleware.username,
                conference_id,
                len(message),
            )
            browser.type(message, id="message")
            browser.click(id="chat_send")
        LOGGER.info(
            '"%s": CONF "%s": finished sending message',
            middleware.username,
            conference_id,
        )
    finally:
        # Always close the tab, even if sending failed part-way.
        browser.close_current_tab()
@session_handler
def conference_notification(middleware, conference_id, message, sound):
    """Send a conference notification (optionally with sound) via the HTTP API.

    :param conference_id: id of the target conference
    :param message: notification text
    :param sound: truthy to play a sound on delivery (sent as 1/0)
    """
    data = {
        "sound": 1 if sound else 0,
        "text": message,
        "c": middleware.authentication_handler.var_c,
    }
    # NOTE(review): a full URL is passed here, but the module-level post()
    # helper prepends "https://rivalregions.com/" itself — confirm that
    # middleware.post expects an absolute URL, otherwise the host is duplicated.
    response = middleware.post(
        "https://rivalregions.com/rival/konffcm/{}/".format(conference_id),
        data=data,
    )
    check_response(response)
    LOGGER.info(
        '"%s": CONF: id %s send notification ',
        middleware.username,
        conference_id,
    )
    return response
@session_handler
def conference_change_title(middleware, conference_id, title):
    """Change a conference's title via the HTTP API.

    :param conference_id: id of the target conference
    :param title: new conference title
    """
    data = {
        "t": title,
        "c": middleware.authentication_handler.var_c,
    }
    # NOTE(review): full URL passed to middleware.post — see the same concern
    # in conference_notification about a possibly duplicated host prefix.
    response = middleware.post(
        "https://rivalregions.com/rival/changename/{}/".format(conference_id),
        data=data,
    )
    check_response(response)
    LOGGER.info(
        '"%s": CONF: id %s changed title', middleware.username, conference_id
    )
    return response
@session_handler
def profile_message(middleware, profile_id, message):
    """Send a personal message to a user via the browser UI.

    :param profile_id: id of the recipient's profile
    :param message: message text to type into the chat box
    """
    LOGGER.info('"%s": PM: user id %s', middleware.username, profile_id)
    browser = middleware.authentication_handler.get_browser()
    try:
        browser.go_to(
            "https://rivalregions.com/#messages/{}".format(profile_id)
        )
        browser.refresh()
        # Give the chat widget time to load before typing.
        time.sleep(2)
        browser.type(message, id="message")
        browser.click(id="chat_send")
        LOGGER.info(
            '"%s:" PM: user id %s, finished sending message',
            middleware.username,
            profile_id,
        )
    finally:
        # Always close the tab, even if sending failed.
        browser.close_current_tab()
@session_handler
def language_message(middleware, language, message):
    """Send a message to a language chat channel via the browser UI.

    :param language: language code of the chat channel (e.g. "en")
    :param message: message text to type into the chat box
    """
    browser = middleware.authentication_handler.get_browser()
    try:
        browser.go_to(
            "https://rivalregions.com/#slide/chat/lang_{}".format(language)
        )
        browser.refresh()
        # Give the chat widget time to load before typing.
        time.sleep(2)
        browser.type(message, id="message")
        browser.click(id="chat_send")
        LOGGER.info(
            '"%s": CHAT: language %s, finished sending message',
            middleware.username,
            language,
        )
    finally:
        # Always close the tab, even if sending failed.
        browser.close_current_tab()
|
<filename>tumor_migration_analysis/piv_analyze_vectors.py
#!/opt/local/bin/python
"""
This script reads PIV vector data for different experiments and for each time point,
analyzes the correlation length, mean flow and speed (root mean squared velocity).
The script plots the correlation Cvv over distance delta_r and the fit to find the correlation length.
The data is also saved in .dat files.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize, signal
import pandas as pd
import utility_functions as uf
def fit_func(p, x):
    """Exponential decay model: exp(-x / p[0]) + p[1].

    p[0] is the decay (correlation) length, p[1] a constant offset.
    """
    decay_length, offset = p[0], p[1]
    return np.exp(-x / decay_length) + offset
def err_func(p,x,y):
    """Residual between the exponential model fit_func(p, x) and the data y."""
    return fit_func(p,x) - y
def remove_first_row_col(array):
    """Drop the first row and the first column of a 2D numpy array."""
    without_first_row = array[1:]
    # Transpose, slice off what was the first column, transpose back.
    transposed = np.transpose(without_first_row)
    return np.transpose(transposed[1:])
def convert_nans(arr):
    """Replace string "nan" entries with np.nan and return a float array."""
    nan_mask = (arr == "nan")
    arr[nan_mask] = np.nan
    return np.asarray(arr, dtype=float)
def cart2pol(x, y):
    """Convert Cartesian coordinates (x, y) to polar (rho, phi)."""
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return radius, angle
def fit_corr_dist_data(delta_r_list,cvv_list):
    """Fits directional correlation vs. distance data with an exponential

    Parameters
    ----------
    delta_r_list : 1D ndarray
        the distance data
    cvv_list : 1D ndarray
        the directional correlation data

    Returns
    ----------
    p1 : list
        the fit parameters [correlation length, offset]; [nan, nan] when
        there were too few points or the fit did not move off the initial guess
    r_squared : float
        the r^2 value for the goodness of fit (nan on failure)
    """
    # filters out nan values for fitting
    delta_r_list_to_fit = delta_r_list[~np.isnan(cvv_list)]
    cvv_list_to_fit = cvv_list[~np.isnan(cvv_list)]
    # performs the fit
    if len(delta_r_list_to_fit) > 3: #only fit if there is sufficient data points
        p0 = [20.,0]
        p1, success = optimize.leastsq(err_func, p0, args=(delta_r_list_to_fit, cvv_list_to_fit), maxfev=1000000)
        residuals = err_func(p1, delta_r_list_to_fit, cvv_list_to_fit)
        ss_res = np.sum(residuals ** 2)
        ss_tot = np.sum((cvv_list_to_fit - np.mean(cvv_list_to_fit)) ** 2)
        r_squared = 1. - (ss_res / ss_tot)
        corr_len = p1[0]
        print(corr_len, r_squared)
        # evaluate whether the fit worked: leastsq returning exactly the
        # initial correlation length is treated as a failed fit
        if corr_len == p0[0]:
            p1 = [np.nan,np.nan]
            r_squared = np.nan
    else:
        p1 = [np.nan,np.nan]
        r_squared = np.nan
    return p1,r_squared
def plot_corr_dist_data(ax, delta_r_list, cvv_list, p1=None):
    """Plot directional correlation vs. distance, optionally with its exponential fit.

    Parameters
    ----------
    ax : matplotlib axis
        the axis used for plotting
    delta_r_list : 1D ndarray
        the distance data
    cvv_list : 1D ndarray
        the directional correlation data
    p1 : list, optional
        fit parameters from fit_corr_dist_data; when given, the fitted curve
        is drawn as well

    Returns
    -------
    ax : matplotlib axis
        the axis used for plotting
    """
    ax.plot(delta_r_list, cvv_list, 'bo', zorder=1, alpha=0.5, label="$C_{vv}$")
    if p1 is not None:
        fit_x = np.linspace(np.max(delta_r_list), np.min(delta_r_list), 1000)
        fit_y = fit_func(p1, fit_x)
        try:
            ax.plot(fit_x, fit_y, 'b-', zorder=0,
                    label=r"Exp. Fit, $\xi_{vv}$ = %i $\mu$m" % (np.round(p1[0])))
        except TypeError:
            # A nan correlation length cannot be formatted with %i.
            print("Fit unsucessful. Plotting data only.")
    ax.set_xlabel(r"Distance, $\delta r$ ($\mu$m)")
    ax.set_ylabel(r"Directional Correlation, $C_{vv}$")
    ax.legend(loc='upper right')
    return ax
def plot_corr_vs_dist_mean(ax, dist, corr_mean, corr_std, color):
    """Plot binned mean directional correlation vs. distance with a std band.

    Parameters
    ----------
    ax : matplotlib axis
        the axis used for plotting
    dist : 1D numpy array
        the distance data (binned x coordinate)
    corr_mean : 1D numpy array
        the mean directional correlation data (binned)
    corr_std : 1D numpy array
        the standard deviation of the directional correlation data (binned)
    color : string
        the matplotlib color used for plotting

    Returns
    -------
    ax : matplotlib axis
        the axis used for plotting
    """
    # Drop bins whose mean is nan (empty bins).
    valid = ~np.isnan(corr_mean)
    dist = dist[valid]
    corr_std = corr_std[valid]
    corr_mean = corr_mean[valid]
    # Means as points.
    ax.plot(dist, corr_mean, 'o', color=color, zorder=2, alpha=0.7)
    # Standard deviation as a shaded band around the means.
    band_top = np.ones(len(dist)) * (corr_mean + corr_std)
    band_bottom = np.ones(len(dist)) * (corr_mean - corr_std)
    ax.fill_between(dist, band_top, band_bottom, facecolor=color, color=color,
                    alpha=0.3, linewidth=0, zorder=1)
    ax.set_xlabel(r'Distance ($\mu$m)')
    ax.set_ylabel('Directional Correlation')
    return ax
def bin_corr_vs_dist(dist_list, corr_list, n_bins=50):
    """Bins correlation vs. distance data (or any 2D data for that matter)

    Parameters
    ----------
    dist_list : 1D list (or numpy array)
        the distance data
    corr_list : 1D list (or numpy array)
        the directional correlation data
    n_bins : int
        number of bin edges (n_bins - 1 bins are produced)

    Returns
    -------
    x_vals : 1D numpy array
        the binned distance data (centered on the bin)
    H_means : 1D numpy array
        the mean of the directional correlation at each bin
    H_stds : 1D numpy array
        the standard deviation of the directional correlation at each bin
    H_lens : 1D numpy array
        the number of data points (n) at each bin
    """
    distances = np.array(dist_list)
    correlations = np.array(corr_list)
    # Bin edges; the tiny epsilon keeps the maximum value inside the last bin.
    edges = np.linspace(np.min(distances), np.max(distances) + .000000001, n_bins)
    bin_id = np.digitize(distances, edges)
    H_means, H_stds, H_lens = [], [], []
    for i in range(1, len(edges)):
        in_bin = correlations[bin_id == i]
        H_means.append(np.nanmean(in_bin))
        H_stds.append(np.nanstd(in_bin))
        H_lens.append(len(in_bin))
    # Bin centers.
    x_vals = (edges[:-1] + edges[1:]) / 2.
    return x_vals, np.array(H_means), np.array(H_stds), np.array(H_lens)
def get_corr_dist(x,y,u,v):
"""Gets the directional correlation vs. distance for an array of vectors.
Adapted from <NAME> (2015) Maturation et mise en compétition de monocouches cellulaires (Thesis)
Parameters
----------
x : 2D ndarray
array of x coordinates
y : 2D ndarray
array of y coordinates
u : 2D ndarray
array of vectors along the x-axis
v : 2D ndarray
array of vectors along the y-axis
Returns
----------
r : 1D ndarray
the radius (distance) component
Cavg : 1D ndarray
the directional correlation over r
"""
#initializes the radius component
mesh = x[0,1] - x[0,0]
xmax,ymax = np.array(x.shape) * mesh
rmax = min([xmax,ymax])
r = np.arange(0,rmax,1)
#computes of the correlation matrix
Norm_matrix = np.ones(shape=x.shape)
Norm = signal.correlate2d(Norm_matrix,Norm_matrix)
du = u-np.nanmean(u)*np.ones(shape=u.shape)
dv = u-np.nanmean(v)*np.ones(shape=v.shape)
du[np.isnan(du)] = 0
dv[np.isnan(dv)] = 0
CorrU = signal.correlate2d(du,du)/Norm
CorrV = signal.correlate2d(dv,dv)/Norm
#computes the radial function
XX,YY = np.meshgrid(np.linspace(-xmax,xmax,CorrU.shape[1]),np.linspace(-ymax,ymax,CorrU.shape[0]))
Rho,Phi = cart2pol(XX, YY)
Cu = np.arange(0,rmax,1) * np.nan
Cv = np.arange(0,rmax,1) * np.nan
for i in range(int(round(rmax))-1):
Cu[i] = np.nanmean(CorrU[np.where(np.round(Rho) == i)])
Cv[i] = np.nanmean(CorrV[np.where(np.round(Rho) == i)])
#gets the averaged radial function
Cavg = (Cu + Cv) / (Cu[0] + Cv[0])
#plots to check
# plt.plot(r,Cavg,'bo')
# plt.show()
return r,Cavg
def analyze_vectors(stk_path):
    """Analyzes dynamics from PIV vector data.
    Extracted data is automatically written to a new directory.

    For each frame this computes the mean flow velocity and angle, the RMS
    speed, and the correlation length (from an exponential fit of the
    directional correlation vs. distance), then writes per-frame plots/.dat
    files, a summary CSV, an averaged correlation plot, and histograms.

    Parameters
    ----------
    stk_path : string
        the path to the image stack to be analyzed; the PIV data is expected
        in a sibling directory named <stack>_piv_data

    Raises
    ------
    FileNotFoundError
        when the PIV vector data directory does not exist
    """
    # finds the PIV vector data
    data_dir = os.path.splitext(stk_path)[0] + "_piv_data"
    if not os.path.isdir(data_dir):
        raise FileNotFoundError("No PIV vector data found. Please run extraction script first.")
    # makes directories for saving the analysis results
    save_dir = os.path.join(data_dir,"analysis")
    uf.make_dir(save_dir)
    corr_dist_plot_dir = os.path.join(save_dir,"corr_dist_plots")
    uf.make_dir(corr_dist_plot_dir)
    # get unique basename list (from x coordinate data)
    basename_list = [_[:-6] for _ in os.listdir(data_dir) if '_x.dat' in _]
    basename_list = uf.natural_sort(basename_list)
    # creates lists for saving the data
    frame_list = []
    velocity_list = []
    angle_list = []
    speed_list = []
    corr_len_list = []
    r_squared_list = []
    # creates lists for making average correlation vs. distance plot
    delta_r_master_list = []
    cvv_master_list = []
    for i, basename in enumerate(basename_list[1:]): #the 0th time point is blank
        print('Analyzing frame:', basename)
        # loads coordinates and vector components for this frame
        x = np.array(uf.read_file(data_dir + "/" + basename + "_x.dat"),dtype=float)
        y = np.array(uf.read_file(data_dir + "/" + basename + "_y.dat"),dtype=float)
        U = np.array(uf.read_file(data_dir + "/" + basename + "_u.dat"),dtype=float)
        V = np.array(uf.read_file(data_dir + "/" + basename + "_v.dat"),dtype=float)
        # U = np.array(read_file(data_dir + "/" + basename + "_interp_u.dat"),dtype=float)
        # V = np.array(read_file(data_dir + "/" + basename + "_interp_v.dat"),dtype=float)
        #converts to arrays to account for nans (also convert to float arrays)
        # U = convert_nans(U)
        # V = convert_nans(V)
        #calculates the average velocity and migration angle over the whole field
        U_mean = np.nanmean(U)
        V_mean = np.nanmean(V)
        mean_veloc = np.linalg.norm((U_mean,V_mean)) * 60. #convert to um/hr
        mean_angle = np.degrees(np.arctan2(V_mean,U_mean))
        velocity_list.append(mean_veloc)
        angle_list.append(mean_angle)
        #calculates average speed (root mean squared velocity) over the whole field
        velocities = np.linalg.norm((U,V),axis=0)
        rms_velocity = np.sqrt(np.nanmean(velocities**2)) * 60. #convert to um/hr
        #calculates the directional correlation vs. distance over the whole matrix and appends for averaging later
        delta_r_list, cvv_list = get_corr_dist(x, y, U, V)
        delta_r_master_list.append(delta_r_list)
        cvv_master_list.append(cvv_list)
        # fits and plots the correlation vs. distance data
        p1, r_squared = fit_corr_dist_data(delta_r_list, cvv_list)
        corr_len = p1[0]
        fig, ax = plt.subplots(figsize=(6,4))
        plot_corr_dist_data(ax, delta_r_list, cvv_list, p1)
        plt.tight_layout()
        plt.savefig(os.path.join(corr_dist_plot_dir,"cvv_vs_delta_r_flat_frame_%i.pdf"%(i)))
        plt.close()
        #saves cvv and delta_r data
        data_to_save = list(zip(delta_r_list,cvv_list))
        data_to_save.insert(0,["delta_r","cvv"])
        uf.save_data_array(data_to_save,os.path.join(corr_dist_plot_dir,"cvv_vs_delta_r_flat_frame_%i.dat"%(i)))
        #appends speed and corr_len data
        frame_list.append(i)
        speed_list.append(rms_velocity)
        corr_len_list.append(corr_len)
        r_squared_list.append(r_squared)
    #saves all of the data
    speed_vs_corr_len_data = list(zip(frame_list, velocity_list,
                                      angle_list, speed_list,
                                      corr_len_list, r_squared_list))
    speed_vs_corr_len_data.insert(0, ["frame", "mean_velocity_um_per_hr",
                                      "mean_angle_deg", "mean_speed_um_per_hr",
                                      "corr_len_um", "r_squared"])
    uf.write_csv(speed_vs_corr_len_data,os.path.join(save_dir,"piv_analysis_data.csv"))
    #averages the correlation vs. distance data
    dist_mean, corr_mean, corr_std, corr_n = bin_corr_vs_dist(np.ravel(np.array(delta_r_master_list)),
                                                              np.ravel(np.array(cvv_master_list)))
    #adds initial points for correlation vs. distance (for plots)
    dist_mean = np.insert(dist_mean,0,0.)
    corr_mean = np.insert(corr_mean,0,1.)
    corr_std = np.insert(corr_std,0,0.)
    #plots average correrlation vs. distance data
    fig, ax = plt.subplots()
    ax = plot_corr_vs_dist_mean(ax, dist_mean, corr_mean, corr_std, 'C1')
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, "cvv_vs_delta_r_mean.pdf"))
    plt.close()
    #plots histogram of RMS velocity and correlation length
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    #sets up the bins
    bin_start = 0
    bin_end = 16
    bins = np.linspace(bin_start, bin_end, 21)
    # does the plotting (hist is drawn once to get the counts, then redrawn
    # as a normalized bar chart)
    n, hist, patches = ax.hist(speed_list, bins=bins)
    ax.clear()
    n = [_ / np.sum(n) for _ in n]
    ax.bar(hist[:-1], n, color='blue', align='edge', width=hist[1] - hist[0])
    ax.set_xlabel("Mean Instantaneous Speed ($\mu$m/hr)")
    ax.set_ylabel("Fraction of cells")
    ax.set_xlim(bin_start, bin_end)
    # finishes the plot
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir,"hist_RMS_velocity.pdf"))
    plt.close()
    # plots histogram of correlation length
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    # sets up the bins
    bin_start = 0
    bin_end = 30
    bins = np.linspace(bin_start, bin_end, 21)
    # does the plotting
    n, hist, patches = ax.hist(corr_len_list, bins=bins)
    ax.clear()
    n = [_ / np.sum(n) for _ in n]
    ax.bar(hist[:-1], n, color='blue', align='edge', width=hist[1] - hist[0])
    ax.set_xlabel("Correlation Length ($\mu$m)")
    ax.set_ylabel("Fraction of cells")
    ax.set_xlim(bin_start, bin_end)
    # finishes the plot
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir,"hist_corr_len.pdf"))
    plt.close()
def main():
    """Sets up the analysis of cell dynamics from the raw PIV vectors.
    You should update the image path here.
    You should not have to change anything in the rest of the script.
    """
    # Path to the image stack; the PIV data must already exist next to it
    # (see analyze_vectors for the expected directory layout).
    stk_path = './sample_data/tumor_nuclei_small/tumor_nuclei_small.tif'
    analyze_vectors(stk_path)
if __name__=="__main__":
main() |
<gh_stars>0
import csv
import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import Sequential
def datasetGen():
    """Build the training arrays from the simulator driving log.

    Reads simdata/driving_log.csv, loads the center/left/right camera images
    (rewriting the absolute recording path to the local simdata directory),
    offsets the steering angle for the side cameras, and augments each frame
    with its horizontal mirror and negated steering angle.

    Returns
    -------
    X : np.ndarray
        the camera images
    y : np.ndarray
        the corresponding steering angles
    """
    # BUG FIX: mpimg was used below but never imported anywhere in this module.
    import matplotlib.image as mpimg

    def _load(path_field):
        """Load one camera image, rewriting the recorded absolute path."""
        return mpimg.imread(
            path_field.strip().replace('/home/era/Projects/Work/simdata', 'simdata'))

    with open('simdata/driving_log.csv') as log_file:
        log_reader = csv.DictReader(log_file)
        X = []
        y = []
        # Steering correction applied to the side cameras.
        steering_offset = 0.4
        for row in log_reader:
            centerSteering = float(row['steering'])
            samples = (
                (_load(row['center']), centerSteering),
                (_load(row['left']), centerSteering + steering_offset),
                (_load(row['right']), centerSteering - steering_offset),
            )
            for image, steering in samples:
                # Original frame plus its mirrored augmentation.
                X.append(image)
                y.append(steering)
                X.append(np.fliplr(image))
                y.append(-steering)
        X = np.array(X)
        y = np.array(y)
    return X, y
def model():
    """Build and compile the steering-angle regression CNN.

    Five convolutional layers with interleaved average pooling, a dropout
    layer, and a stack of dense layers down to a single steering output.
    Compiled with MSE loss and the Adam optimizer.

    Returns
    -------
    keras.Sequential
        the compiled model, expecting 160x320 RGB input frames
    """
    model = Sequential()
    model.add(layers.Conv2D(8,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            activation='relu',
                            input_shape=(160, 320, 3),
                            padding='same'))
    model.add(layers.Conv2D(16,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            activation='relu',
                            padding='valid'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2),
                                      strides=(1, 1),
                                      padding='valid'))
    model.add(layers.Conv2D(32,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            activation='relu',
                            padding='valid'))
    model.add(layers.Conv2D(32,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            activation='relu',
                            padding='valid'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2),
                                      strides=(1, 1),
                                      padding='valid'))
    model.add(layers.Conv2D(64,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu',
                            padding='valid'))
    model.add(layers.Conv2D(64,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu',
                            padding='valid'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Flatten())
    # NOTE(review): the dense head uses 'linear' activations throughout, so it
    # collapses to a single affine map — confirm whether 'relu' was intended.
    model.add(layers.Dense(1024, activation='linear'))
    model.add(layers.Dense(512, activation='linear'))
    model.add(layers.Dense(64, activation='linear'))
    model.add(layers.Dense(8, activation='linear'))
    model.add(layers.Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam')
    return model
# Load the full dataset into memory and hold out a third for validation.
X, y = datasetGen()
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33, random_state=42)
# Note: this rebinds the name `model` from the builder function to the
# built Keras model, so model() cannot be called again afterwards.
model = model()
model.summary()
model.fit(X_train, y_train,
          epochs=8,
          batch_size=512,
          validation_data=(X_valid, y_valid))
model.save('model.h5') |
#!/usr/bin/env python3
from __future__ import print_function
import sys, re
import os
import glob
import hashlib
def bytehex(x):
    """Return the lowercase hex string for an iterable of byte values."""
    return ''.join('{:02x}'.format(byte) for byte in x)
def wc_for_iteration(todo_dir, fni):
    """Count the lines in iteration *fni*'s <n>_all.txt file."""
    path = "%s/%u_all.txt" % (todo_dir, fni)
    with open(path, "rb") as f:
        return sum(1 for _ in f)
def check_made_progress(todo_dir, max_iter, min_progress):
    """Return True if at least *min_progress* pips were solved in the last iteration.

    The first iteration always counts as progress, since there is no previous
    iteration to compare against.
    """
    if max_iter == 1:
        return True
    prev_iteration = wc_for_iteration(todo_dir, max_iter - 1)
    cur_iteration = wc_for_iteration(todo_dir, max_iter)
    made_progress = prev_iteration - cur_iteration > min_progress
    if not made_progress:
        # BUG FIX: the message used printf-style %d placeholders with
        # str.format(), which left them unfilled; use {} placeholders.
        print(
            "Between iteration {} and iteration {} only {} pips were solved. Terminating iteration."
            .format(max_iter - 1, max_iter, prev_iteration - cur_iteration))
    return made_progress
def run(
        todo_dir,
        min_iters=None,
        min_progress=None,
        timeout_iters=None,
        max_iters=None,
        zero_entries=None,
        zero_entries_filter=".*",
        verbose=False):
    """Evaluate the fuzzer iteration state and exit with a loop verdict.

    Exits 0 when iteration should stop (complete), 1 when another loop is
    needed (or on error/timeout). Never returns normally.

    :param todo_dir: directory containing <n>_all.txt files per iteration
    :param min_iters: minimum total number of iterations before stopping
    :param min_progress: minimum solved-pip delta between iterations
    :param timeout_iters: iteration count after which a timeout marker is written
    :param max_iters: iteration count at which success is declared
    :param zero_entries: require zero unsolved (filtered) entries to stop
    :param zero_entries_filter: regex selecting which entries count
    :param verbose: print extra progress information
    """
    timeout_fn = "%s/timeout" % todo_dir
    # make clean removes todo dir, but helps debugging
    if os.path.exists(timeout_fn):
        print("WARNING: removing %s" % timeout_fn)
        os.remove(timeout_fn)
    # Determine the highest completed iteration from the <n>_all.txt files.
    alls = glob.glob("%s/*_all.txt" % todo_dir)
    max_iter = 0
    for fn in alls:
        n = int(re.match(r".*/([0-9]*)_all.txt", fn).group(1))
        max_iter = max(max_iter, n)
    if max_iter == 0:
        print("Incomplete: no iters")
        sys.exit(1)
    verbose and print("Max iter: %u, need: %s" % (max_iter, min_iters))
    # Don't allow early termination if below min_iters
    if min_iters is not None and max_iter < min_iters:
        print("Incomplete: not enough iters")
        sys.exit(1)
    # Force early termination if at or above max_iters.
    if max_iters is not None and max_iter >= max_iters:
        print(
            "Complete: reached max iters (want %u, got %u)" %
            (max_iters, max_iter))
        sys.exit(0)
    # Mark timeout if above timeout_iters
    if timeout_iters is not None and max_iter > timeout_iters:
        print("ERROR: timeout (max %u, got %u)" % (timeout_iters, max_iter))
        # Create the timeout marker file for the surrounding tooling.
        with open(timeout_fn, "w") as _f:
            pass
        sys.exit(1)
    # Check if zero entries criteria is not met.
    if zero_entries:
        filt = re.compile(zero_entries_filter)
        count = 0
        fn = "%s/%u_all.txt" % (todo_dir, max_iter)
        with open(fn, 'r') as f:
            for l in f:
                if filt.search(l):
                    count += 1
        if count > 0:
            print("%s: %s lines" % (fn, count))
            print(
                "Incomplete: need zero entries (used filter: {})".format(
                    repr(zero_entries_filter)))
            sys.exit(1)
        else:
            # If there are zero entries, check if min_progress criteria is in
            # affect. If so, that becomes the new termination condition.
            if min_progress is None:
                print(
                    "No unfiltered entries, done (used filter: {})!".format(
                        repr(zero_entries_filter)))
                sys.exit(0)
            else:
                # Even if there are 0 unfiltered entries, fuzzer may still be
                # making progress with filtered entries.
                print(
                    "No unfiltered entries (used filter: {}), checking if progress is being made"
                    .format(repr(zero_entries_filter)))
    # Check if minimum progress was achieved, continue iteration if so.
    if min_progress is not None and not check_made_progress(todo_dir, max_iter,
                                                            min_progress):
        sys.exit(0)
    print("No exit criteria met, keep going!")
    sys.exit(1)
def main():
    """Parse command-line arguments and run the iteration completion check."""
    import argparse
    parser = argparse.ArgumentParser(
        description=
        "Check int_loop completion. Exits 0 on done, 1 if more loops are needed"
    )
    parser.add_argument('--verbose', action='store_true', help='')
    parser.add_argument('--todo-dir', default="build/todo", help='')
    parser.add_argument(
        '--min-iters', default=None, help='Minimum total number of iterations')
    parser.add_argument(
        '--min-progress',
        default=None,
        help=
        'Minimum amount of process between iterations. If less progress is made, terminates immediately.'
    )
    parser.add_argument(
        '--timeout-iters',
        default=None,
        help='Max number of entries before creating todo/timeout')
    parser.add_argument(
        '--max-iters',
        default=None,
        help='Max number of entries before declaring success')
    parser.add_argument(
        '--zero-entries',
        action="store_true",
        help='Must be no unsolved entries in latest')
    parser.add_argument(
        '--zero-entries-filter',
        default=".*",
        help=
        'When zero-entries is supplied, this filter is used to filter pips used for counting against zero entries termination condition.'
    )
    args = parser.parse_args()

    def zint(x):
        # Convert an optional string argument to int, preserving None.
        return None if x is None else int(x)
    run(
        todo_dir=args.todo_dir,
        min_iters=zint(args.min_iters),
        min_progress=zint(args.min_progress),
        timeout_iters=zint(args.timeout_iters),
        max_iters=zint(args.max_iters),
        zero_entries=args.zero_entries,
        zero_entries_filter=args.zero_entries_filter,
        verbose=args.verbose)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import functools
import urwid
import pyperclip
from components import StyledButton, OkDialog, OkCancelDialog, Dialog
from crypto.ninja import EncryptedImageNinja
from crypto.vault import ImageVault, Password
def close_app(*args):
    """Terminate the urwid main loop; usable directly as a widget callback."""
    raise urwid.ExitMainLoop()
# Display attributes referenced by name throughout the UI.
palette = [
    ('banner', 'dark red', ''),
    ('reversed', 'standout', ''),
]
# Shared application banner rendered at the top of every screen.
logo = urwid.Pile(
    [
        urwid.Padding(
            urwid.BigText(('banner', "PSWD SNITCH"), urwid.Thin6x6Font()),
            width="clip",
            align=urwid.CENTER,
        ),
        urwid.Divider(),
    ]
)
# Dotted fill drawn behind every overlay dialog/screen.
background = urwid.AttrMap(urwid.SolidFill('.'), 'bg')
class PasswordEditDialog(urwid.WidgetWrap):
    """Modal dialog for editing a password entry (name, login, passphrase)."""

    def __init__(
        self, parent, loop: urwid.MainLoop, password: Password, on_save: callable
    ):
        """Build the edit form as an overlay on top of *parent*.

        :param parent: widget to restore when the dialog closes
        :param loop: the application's main loop (used to swap widgets)
        :param password: the entry being edited (mutated on save)
        :param on_save: callback invoked with the updated password
        """
        self.loop = loop
        self.password = password
        self.on_save = on_save
        self.name_edit = urwid.Edit(edit_text=self.password.name, wrap=urwid.CLIP)
        self.login_edit = urwid.Edit(edit_text=self.password.login, wrap=urwid.CLIP)
        # The passphrase field starts empty and is masked while typing.
        self.password_edit = urwid.Edit(mask="*", wrap=urwid.CLIP)
        body = urwid.ListBox(
            urwid.SimpleFocusListWalker(
                [
                    urwid.Text("Name:"),
                    urwid.LineBox(self.name_edit),
                    urwid.Text("Login:"),
                    urwid.LineBox(self.login_edit),
                    urwid.Text("Password:"),
                    urwid.LineBox(self.password_edit),
                    urwid.Columns([
                        StyledButton("Cancel", on_press=self.close),
                        StyledButton("Save", on_press=self.save),
                    ]),
                ]
            )
        )
        widget = urwid.Overlay(
            Dialog(body, message=self.password.name, title="Edit"),
            parent,
            align=urwid.CENTER,
            valign=urwid.MIDDLE,
            width=50,
            height=20,
        )
        super().__init__(widget)

    def save(self, *args):
        """Validate the form; on success update the entry, notify, and close."""
        if not self.name_edit.get_edit_text():
            self.loop.widget = OkDialog(
                self, self.loop, "Name cannot be empty!", "Error!"
            )
        elif not self.login_edit.get_edit_text():
            self.loop.widget = OkDialog(
                self, self.loop, "Login cannot be empty!", "Error!"
            )
        elif not self.password_edit.get_edit_text():
            self.loop.widget = OkDialog(
                self, self.loop, "Password cannot be empty!", "Error!"
            )
        else:
            self.password.name = self.name_edit.get_edit_text()
            self.password.login = self.login_edit.get_edit_text()
            self.password.passphrase = self.password_edit.get_edit_text()
            self.on_save(self.password)
            self.close()

    def close(self, *args):
        """Dismiss the dialog, restoring the widget underneath the overlay."""
        self.loop.widget = self._w.bottom_w
class LoginScreen(urwid.WidgetWrap):
    """Initial screen prompting for the vault master password."""

    def __init__(
        self, loop: urwid.MainLoop, password_check: callable, on_success: callable
    ):
        """Build the login overlay.

        :param loop: the application's main loop (used to swap widgets)
        :param password_check: predicate returning True for a valid passphrase
        :param on_success: callback invoked with the accepted passphrase
        """
        self.loop = loop
        self.password_check = password_check
        self.on_success = on_success
        # Masked single-line input for the master password.
        self.password_edit = urwid.Edit(
            align=urwid.CENTER, multiline=False, wrap=urwid.CLIP, mask="*"
        )
        body = urwid.ListBox(
            urwid.SimpleFocusListWalker(
                [
                    urwid.Text("Enter password:", align=urwid.CENTER),
                    urwid.LineBox(self.password_edit),
                ]
            )
        )
        login_box = urwid.LineBox(
            urwid.Padding(urwid.Frame(header=logo, body=body), left=2, right=2)
        )
        widget = urwid.Overlay(
            login_box,
            background,
            align=urwid.CENTER,
            valign=urwid.MIDDLE,
            width=70,
            height=14,
        )
        super().__init__(widget)
        # Pre-built dialog shown whenever the check fails.
        self.wrong_password = OkDialog(
            self, self.loop, message="Wrong password!", title="Error!"
        )

    def keypress(self, size, key):
        """Submit the passphrase on Enter; other keys go to the edit widget."""
        if key == "enter":
            passphrase = self.password_edit.get_edit_text()
            if self.password_check(passphrase):
                self.on_success(passphrase)
            else:
                # Clear the field so a retry starts fresh.
                self.password_edit.set_edit_text("")
                self.loop.widget = self.wrong_password
        super().keypress(size, key)
class PasswordsScreen(urwid.WidgetWrap):
    """Main screen listing vault entries with edit/copy/add/save shortcuts."""

    def __init__(self, loop: urwid.MainLoop, vault: ImageVault):
        """Build the password list overlay.

        :param loop: the application's main loop (used to swap widgets)
        :param vault: the opened image vault holding the password entries
        """
        self.loop = loop
        self.vault = vault
        footer = urwid.Pile(
            [
                urwid.Divider(),
                urwid.Columns(
                    [
                        urwid.Text("Q: Quit", align=urwid.CENTER),
                        urwid.Text("C: Clipboard", align=urwid.CENTER),
                        urwid.Text("A: Add", align=urwid.CENTER),
                        urwid.Text("S: Save", align=urwid.CENTER),
                    ]
                ),
            ]
        )
        self.passwords_list_box = urwid.ListBox(urwid.SimpleFocusListWalker([]))
        self.setup_password_buttons()
        main = urwid.LineBox(
            urwid.Padding(
                urwid.Frame(header=logo, body=self.passwords_list_box, footer=footer),
                left=2,
                right=2,
            )
        )
        widget = urwid.Overlay(
            main,
            background,
            align=urwid.CENTER,
            valign=urwid.MIDDLE,
            width=80,
            height=(urwid.RELATIVE, 60),
            min_height=10,
        )
        super().__init__(widget)
        self.exit_dialog = OkCancelDialog(
            self, self.loop, "Exit?", title="", on_ok=close_app
        )

    def setup_password_buttons(self):
        """Rebuild the list of password buttons from the vault contents."""
        def edit_password(index, button):
            password = self.vault.passwords[index]
            on_save = functools.partial(self.save_password, index=index)
            self.loop.widget = PasswordEditDialog(
                self, self.loop, password, on_save=on_save
            )
        self.passwords_list_box._set_body(
            [
                StyledButton(str(p), on_press=functools.partial(edit_password, i))
                for i, p in enumerate(self.vault.passwords)
            ]
        )

    def keypress(self, size, key):
        """Handle the global shortcuts advertised in the footer (either case)."""
        if key in ("c", "C"):
            # BUG FIX: only lowercase "c" was handled, unlike the other shortcuts.
            password = self.vault.passwords[self.passwords_list_box.focus_position]
            pyperclip.copy(password.passphrase)
            self.loop.widget = OkDialog(
                self,
                self.loop,
                message="Copied password to clipboard!",
                title=str(password),
            )
        elif key in ("q", "Q"):
            self.loop.widget = self.exit_dialog
        elif key in ("a", "A"):
            self.loop.widget = PasswordEditDialog(
                self, self.loop, Password("", "", ""), self.save_password
            )
        elif key in ("s", "S"):
            # BUG FIX: was ("s", "Save"), so uppercase "S" never triggered a save.
            self.vault.save()
            self.loop.widget = OkDialog(
                self, self.loop, message="Vault saved!", title="Success!"
            )
        super().keypress(size, key)

    def save_password(self, password: Password, *, index: int = None):
        """Insert or replace an entry; ``index=None`` appends a new one."""
        if index is None:
            self.vault.passwords.append(password)
        else:
            self.vault.passwords[index] = password
        self.setup_password_buttons()
class Application:
    """Top-level controller wiring the login screen to the passwords screen."""

    def __init__(self, path: str, for_write: bool):
        """Set up the main loop and show the login screen.

        :param path: path to the vault image file
        :param for_write: True when creating a new vault (any password accepted)
        """
        self.path = path
        self.for_write = for_write
        self.main_view = None
        self.loop = urwid.MainLoop(None, palette=palette)

        def password_check(password: str) -> bool:
            # For a new vault any password is acceptable; otherwise try to
            # open the vault and treat a decryption failure as a wrong password.
            if self.for_write:
                return True
            try:
                ImageVault(self.path, password=password)
                return True
            except EncryptedImageNinja.InvalidPassword:
                return False

        def on_success(password):
            # Re-open the vault with the accepted password and switch screens.
            self.main_view = PasswordsScreen(
                self.loop, ImageVault(self.path, password, self.for_write)
            )
            self.loop.widget = self.main_view

        self.login_screen = LoginScreen(
            self.loop, password_check=password_check, on_success=on_success
        )
        self.loop.widget = self.login_screen

    def run(self):
        """Run the urwid main loop until the application exits."""
        self.loop.run()
if __name__ == "__main__":
    try:
        import argparse
        parser = argparse.ArgumentParser()
        # --new creates a fresh vault; any password is accepted for it.
        parser.add_argument("--new", action="store_true")
        # FileType() validates that the image file exists and is readable.
        parser.add_argument("image", type=argparse.FileType())
        args = parser.parse_args()
        Application(args.image.name, for_write=bool(args.new)).run()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly without a traceback.
        pass
|
<filename>prologGeneral.py<gh_stars>1-10
import re
import subprocess
from util import SilentLimitedBuffer
# Path where the generated Prolog test file is written.
testfileName = '/tmp/tmp-testfile.pl'
# Matches references to the test file (optionally with :line: suffixes) in output.
plTestfile = re.compile(testfileName.replace(".", "\\.") + r"(:[0-9]*)?:?")
# Matches plunit progress lines made only of status characters.
plStatus = re.compile(r"^[A.!+-]+$")
# Matches the first line of an ERROR/Warning block.
plResult = re.compile(r"^(ERROR|Warning): (.*)")
plDone = re.compile(r"done$")
# Matches continuation lines of an ERROR block (indented detail lines).
plInfo = re.compile(r"^(ERROR: {5}|\t)(.*)")
plBeginTest = re.compile(r":- +begin_tests\(([^,]*)(,.*)?\)")
plEndTest = re.compile(r":- +end_tests\((.*)\)")
# Matches %! annotation comments in test sources.
plComment = re.compile(r"%!(.*)")
# Matches container mount-point prefixes to be stripped from output paths.
plMountdir = re.compile(r"/mnt/[^/]*/")
def removePath(s: str, testname: str):
    """Removes the path to the test file from the output

    Arguments:
        s {str} -- Text to clean
        testname {str} -- plunit module name whose prefix is also stripped

    Returns:
        str -- The cleaned text
    """
    cleaned = removeMountDir(s)
    cleaned = re.sub(plTestfile, "", cleaned)
    return cleaned.replace("plunit_" + testname + ":", "")
def removeMountDir(s: str):
    """Strip any '/mnt/<share>/' prefix fragments from *s*."""
    return plMountdir.sub("", s)
def analyse(errorType, data, errors):
    """Adds errors to the error array

    Arguments:
        errorType {str} -- The type of the error (None means "compilation")
        data {list(str)} -- a list of lines of info; nothing is appended when empty
        errors {list(dict)} -- list to append to
    """
    if errorType is None:
        errorType = "compilation"
    if not data:
        return
    description = "\n".join(data)
    errors.append({
        "accepted": False,
        "description": errorType,
        "messages": [{
            "format": "code",
            "description": description,
            "permission": "student",
        }],
    })
def checkErrors(lines, testname):
    """Parse swipl output lines into a list of feedback testcases.

    Status lines flush the current buffer; "ERROR:"/"Warning:" lines start a
    new error of that type; indented info lines and anything unrecognized are
    buffered under the current error type.
    """
    errorType = None
    buffered = []
    testcases = []
    for raw in lines:
        cleaned = removePath(raw.rstrip(), testname)
        if plStatus.match(cleaned):
            # Progress line: emit whatever was buffered and start fresh.
            analyse(errorType, buffered, testcases)
            buffered = []
            continue
        result = plResult.match(cleaned)
        info = plInfo.match(cleaned)
        # NOTE: info is deliberately checked before result — a line such as
        # "ERROR:     ..." matches both patterns and must be treated as info.
        if info:
            buffered.append(info.group(2))
        elif result:
            analyse(errorType, buffered, testcases)
            extra = result.group(2).strip()
            buffered = [extra] if extra else []
            errorType = result.group(1)
        else:
            buffered.append(cleaned)
    # Flush the final buffer.
    analyse(errorType, buffered, testcases)
    return testcases
def swipl(scriptfile, testname, goal, outputHandler, timeout, config, bufsize=2500, removeMounts=True):
    """Run *goal* from *scriptfile* under SWI-Prolog and collect testcases.

    Arguments:
        scriptfile {str} -- Path of the Prolog file to load
        testname {str} -- Name of the plunit test block (used to clean output)
        goal {str} -- Goal passed to swipl via ``-t``
        outputHandler -- Callable taking ``stdout``, ``stderr``, ``testname``,
                         ``scriptfile``, ``config`` and ``timeout`` keyword
                         arguments and returning a list of testcase dicts
        timeout {int} -- Seconds to wait before terminating swipl
        config {dict} -- Judge config ("prolog_stack_limit", "workdir")

    Keyword Arguments:
        bufsize {int} -- Maximum number of bytes kept from each stream
        removeMounts {bool} -- Strip /mnt/<id>/ prefixes from the output

    Returns:
        list(dict) -- Testcases produced by outputHandler
    """
    testcases = []
    runner = subprocess.Popen(
        ['swipl',
         '-t', goal,
         '--quiet=yes',
         '--tty=yes',
         '--signals=no',
         "--stack_limit=" + config["prolog_stack_limit"],
         scriptfile,
         ],
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        bufsize=None,
        cwd=config["workdir"])
    # Keep reading each stream, retaining only the first `bufsize` bytes.
    # BUG FIX: the buffers were crossed (stdBuf consumed runner.stderr and
    # errBuf consumed runner.stdout), so outputHandler received the two
    # streams swapped.
    stdBuf = SilentLimitedBuffer(maxsize=bufsize).procces(runner.stdout)
    errBuf = SilentLimitedBuffer(maxsize=bufsize).procces(runner.stderr)
    didTimeout = True
    try:
        runner.wait(timeout=timeout)
        didTimeout = False
    except subprocess.TimeoutExpired:
        runner.terminate()
    resStdOut = stdBuf.retreive_and_stop()
    resStdErr = errBuf.retreive_and_stop()
    runner.stderr.close()
    runner.stdout.close()
    # Clean output and split into lines (keeping line endings).
    if removeMounts:
        resStdOut = removeMountDir(resStdOut).splitlines(True)
        resStdErr = removeMountDir(resStdErr).splitlines(True)
    else:
        resStdOut = resStdOut.splitlines(True)
        resStdErr = resStdErr.splitlines(True)
    testcases += outputHandler(
        stdout=resStdOut,
        stderr=resStdErr,
        testname=testname,
        scriptfile=scriptfile,
        config=config,
        timeout=didTimeout
    )
    return testcases
|
from __future__ import absolute_import
import numpy as np
import os
import pytest
from mirdata import medleydb_pitch, utils
from tests.test_utils import mock_validated, mock_validator, DEFAULT_DATA_HOME
def test_track():
    """Track construction: default data home, bad id, paths, metadata, audio, repr."""
    # test data home None
    track_default = medleydb_pitch.Track('AClassicEducation_NightOwl_STEM_08')
    assert track_default._data_home == os.path.join(DEFAULT_DATA_HOME, 'MedleyDB-Pitch')
    data_home = 'tests/resources/mir_datasets/MedleyDB-Pitch'
    # an unknown track id must raise
    with pytest.raises(ValueError):
        medleydb_pitch.Track('asdfasdf', data_home=data_home)
    track = medleydb_pitch.Track(
        'AClassicEducation_NightOwl_STEM_08', data_home=data_home
    )
    # test attributes
    assert track.track_id == 'AClassicEducation_NightOwl_STEM_08'
    assert track._data_home == data_home
    # relative paths paired with their expected md5 checksums
    assert track._track_paths == {
        'audio': [
            'audio/AClassicEducation_NightOwl_STEM_08.wav',
            '6cfb976517cf377863ba0ef6c66c6a07',
        ],
        'pitch': [
            'pitch/AClassicEducation_NightOwl_STEM_08.csv',
            '67009ae37766c37d3c29146bf763e06d',
        ],
    }
    assert (
        track.audio_path
        == 'tests/resources/mir_datasets/'
        + 'MedleyDB-Pitch/audio/AClassicEducation_NightOwl_STEM_08.wav'
    )
    assert track.instrument == 'male singer'
    assert track.artist == 'AClassicEducation'
    assert track.title == 'NightOwl'
    assert track.genre == 'Singer/Songwriter'
    assert type(track.pitch) is utils.F0Data
    # the test fixture audio is 2 seconds at 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (44100 * 2,)
    # NOTE(review): the expected repr mirrors the library's __repr__ verbatim
    # (including "PitchData" even though the attribute type is F0Data)
    repr_string = (
        "MedleyDb-Pitch Track(track_id=AClassicEducation_NightOwl_STEM_08, "
        + "audio_path=tests/resources/mir_datasets/MedleyDB-Pitch/audio/"
        + "AClassicEducation_NightOwl_STEM_08.wav, "
        + "artist=AClassicEducation, title=NightOwl, genre=Singer/Songwriter, "
        + "instrument=male singer, pitch=PitchData('times', 'pitches', 'confidence'))"
    )
    assert track.__repr__() == repr_string
def test_track_ids():
    """track_ids() should return the full list of 103 track ids."""
    ids = medleydb_pitch.track_ids()
    assert type(ids) is list
    assert len(ids) == 103
def test_load():
    """load() should return a dict of 103 tracks for explicit and default data homes."""
    data_home = 'tests/resources/mir_datasets/MedleyDB-Pitch'
    medleydb_pitch_data = medleydb_pitch.load(
        data_home=data_home, silence_validator=True
    )
    assert type(medleydb_pitch_data) is dict
    # BUG FIX: was `is 103` — identity comparison with an int literal only
    # works via CPython's small-int cache and is a SyntaxWarning on 3.8+;
    # equality is what is meant here.
    assert len(medleydb_pitch_data.keys()) == 103
    medleydb_pitch_data_default = medleydb_pitch.load(silence_validator=True)
    assert type(medleydb_pitch_data_default) is dict
    assert len(medleydb_pitch_data_default.keys()) == 103
def test_load_pitch():
    """_load_pitch should parse the csv into an F0Data and return None for missing files."""
    # load a file which exists
    pitch_path = (
        'tests/resources/mir_datasets/MedleyDB-Pitch/'
        + 'pitch/AClassicEducation_NightOwl_STEM_08.csv'
    )
    pitch_data = medleydb_pitch._load_pitch(pitch_path)
    # check types
    assert type(pitch_data) == utils.F0Data
    assert type(pitch_data.times) is np.ndarray
    assert type(pitch_data.frequencies) is np.ndarray
    assert type(pitch_data.confidence) is np.ndarray
    # check values (the fixture csv has exactly these two annotation rows)
    assert np.array_equal(
        pitch_data.times, np.array([0.06965986394557823, 0.07546485260770976])
    )
    assert np.array_equal(pitch_data.frequencies, np.array([0.0, 191.877]))
    assert np.array_equal(pitch_data.confidence, np.array([0.0, 1.0]))
    # load a file which doesn't exist
    pitch_data_none = medleydb_pitch._load_pitch('fake/file/path')
    assert pitch_data_none is None
def test_load_metadata():
    """_load_metadata should return the per-track metadata dict, or None if missing."""
    data_home = 'tests/resources/mir_datasets/MedleyDB-Pitch'
    metadata = medleydb_pitch._load_metadata(data_home)
    assert metadata['data_home'] == data_home
    assert metadata['AClassicEducation_NightOwl_STEM_08'] == {
        'audio_path': 'MedleyDB-Pitch/audio/AClassicEducation_NightOwl_STEM_08.wav',
        'pitch_path': 'MedleyDB-Pitch/pitch/AClassicEducation_NightOwl_STEM_08.csv',
        'instrument': 'male singer',
        'artist': 'AClassicEducation',
        'title': 'NightOwl',
        'genre': 'Singer/Songwriter',
    }
    # a nonexistent data home yields None rather than raising
    metadata_none = medleydb_pitch._load_metadata('asdf/asdf')
    assert metadata_none is None
def test_cite():
    """cite() should run without raising (smoke test only)."""
    medleydb_pitch.cite()
@pytest.fixture
def mock_validate(mocker):
    """Fixture: patch medleydb_pitch.validate and return the mock."""
    return mocker.patch.object(medleydb_pitch, 'validate')
@pytest.fixture
def data_home(tmpdir):
    """Fixture: a throwaway data-home directory as a string path."""
    return str(tmpdir)
def test_validate_valid(data_home, mocker, mock_validator):
    """validate() reports nothing missing/invalid when the mocked validator passes."""
    mock_validator.return_value = (False, False)
    missing_files, invalid_checksums = medleydb_pitch.validate(data_home)
    assert not (missing_files or invalid_checksums)
    mock_validator.assert_called_once()
|
#coding=utf-8
''' '''
'''
XPath 是一门语言
XPath可以在XML文档中查找信息
XPath支持HTML
XPath通过元素和属性进行导航
XPath可以用来提取信息
XPath比正则表达式厉害
XPath比正则表达式简单
'''
'''
安装lxml库
from lxml import etree
Selector = etree.HTML(网页源代码)
Selector.xpath(一段神奇的符号)
'''
'''
树状结构
逐层展开
逐层定位
寻找独立节点
手动分析法
Chrome生成法
'''
'''
语法:
//定位根节点
/往下层寻找
提取文本内容:/text()
提取属性内容: /@xxxx
nodename 选取此节点的所有子节点。
/ 从根节点选取。
// 从匹配选择的当前节点选择文档中的节点,而不考虑它们的位置。
. 选取当前节点。
.. 选取当前节点的父节点。
@ 选取属性。
* 匹配任何元素节点。
@* 匹配任何属性节点。
node() 匹配任何类型的节点。
绝对位置路径:
/step/step/...
相对位置路径:
step/step/...
bookstore 选取 bookstore 元素的所有子节点。
/bookstore
选取根元素 bookstore。
注释:假如路径起始于正斜杠( / ),则此路径始终代表到某元素的绝对路径!
bookstore/book 选取属于 bookstore 的子元素的所有 book 元素。
//book 选取所有 book 子元素,而不管它们在文档中的位置。
bookstore//book 选择属于 bookstore 元素的后代的所有 book 元素,而不管它们位于 bookstore 之下的什么位置。
//@lang 选取名为 lang 的所有属性。
1、以相同的字符开头
starts-with(@属性名称, 属性字符相同部分)
<div id="test-1">需要的内容1</div>
<div id="test-2">需要的内容2</div>
<div id="testfault">需要的内容3</div>
2、标签套标签
string(.)
<div id=“class3”>美女,
<font color=red>你的微信是多少?</font>
</div>
'''
'''
Python并行化介绍
Map的使用
map 函数一手包办了序列操作、参数传递和结果保存等一系列的操作。
from multiprocessing.dummy import Pool
pool = Pool(4)
results = pool.map(爬取函数, 网址列表)
'''
'''
实战:
目标网站:http://tieba.baidu.com/p/3522395718
目标内容:跟帖用户名,跟帖内容,跟帖时间
涉及知识:
Requests获取网页
XPath提取内容
map实现多线程爬虫
'''
from lxml import etree
html = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title>测试-常规用法</title>
<a href="http://jikexueyuan.com1">极客学院1</a>
</head>
<body>
<div id="content">
<ul id="useful">
<a href="http://jikexueyuan.com2">极客学院2</a>
<li>这是第一条信息</li>
<li>这是第二条信息</li>
<li>这是第三条信息</li>
</ul>
<ul id="useless">
<li>不需要的信息1</li>
<li>不需要的信息2</li>
<li>不需要的信息3</li>
</ul>
<div id="url">
<a href="http://jikexueyuan.com3">极客学院3</a>
<a href="http://jikexueyuan.com/course/4" title="极客学院课程库">点我打开课程库</a>
</div>
</div>
<div id="content">
<ul id="useful">
<a href="http://jikexueyuan.com5">极客学院4</a>
<li>这是第一条信息2</li>
<li>这是第二条信息2</li>
<li>这是第三条信息2</li>
</ul>
<ul id="useless">
<a href="http://jikexueyuan.com6">极客学院5</a>
<li>不需要的信息12</li>
<li>不需要的信息22</li>
<li>不需要的信息32</li>
</ul>
<div id="url">
<a href="http://jikexueyuan.com7">极客学院</a>
<a href="http://jikexueyuan.com/course/8" title="极客学院课程库">点我打开课程库</a>
</div>
</div>
</body>
</html>
'''
selector = etree.HTML(html)
# extract the text of the <li> items inside the "useful" list
# BUG FIX: `print x` is Python-2-only syntax and fails to parse on Python 3;
# `print(x)` with a single argument behaves identically on Python 2 as well.
content = selector.xpath('//ul[@id="useful"]/li/text()')
for each in content:
    print(each)
# extract the href attribute of every <a>
link = selector.xpath('//a/@href')
for each in link:
    print(each)
title = selector.xpath('//a/@title')
print(title[0])
html1 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="test-1">需要的内容1</div>
<div id="test-2">需要的内容2</div>
<div id="testfault">需要的内容3</div>
</body>
</html>
'''
html2 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="test3">
我左青龙,
<span id="tiger">
右白虎,
<ul>上朱雀,
<li>下玄武。</li>
</ul>
老牛在当中,
</span>
龙头在胸口。
</div>
</body>
</html>
'''
# BUG FIX: converted Python-2 `print x` statements to `print(x)` calls —
# identical output on Python 2 (single argument), valid syntax on Python 3.
print("\n\n\n")
# starts-with(): match all divs whose id begins with "test"
selector = etree.HTML(html1)
content = selector.xpath('//div[starts-with(@id,"test")]/text()')
for each in content:
    print(each)
# text() only returns the div's direct text nodes, not nested tag content
selector = etree.HTML(html2)
content_1 = selector.xpath('//div[@id="test3"]/text()')
for each in content_1:
    print(each)
# string(.) flattens the whole subtree (nested tags included) into one string
data = selector.xpath('//div[@id="test3"]')[0]
info = data.xpath('string(.)')
content_2 = info.replace('\n','').replace(' ','')
print(content_2)
|
import gzip
import importlib
import logging
import uuid
import zlib
import six
from six.moves import urllib
from . import packet
from . import payload
from . import socket
class Server(object):
    """An Engine.IO server.

    This class implements a fully compliant Engine.IO web server with support
    for websocket and long-polling transports.

    :param async_mode: The library used for asynchronous operations. Valid
                       options are "threading", "eventlet" and "gevent". If
                       this argument is not given, "eventlet" is tried first,
                       then "gevent", and finally "threading". The websocket
                       transport is only supported in "eventlet" mode
                       and "gevent" mode if gevent-websocket is available.
    :param ping_timeout: The time in seconds that the client waits for the
                         server to respond before disconnecting.
    :param ping_interval: The interval in seconds at which the client pings
                          the server.
    :param max_http_buffer_size: The maximum size of a message when using the
                                 polling transport.
    :param allow_upgrades: Whether to allow transport upgrades or not.
    :param http_compression: Whether to compress packages when using the
                             polling transport.
    :param compression_threshold: Only compress messages when their byte size
                                  is greater than this value.
    :param cookie: Name of the HTTP cookie that contains the client session
                   id. If set to ``None``, a cookie is not sent to the client.
    :param cors_allowed_origins: List of origins that are allowed to connect
                                 to this server. All origins are allowed by
                                 default.
    :param cors_credentials: Whether credentials (cookies, authentication) are
                             allowed in requests to this server.
    :param logger: To enable logging set to ``True`` or pass a logger object to
                   use. To disable logging set to ``False``.
    """
    compression_methods = ['gzip', 'deflate']
    event_names = ['connect', 'disconnect', 'message']

    def __init__(self, async_mode=None, ping_timeout=60, ping_interval=25,
                 max_http_buffer_size=100000000, allow_upgrades=True,
                 http_compression=True, compression_threshold=1024,
                 cookie='io', cors_allowed_origins=None,
                 cors_credentials=True, logger=False):
        self.ping_timeout = ping_timeout
        self.ping_interval = ping_interval
        self.max_http_buffer_size = max_http_buffer_size
        self.allow_upgrades = allow_upgrades
        self.http_compression = http_compression
        self.compression_threshold = compression_threshold
        self.cookie = cookie
        self.cors_allowed_origins = cors_allowed_origins
        self.cors_credentials = cors_credentials
        self.sockets = {}    # sid -> socket.Socket
        self.handlers = {}   # event name -> handler callable
        if not isinstance(logger, bool):
            self.logger = logger
        else:
            logging.basicConfig()
            self.logger = logging.getLogger('engineio')
            if logger:
                self.logger.setLevel(logging.INFO)
            else:
                self.logger.setLevel(logging.ERROR)
        # BUG FIX: ``async`` became a reserved keyword in Python 3.7, so the
        # original ``async`` local variable / ``self.async`` attribute made
        # this module unparseable on modern interpreters. The async helper
        # module is now stored as ``self._async``.
        _async = None
        if async_mode is None or async_mode == 'eventlet':
            try:
                _async = importlib.import_module('engineio.async.eventlet')
                async_mode = 'eventlet'
            except ImportError:
                if async_mode == 'eventlet':
                    raise
        if async_mode is None or async_mode == 'gevent':
            try:
                _async = importlib.import_module('engineio.async.gevent')
                async_mode = 'gevent'
            except ImportError:
                if async_mode == 'gevent':
                    raise
        if async_mode is None or async_mode == 'threading':
            _async = importlib.import_module('engineio.async.threading')
            async_mode = 'threading'
        if _async is None:
            raise ValueError('Invalid async_mode specified')
        self.async_mode = async_mode
        self._async = _async
        self.logger.info('Server initialized for %s.', self.async_mode)

    def on(self, event, handler=None):
        """Register an event handler.

        :param event: The event name. Can be ``'connect'``, ``'message'`` or
                      ``'disconnect'``.
        :param handler: The function that should be invoked to handle the
                        event. When this parameter is not given, the method
                        acts as a decorator for the handler function.

        Example usage::

            # as a decorator:
            @eio.on('connect')
            def connect_handler(sid, environ):
                print('Connection request')
                if environ['REMOTE_ADDR'] in blacklisted:
                    return False  # reject

            # as a method:
            def message_handler(sid, msg):
                print('Received message: ', msg)
                eio.send(sid, 'response')
            eio.on('message', message_handler)

        The handler function receives the ``sid`` (session ID) for the
        client as first argument. The ``'connect'`` event handler receives the
        WSGI environment as a second argument, and can return ``False`` to
        reject the connection. The ``'message'`` handler receives the message
        payload as a second argument. The ``'disconnect'`` handler does not
        take a second argument.
        """
        if event not in self.event_names:
            raise ValueError('Invalid event')

        def set_handler(handler):
            self.handlers[event] = handler
            return handler

        if handler is None:
            # used as a decorator
            return set_handler
        set_handler(handler)

    def send(self, sid, data, binary=None):
        """Send a message to a client.

        :param sid: The session id of the recipient client.
        :param data: The data to send to the client. Data can be of type
                     ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
                     or ``dict``, the data will be serialized as JSON.
        :param binary: ``True`` to send packet as binary, ``False`` to send
                       as text. If not given, unicode (Python 2) and str
                       (Python 3) are sent as text, and str (Python 2) and
                       bytes (Python 3) are sent as binary.
        """
        self._get_socket(sid).send(packet.Packet(packet.MESSAGE, data=data,
                                                 binary=binary))

    def disconnect(self, sid=None):
        """Disconnect a client.

        :param sid: The session id of the client to close. If this parameter
                    is not given, then all clients are closed.
        """
        if sid is not None:
            self._get_socket(sid).close()
            del self.sockets[sid]
        else:
            for client in six.itervalues(self.sockets):
                client.close()
            self.sockets = {}

    def handle_request(self, environ, start_response):
        """Handle an HTTP request from the client.

        This is the entry point of the Engine.IO application, using the same
        interface as a WSGI application. For the typical usage, this function
        is invoked by the :class:`Middleware` instance, but it can be invoked
        directly when the middleware is not used.

        :param environ: The WSGI environment.
        :param start_response: The WSGI ``start_response`` function.

        This function returns the HTTP response body to deliver to the client
        as a byte sequence.
        """
        method = environ['REQUEST_METHOD']
        query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
        if 'j' in query:
            self.logger.warning('JSONP requests are not supported')
            r = self._bad_request()
        else:
            sid = query['sid'][0] if 'sid' in query else None
            b64 = query['b64'][0] if 'b64' in query else False
            if method == 'GET':
                if sid is None:
                    # no session id: this is a new connection
                    r = self._handle_connect(environ)
                else:
                    if sid not in self.sockets:
                        self.logger.warning('Invalid session %s', sid)
                        r = self._bad_request()
                    else:
                        socket = self._get_socket(sid)
                        try:
                            packets = socket.handle_get_request(
                                environ, start_response)
                            r = self._ok(packets, b64=b64)
                        except IOError:
                            del self.sockets[sid]
                            r = self._bad_request()
            elif method == 'POST':
                if sid is None or sid not in self.sockets:
                    self.logger.warning('Invalid session %s', sid)
                    r = self._bad_request()
                else:
                    socket = self._get_socket(sid)
                    try:
                        socket.handle_post_request(environ)
                        r = self._ok()
                    except ValueError:
                        r = self._bad_request()
            else:
                self.logger.warning('Method %s not supported', method)
                r = self._method_not_found()
        if self.http_compression and \
                len(r['response']) >= self.compression_threshold:
            # BUG FIX: per PEP 3333 the Accept-Encoding request header is
            # exposed as HTTP_ACCEPT_ENCODING in the WSGI environ; the bare
            # ACCEPT_ENCODING key never exists, so compression never fired.
            encodings = [e.split(';')[0].strip() for e in
                         environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
            for encoding in encodings:
                if encoding in self.compression_methods:
                    r['response'] = \
                        getattr(self, '_' + encoding)(r['response'])
                    r['headers'] += [('Content-Encoding', encoding)]
                    break
        cors_headers = self._cors_headers(environ)
        start_response(r['status'], r['headers'] + cors_headers)
        return [r['response']]

    def _generate_id(self):
        """Generate a unique session id."""
        return uuid.uuid4().hex

    def _handle_connect(self, environ):
        """Handle a client connection request."""
        sid = self._generate_id()
        s = socket.Socket(self, sid)
        self.sockets[sid] = s
        # announce the session parameters to the client (times in ms)
        pkt = packet.Packet(
            packet.OPEN, {'sid': sid,
                          'upgrades': self._upgrades(sid),
                          'pingTimeout': int(self.ping_timeout * 1000),
                          'pingInterval': int(self.ping_interval * 1000)})
        s.send(pkt)
        if self._trigger_event('connect', sid, environ) is False:
            self.logger.warning('Application rejected connection')
            del self.sockets[sid]
            return self._unauthorized()
        headers = None
        if self.cookie:
            headers = [('Set-Cookie', self.cookie + '=' + sid)]
        return self._ok(s.poll(), headers=headers)

    def _upgrades(self, sid):
        """Return the list of possible upgrades for a client connection."""
        if not self.allow_upgrades or self._get_socket(sid).upgraded or \
                not self._async.has_websocket:
            return []
        return ['websocket']

    def _trigger_event(self, event, *args):
        """Invoke an event handler, if one is registered."""
        if event in self.handlers:
            return self.handlers[event](*args)

    def _get_socket(self, sid):
        """Return the socket object for a given session.

        :raises KeyError: if the session is unknown or already disconnected.
        """
        try:
            s = self.sockets[sid]
        except KeyError:
            raise KeyError('Session not found')
        if s.closed:
            del self.sockets[sid]
            raise KeyError('Session is disconnected')
        return s

    def _ok(self, packets=None, headers=None, b64=False):
        """Generate a successful HTTP response."""
        if packets is not None:
            if headers is None:
                headers = []
            headers += [('Content-Type', 'application/octet-stream')]
            return {'status': '200 OK',
                    'headers': headers,
                    'response': payload.Payload(packets=packets).encode(b64)}
        else:
            return {'status': '200 OK',
                    'headers': [('Content-Type', 'text/plain')],
                    'response': b'OK'}

    def _bad_request(self):
        """Generate a bad request HTTP error response."""
        return {'status': '400 BAD REQUEST',
                'headers': [('Content-Type', 'text/plain')],
                'response': b'Bad Request'}

    def _method_not_found(self):
        """Generate a method not found HTTP error response."""
        return {'status': '405 METHOD NOT FOUND',
                'headers': [('Content-Type', 'text/plain')],
                'response': b'Method Not Found'}

    def _unauthorized(self):
        """Generate a unauthorized HTTP error response."""
        return {'status': '401 UNAUTHORIZED',
                'headers': [('Content-Type', 'text/plain')],
                'response': b'Unauthorized'}

    def _cors_headers(self, environ):
        """Return the cross-origin-resource-sharing headers."""
        # BUG FIX: per PEP 3333 the Origin request header is exposed as
        # HTTP_ORIGIN in the WSGI environ; the bare ORIGIN key never exists,
        # so the allowed-origins restriction was silently ineffective.
        if self.cors_allowed_origins is not None and \
                environ.get('HTTP_ORIGIN', '') not in self.cors_allowed_origins:
            return []
        if 'HTTP_ORIGIN' in environ:
            headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])]
        else:
            headers = [('Access-Control-Allow-Origin', '*')]
        if self.cors_credentials:
            headers += [('Access-Control-Allow-Credentials', 'true')]
        return headers

    def _gzip(self, response):
        """Apply gzip compression to a response."""
        bytesio = six.BytesIO()
        with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
            gz.write(response)
        return bytesio.getvalue()

    def _deflate(self, response):
        """Apply deflate compression to a response."""
        return zlib.compress(response)
|
<reponame>ashwinahuja/HowQuicklyCanWeGetBackToThePub
import numpy as np
from .. import config, utils
from ..case import CaseFactors
from . import registry
from .common import _limit_contact, RETURN_KEYS
@registry("delve")
def delve(case, contacts, rng, **kwds):
    """Run the DELVE test-trace-isolate strategy for a single case.

    Splits ``kwds`` into strategy knobs and case-factor knobs using the keys
    declared in ``config``, simulates the case factors, then evaluates the
    TTI flow model and returns its metrics.
    """
    strategy_kwargs = utils.get_sub_dictionary(kwds, config.DELVE_STRATEGY_FACTOR_KEYS)
    factor_kwargs = utils.get_sub_dictionary(kwds, config.DELVE_CASE_FACTOR_KEYS)
    case_factors = CaseFactors.simulate_from(rng, case, **factor_kwargs)
    strategy = TTIFlowModel(rng, **strategy_kwargs)
    return strategy(case, contacts, case_factors)
# TODO: turn all these comments into (google style) docstring
class TTIFlowModel:
"""
This is an implementation of flowchart produced by <NAME> and <NAME>
"""
    def __init__(
        self,
        rng,
        isolate_individual_on_symptoms, # Isolate the individual after they present with symptoms
        isolate_individual_on_positive, # Isolate the individual after they test positive
        isolate_household_on_symptoms, # Isolate the household after individual present with symptoms
        isolate_household_on_positive, # Isolate the household after individual test positive
        isolate_contacts_on_symptoms, # Isolate the contacts after individual present with symptoms
        isolate_contacts_on_positive, # Isolate the contacts after individual test positive
        test_contacts_on_positive, # Do we test contacts of a positive case immediately, or wait for them to develop symptoms
        do_symptom_testing, # Test symptomatic individuals
        app_cov, # Probability of tracing contact through app
        testing_delay, # Delay between test and results
        do_manual_tracing, # Perform manual tracing of contacts
        do_app_tracing, # Perform app tracing of contacts
        app_trace_delay, # Delay associated with tracing through the app
        manual_trace_delay, # Delay associated with tracing manually
        manual_home_trace_prob, # Probability of manually tracing a home contact
        manual_work_trace_prob, # Probability of manually tracing a work contact
        manual_othr_trace_prob, # Probability of manually tracing an other contact
        met_before_w, # Probability of having met a work contact before to be able to manually trace
        met_before_s, # Probability of having met a school contact before to be able to manually trace
        met_before_o, # Probability of having met a other contact before to be able to manually trace
        max_contacts, # Place a limit on the number of other contacts per day
        fractional_infections, # Include infected but traced individuals as a fraction of their infection period not isolated
        quarantine_length, # Length of quarantine imposed on COVID cases (and household)
        compliance, # Probability that a traced contact adheres to isolation
        latent_period, # Length of a cases incubation period (from infection to start of infectious period)
    ):
        """Store the policy configuration of the test-trace-isolate flow.

        ``rng`` must be a numpy-style random generator (its ``binomial``
        method is used for all sampling); the remaining arguments are
        policy knobs documented inline above and are stored verbatim.
        """
        self.rng = rng
        self.isolate_individual_on_symptoms = isolate_individual_on_symptoms
        self.isolate_individual_on_positive = isolate_individual_on_positive
        self.isolate_household_on_symptoms = isolate_household_on_symptoms
        self.isolate_household_on_positive = isolate_household_on_positive
        self.isolate_contacts_on_symptoms = isolate_contacts_on_symptoms
        self.isolate_contacts_on_positive = isolate_contacts_on_positive
        self.test_contacts_on_positive = test_contacts_on_positive
        self.do_symptom_testing = do_symptom_testing
        self.app_cov = app_cov
        self.testing_delay = testing_delay
        self.do_manual_tracing = do_manual_tracing
        self.do_app_tracing = do_app_tracing
        self.app_trace_delay = app_trace_delay
        self.manual_trace_delay = manual_trace_delay
        self.manual_home_trace_prob = manual_home_trace_prob
        self.manual_work_trace_prob = manual_work_trace_prob
        self.manual_othr_trace_prob = manual_othr_trace_prob
        self.met_before_w = met_before_w
        self.met_before_s = met_before_s
        self.met_before_o = met_before_o
        self.max_contacts = max_contacts
        self.fractional_infections = fractional_infections
        self.quarantine_length = quarantine_length
        self.compliance = compliance
        self.latent_period = latent_period
def _trace_contacts(
self, case, do_tracing, trace_prob, n_contacts, contacts_prevented
):
contacts_traced = np.zeros(shape=n_contacts, dtype=bool)
if do_tracing:
if self.isolate_contacts_on_symptoms or (
self.isolate_contacts_on_positive and case.covid
):
contacts_traced = self.rng.binomial(
n=1, p=trace_prob, size=n_contacts
).astype(bool)
contacts_traced = contacts_traced & ~contacts_prevented
return contacts_traced
def _isolate_contacts(self, n_contacts, contacts_traced, contacts_prevented):
# Work out if each contact will adhere to the policy
contacts_adherence = self.rng.binomial(
n=1, p=self.compliance, size=n_contacts
).astype(bool)
# Compute which contact will isolate because of the contact trace
contacts_isolated = contacts_traced & contacts_adherence & ~contacts_prevented
return contacts_isolated
def _count_symptomatic_asymptomatic(
self, n_contacts, contacts_infected, contacts_isolated
):
contacts_symptomatic = (
self.rng.binomial(
n=1, p=config.PROP_COVID_SYMPTOMATIC, size=n_contacts
).astype(bool)
& contacts_infected
)
symptomatic = contacts_isolated & contacts_infected & contacts_symptomatic
asymptomatic = contacts_isolated & contacts_infected & ~contacts_symptomatic
return symptomatic, asymptomatic
def _count_contacts_quarantine_days(
self,
case,
isolate_on_symptoms,
isolate_on_positive,
contacts_isolated,
test_contacts_on_positive,
contacts_tested_positive,
):
# NOTE: working with "quarantined" as this represents those traced who were still contacted and complied
if isolate_on_symptoms and not case.covid:
return self.testing_delay * contacts_isolated.sum()
elif (isolate_on_symptoms or isolate_on_positive) and case.covid:
# NOTE: for now assume that people are tested on the same day as isolated.
# If we are testing contacts on positive, then we will only need to quarantine those who are positive after the test
# TODO: testing might be inefficient during latent period, perhaps we should quarantine contacts for latent_period and then test?
if test_contacts_on_positive:
# Those testing negative will spend testing_delay days in quarantine
test_isolation_days = (
self.testing_delay
* (contacts_isolated & ~contacts_tested_positive).sum()
)
# Those who test positive will go into full quarantine
quarantine_days = (
self.quarantine_length
* (contacts_isolated & contacts_tested_positive).sum()
)
return test_isolation_days + quarantine_days
# Full quarantine for all contacts if not testing them
return self.quarantine_length * contacts_isolated.sum()
else:
return 0
def _get_contact_trace_delay(
self,
contacts_trace_app,
contact_infections,
contacts_isolated,
contact_infection_days,
):
# Trace delay is at max the manual trace delay
trace_delay = self.manual_trace_delay * np.ones_like(contact_infection_days)
# Contacts found via the app are traced with app_delay - assumed to be faster than manual
contacts_trace_app_isolated = contacts_trace_app[
contact_infections & contacts_isolated
]
trace_delay[contacts_trace_app_isolated] = self.app_trace_delay
return trace_delay
    def _get_fractional_metrics(
        self,
        case,
        test_perform_day,
        test_results_day,
        infection_days,
        trace_delay,
        isolate_on_symptoms,
        isolate_on_positive,
    ):
        """Credit secondary infections for the fraction of their infectious
        period spent outside quarantine.

        Returns:
            (fractional_num, contact_cumulative_infectiousness): the number of
            contacts considered, and their summed cumulative infectiousness
            accrued before being isolated.
        """
        # assumes case.inf_profile is a per-day infectiousness array -- TODO confirm
        infectiousness_by_day = case.inf_profile
        cumulative_infectiousness = np.cumsum(infectiousness_by_day)
        infectious_period = len(cumulative_infectiousness)
        # Compute day of contact becoming infectious after case started being infectious
        infectious_start = infection_days + self.latent_period
        if isolate_on_symptoms:
            days_not_quarantined = (test_perform_day + trace_delay) - infectious_start
        elif isolate_on_positive:
            days_not_quarantined = (test_results_day + trace_delay) - infectious_start
        else:
            # If neither of these are true, then the case would not have made it to here;
            # treat the whole infectious period as unisolated
            days_not_quarantined = infectious_period * np.ones(
                len(infectious_start), dtype=int
            )
        # clip the infectious period to max out at 1
        days_not_quarantined[
            days_not_quarantined > infectious_period
        ] = infectious_period
        # NOTE: counted before filtering, so zero-day contacts are included
        fractional_num = len(days_not_quarantined)
        # Only care about ones where there is more than zero days spent unisolated
        days_not_quarantined = days_not_quarantined[days_not_quarantined > 0]
        days_not_quarantined = days_not_quarantined.astype('int64')
        # Add one to get indexing correct - 1st day infectious is 0 in array
        cd = cumulative_infectiousness[days_not_quarantined - 1]
        contact_cumulative_infectiousness = cd.sum()
        return fractional_num, contact_cumulative_infectiousness
def tti_chronology(self, case, case_factors):
"""Calculate if and when a case was tested and isolated
Args:
case:
case_factors:
Returns:
"""
test_performed = case_factors.report_app or case_factors.report_manual
if not test_performed:
return test_performed, float("inf"), float("inf"), float("inf")
else:
test_perform_day = case.day_noticed_symptoms
test_results_day = test_perform_day + self.testing_delay
if self.isolate_individual_on_symptoms:
isolate_day = test_perform_day
elif self.isolate_individual_on_positive and case.covid:
isolate_day = test_results_day
else:
# Don't isolate, set to something beyond simulation horizon
# Float is fine here as long as used only for comparison
isolate_day = float("inf")
return test_performed, test_perform_day, test_results_day, isolate_day
def __call__(self, case, contacts, case_factors):  # noqa: C901
    """Simulate the test-trace-isolate policy for one primary case.

    Args:
        case: primary case record (this method reads .covid, .symptomatic
            and .category; day_noticed_symptoms is used via tti_chronology).
        contacts: per-category contact arrays (home/work/other). Column 0
            holds the infection day (negative when no infection occurred);
            column 1 holds the contact day.
        case_factors: behavioural factors (.wfh, .report_app, .report_manual).

    Returns:
        Dict keyed by RETURN_KEYS with base/reduced reproduction numbers,
        trace/test/quarantine counts and infections prevented attributed
        to social distancing, symptom isolation and contact tracing.
    """
    (
        test_performed,
        test_perform_day,
        test_results_day,
        isolate_day,
    ) = self.tti_chronology(case, case_factors)
    # Days on which individual made contact with their contacts. For home, earliest day of infectivity.
    # home_contacts = contacts.home[:, 1]
    work_contacts = contacts.work[:, 1]
    othr_contacts = contacts.other[:, 1]
    # Get the day on which a household member was infected
    home_infected_day = contacts.home[:, 0]
    # Get if an infection was caused in contacts (negative day == not infected)
    home_infections = (contacts.home[:, 0] >= 0).astype(bool)
    work_infections = (contacts.work[:, 0] >= 0).astype(bool)
    othr_infections = (contacts.other[:, 0] >= 0).astype(bool)
    n_home = home_infections.shape[0]
    n_work = work_infections.shape[0]
    n_othr = othr_infections.shape[0]
    # Compute reduction in contacts due to contact limiting policy. Independent of test status.
    othr_contacts_limited = ~_limit_contact(othr_contacts, self.max_contacts)
    # Compute reduction in contacts due to wfh. Independent of test status.
    if case_factors.wfh:
        work_contacts_wfh_limited = np.ones_like(work_contacts).astype(bool)
    else:
        work_contacts_wfh_limited = np.zeros_like(work_contacts).astype(bool)
    if test_performed:
        # Prevent contact after isolation day
        home_contacts_prevented = (home_infected_day >= isolate_day).astype(bool)
        work_contacts_prevented = (work_contacts >= isolate_day).astype(bool)
        othr_contacts_prevented = (othr_contacts >= isolate_day).astype(bool)
        # Remove contacts not made due to work from home
        work_contacts_prevented = (
            work_contacts_prevented | work_contacts_wfh_limited
        )
        # Remove other contact limiting contacts
        othr_contacts_prevented = othr_contacts_prevented | othr_contacts_limited
        # TRACING CONTACTS: app tracing only when the case reported via app.
        work_contacts_trace_app = self._trace_contacts(
            case,
            self.do_app_tracing and case_factors.report_app,
            self.app_cov,
            n_work,
            work_contacts_prevented,
        )
        othr_contacts_trace_app = self._trace_contacts(
            case,
            self.do_app_tracing and case_factors.report_app,
            self.app_cov,
            n_othr,
            othr_contacts_prevented,
        )
        # Even if the primary case reported symptoms via the app, we do manual tracing anyway as a safety net
        work_contacts_trace_manual = self._trace_contacts(
            case,
            self.do_manual_tracing,
            self.manual_work_trace_prob
            * (
                self.met_before_s
                if (case.category == 0 or case.category == 1)
                else self.met_before_w
            ),
            n_work,
            work_contacts_prevented,
        )
        othr_contacts_trace_manual = self._trace_contacts(
            case,
            self.do_manual_tracing,
            self.manual_othr_trace_prob * self.met_before_o,
            n_othr,
            othr_contacts_prevented,
        )
        # Assume all home contacts traced
        if self.isolate_household_on_symptoms or (
            self.isolate_household_on_positive and case.covid
        ):
            # NOTE(review): ones_like(n_home, ...) on a scalar yields a 0-d
            # array, unlike the zeros(shape=n_home) below which is length
            # n_home — presumably broadcasting hides this; confirm intended.
            home_contacts_traced = np.ones_like(n_home, dtype=bool)
        else:
            home_contacts_traced = np.zeros(shape=n_home, dtype=bool)
        # Traced if traced either way and didn't isolate and prevent contact
        work_contacts_traced = work_contacts_trace_app | work_contacts_trace_manual
        othr_contacts_traced = othr_contacts_trace_app | othr_contacts_trace_manual
        # Compute trace statistics
        # Only trace if we want to isolate
        if self.isolate_contacts_on_symptoms or (
            self.isolate_contacts_on_positive and case.covid
        ):
            manual_traces = (
                work_contacts_trace_manual.sum() + othr_contacts_trace_manual.sum()
            )
            app_traces = (
                work_contacts_trace_app.sum() + othr_contacts_trace_app.sum()
            )
        else:
            manual_traces = 0.0
            app_traces = 0.0
        home_contacts_isolated = self._isolate_contacts(
            n_home, home_contacts_traced, home_contacts_prevented
        )
        work_contacts_isolated = self._isolate_contacts(
            n_work, work_contacts_traced, work_contacts_prevented
        )
        othr_contacts_isolated = self._isolate_contacts(
            n_othr, othr_contacts_traced, othr_contacts_prevented
        )
        # Do tests on the positive contacts if we want to, and find out which are asymptomatic
        if self.test_contacts_on_positive:
            (
                work_tested_symptomatic,
                work_tested_asymptomatic,
            ) = self._count_symptomatic_asymptomatic(
                n_work, work_infections, work_contacts_isolated
            )
            # NOTE(review): symptomatic & asymptomatic masks look mutually
            # exclusive, which would make this all-False — confirm `&` vs `|`.
            work_tested_positive = (
                work_tested_symptomatic & work_tested_asymptomatic
            )
            (
                othr_tested_symptomatic,
                othr_tested_asymptomatic,
            ) = self._count_symptomatic_asymptomatic(
                n_othr, othr_infections, othr_contacts_isolated
            )
            othr_tested_positive = (
                othr_tested_symptomatic & othr_tested_asymptomatic
            )
        else:
            work_tested_positive = None
            othr_tested_positive = None
        total_tests_performed = 0
        # count own test
        # TODO: Janky - if no contact tracing is going on, do NOT test the person
        if any(
            [
                self.do_app_tracing,
                self.do_manual_tracing,
                self.isolate_contacts_on_positive,
                self.isolate_contacts_on_symptoms,
            ]
        ):
            total_tests_performed += 1
        # If house isolated on symptoms, or on positive
        # These tests will not count against the primary case, as these would have been tested regardless.
        if case.covid:
            total_tests_performed += 0.0  # home_contacts_isolated.sum()
        # If contacts isolated on symptoms, or on positive
        # TODO: Again, after conversations, we will not test traced contacts unless a particular policy decision is made.
        # We do not count cases that would become positive and symptomatic against the primary case, but do count others.
        if case.covid:  # and test_contacts_on_positive:
            total_tests_performed += (
                0  # work_contacts_isolated.sum() + othr_contacts_isolated.sum()
            )
        # Test contacts on positive test of the primary case. Only count the test excluding the symptomatic cases
        if self.test_contacts_on_positive and case.covid:
            total_tests_performed += (
                work_contacts_isolated & ~work_tested_symptomatic
            ).sum()
            total_tests_performed += (
                othr_contacts_isolated & ~othr_tested_symptomatic
            ).sum()
        # Compute the quarantine days
        person_days_quarantine = 0
        # If person has covid, require full quarantine
        if case.covid and (
            self.isolate_individual_on_symptoms
            or self.isolate_individual_on_positive
        ):
            person_days_quarantine += self.quarantine_length
        # If not, only require the test delay days of quarantine
        elif self.isolate_individual_on_symptoms:
            person_days_quarantine += self.testing_delay
        # Don't add any if: not isolating at all, individual only isolating after test complete
        person_days_quarantine += self._count_contacts_quarantine_days(
            case,
            self.isolate_household_on_symptoms,
            self.isolate_household_on_positive,
            home_contacts_isolated,
            # irrelevant parameters for household
            test_contacts_on_positive=False,
            contacts_tested_positive=None,
        )
        person_days_quarantine += self._count_contacts_quarantine_days(
            case,
            self.isolate_contacts_on_symptoms,
            self.isolate_contacts_on_positive,
            work_contacts_isolated,
            self.test_contacts_on_positive,
            work_tested_positive,
        )
        person_days_quarantine += self._count_contacts_quarantine_days(
            case,
            self.isolate_contacts_on_symptoms,
            self.isolate_contacts_on_positive,
            othr_contacts_isolated,
            self.test_contacts_on_positive,
            othr_tested_positive,
        )
    else:
        # No tracing took place if they didn't get tested positive.
        home_contacts_isolated = np.zeros(shape=n_home, dtype=bool)
        work_contacts_isolated = np.zeros(shape=n_work, dtype=bool)
        othr_contacts_isolated = np.zeros(shape=n_othr, dtype=bool)
        # Default cases prevented (none)
        home_contacts_prevented = np.zeros(shape=n_home, dtype=bool)
        work_contacts_prevented = work_contacts_wfh_limited
        othr_contacts_prevented = othr_contacts_limited
        manual_traces = 0
        app_traces = 0
        total_tests_performed = 0
        person_days_quarantine = 0
    # Compute the base reproduction rate
    base_rr = home_infections.sum() + work_infections.sum() + othr_infections.sum()
    # Compute the reproduction rate due to the policy
    # Remove infections due to case isolation
    home_infections_post_isolation = home_infections & ~home_contacts_prevented
    work_infections_post_isolation = work_infections & ~work_contacts_prevented
    othr_infections_post_isolation = othr_infections & ~othr_contacts_prevented
    # Count traced contacts as not included in the R TODO: make a proportion
    home_infections_post_policy = (
        home_infections_post_isolation & ~home_contacts_isolated
    )
    work_infections_post_policy = (
        work_infections_post_isolation & ~work_contacts_isolated
    )
    othr_infections_post_policy = (
        othr_infections_post_isolation & ~othr_contacts_isolated
    )
    # Count fractional cases - will only occur if got tested
    if test_performed and self.fractional_infections:
        # Get the days on which infections that were quarantined happened
        home_infection_days = home_infected_day[
            home_infections & home_contacts_isolated
        ]
        work_infection_days = work_contacts[
            work_infections & work_contacts_isolated
        ]
        othr_infection_days = othr_contacts[
            othr_infections & othr_contacts_isolated
        ]
        work_trace_delay = self._get_contact_trace_delay(
            work_contacts_trace_app,
            work_infections,
            work_contacts_isolated,
            work_infection_days,
        )
        othr_trace_delay = self._get_contact_trace_delay(
            othr_contacts_trace_app,
            othr_infections,
            othr_contacts_isolated,
            othr_infection_days,
        )
        # Home contacts traced immediately
        home_trace_delay = np.zeros_like(home_infection_days)
        (
            fractional_num_home,
            home_cumulative_infectiousness,
        ) = self._get_fractional_metrics(
            case,
            test_perform_day,
            test_results_day,
            home_infection_days,
            home_trace_delay,
            self.isolate_household_on_symptoms,
            self.isolate_household_on_positive,
        )
        (
            fractional_num_work,
            work_cumulative_infectiousness,
        ) = self._get_fractional_metrics(
            case,
            test_perform_day,
            test_results_day,
            work_infection_days,
            work_trace_delay,
            self.isolate_contacts_on_symptoms,
            self.isolate_contacts_on_positive,
        )
        (
            fractional_num_othr,
            othr_cumulative_infectiousness,
        ) = self._get_fractional_metrics(
            case,
            test_perform_day,
            test_results_day,
            othr_infection_days,
            othr_trace_delay,
            self.isolate_contacts_on_symptoms,
            self.isolate_contacts_on_positive,
        )
        # fractional_num = (
        #     fractional_num_home + fractional_num_work + fractional_num_othr
        # )
        fractional_R = (
            home_cumulative_infectiousness
            + work_cumulative_infectiousness
            + othr_cumulative_infectiousness
        )
        # inverse_fractional_R = fractional_num - fractional_R
        # home_fractional_R = home_cumulative_infectiousness
        home_inverse_fractional_R = (
            fractional_num_home - home_cumulative_infectiousness
        )
    else:
        fractional_R = 0.0
        home_cumulative_infectiousness = 0.0
        # inverse_fractional_R = 0.0
        # home_fractional_R = 0.0
        home_inverse_fractional_R = 0.0
    # Count the reduced infection rate
    reduced_rr = (
        home_infections_post_policy.sum()
        + work_infections_post_policy.sum()
        + othr_infections_post_policy.sum()
        + fractional_R
    )
    social_distancing_infections_prevented = (
        work_contacts_wfh_limited & work_infections
    ).sum() + (othr_contacts_limited & othr_infections).sum()
    symptom_isolation_infections_prevented = (
        (home_contacts_prevented & home_infections).sum()
        + (work_contacts_prevented & work_infections).sum()
        + (othr_contacts_prevented & othr_infections).sum()
        + home_inverse_fractional_R
        - social_distancing_infections_prevented
    )
    # Whatever reduction remains after distancing and isolation is credited
    # to contact tracing.
    contact_tracing_infections_prevented = (
        base_rr
        - reduced_rr
        - social_distancing_infections_prevented
        - symptom_isolation_infections_prevented
    )
    return {
        RETURN_KEYS.base_r: base_rr if case.covid else np.nan,
        RETURN_KEYS.reduced_r: reduced_rr if case.covid else np.nan,
        RETURN_KEYS.man_trace: manual_traces,
        RETURN_KEYS.app_trace: app_traces,
        RETURN_KEYS.tests: total_tests_performed,
        RETURN_KEYS.quarantine: person_days_quarantine,
        RETURN_KEYS.covid: case.covid,
        RETURN_KEYS.symptomatic: case.symptomatic,
        RETURN_KEYS.tested: test_performed and self.do_symptom_testing,
        RETURN_KEYS.secondary_infections: home_infections.sum()
        + work_infections.sum()
        + othr_infections.sum(),
        RETURN_KEYS.cases_prevented_social_distancing: social_distancing_infections_prevented,
        RETURN_KEYS.cases_prevented_symptom_isolating: symptom_isolation_infections_prevented,
        RETURN_KEYS.cases_prevented_contact_tracing: contact_tracing_infections_prevented,
        RETURN_KEYS.fractional_r: fractional_R - home_cumulative_infectiousness,
    }
# ---------- file boundary (dataset concatenation artifact) ----------
#!/usr/local/bin/python
"""Automated WRF forecast run.

Fetches the latest GFS cycle, builds WPS/WRF namelists from templates,
runs geogrid/ungrib/metgrid/real locally, then moves the run to shared
storage and executes wrf.exe via mpiexec. Output and logs are copied
back to the launch directory.
"""
from argparse import ArgumentParser
from datetime import datetime, timedelta
from os import chdir, getcwd, mkdir, system
from shutil import rmtree
from sys import exit
from time import time
# Used for the GRIB download below; replaces the broken ftplib usage.
from urllib.request import urlretrieve

arg = ArgumentParser()
#arg.add_argument('-s', help="Start Date")
#arg.add_argument('-e', help="End Date")
arg.add_argument('-d', help="Max Domains")
arg.add_argument('-t', help="Namelist Template Directory")
arg.add_argument('-H', help="Path to host list file")
args = arg.parse_args()

NOW = datetime.now()
DIR_OUT = getcwd() + '/'
DIR_LOG = DIR_OUT + 'logs/'
DIR_LOCAL_TMP = '/tmp/%s/' % NOW.strftime('%Y-%m-%d_%H-%M-%S')
DIR_REMOTE_TMP = '/usr/local/wrf/tmp/%s/' % NOW.strftime('%Y-%m-%d_%H-%M-%S')
DIR_GFS = DIR_LOCAL_TMP + 'gfs/'
DIR_WRF_ROOT = '/usr/local/wrf/%s/'
DIR_WPS = DIR_WRF_ROOT % 'WPS'
DIR_WRF = DIR_WRF_ROOT % 'WRFV3/test/em_real'
DIR_WPS_GEOG = DIR_WRF_ROOT % 'WPS_GEOG'
if args.t is not None:
    DIR_TEMPLATES = args.t + '/'
else:
    DIR_TEMPLATES = DIR_WRF_ROOT % 'templates'

# Shell command templates.
CMD_LN = 'ln -sf %s %s'
CMD_CP = 'cp %s %s'
CMD_MV = 'mv %s %s'
CMD_CHMOD = 'chmod -R %s %s'
CMD_LINK_GRIB = DIR_WPS + 'link_grib.csh ./gfs/gfs.'
CMD_GEOGRID = DIR_WPS + 'geogrid.exe >& geogrid.exe.log'
CMD_UNGRIB = DIR_WPS + 'ungrib.exe >& ungrib.exe.log'
CMD_METGRID = DIR_WPS + 'metgrid.exe >& metgrid.exe.log'
CMD_REAL = DIR_WRF + 'real.exe >& real.exe.log'
CMD_WRF = (
    'time /usr/local/wrf/LIBRARIES/mpich/bin/mpiexec -f %s %swrf.exe >& wrf.exe.log'
    % (DIR_TEMPLATES + 'hosts', DIR_WRF)
)
FTP_PATH = 'ftp://ftpprd.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/gfs.%s/'

# BUG FIX: args.d is a string (or None); the original compared the raw
# string against an int ("args.d > 0"), which raises TypeError on Python 3.
if args.d is not None and int(args.d) > 0:
    MAX_DOMAINS = int(args.d)
else:
    MAX_DOMAINS = 1

try:
    with open(DIR_TEMPLATES + 'namelist.wps', 'r') as namelist:
        NAMELIST_WPS = namelist.read()
    with open(DIR_TEMPLATES + 'namelist.input', 'r') as namelist:
        NAMELIST_WRF = namelist.read()
except OSError:  # narrowed from a bare except
    print('Error reading namelist files')
    exit()

# Recreate scratch directories; removal of leftovers is best-effort.
try:
    rmtree(DIR_LOCAL_TMP)
except OSError:
    pass
mkdir(DIR_LOCAL_TMP)
try:
    rmtree(DIR_LOG)
except OSError:
    pass
mkdir(DIR_LOG)
mkdir(DIR_GFS)
chdir(DIR_LOCAL_TMP)

# WPS links
system('; '.join([
    CMD_LN % (DIR_WPS + 'geogrid', './'),
    CMD_LN % (DIR_WPS + 'metgrid', './'),
    CMD_LN % (DIR_WPS + 'ungrib/Variable_Tables/Vtable.GFSPARA', 'Vtable'),
]))
# WRF links
system('; '.join([
    CMD_LN % (DIR_WRF + '*.TBL', './'),
    CMD_LN % (DIR_WRF + '*_DATA', './'),
]))

# Pick the most recent GFS cycle (00/06/12/18Z) that has already started.
z = (NOW.hour // 6) * 6
run_date = NOW.replace(hour=z, minute=0, second=0, microsecond=0)
# BUG FIX: the original used date.replace(day=day + 1), which raises
# ValueError on the last day of a month; timedelta handles the rollover.
forecast_start = (run_date + timedelta(days=1)).replace(hour=6)
forecast_end = (forecast_start + timedelta(days=1)).replace(hour=9)


def _per_domain(token):
    """Repeat one namelist value per domain (each token ends with ', ')."""
    return token * MAX_DOMAINS


# WPS namelist: start/end dates repeated per domain.
wps_dates = ' start_date = ' + _per_domain(
    forecast_start.strftime("'%Y-%m-%d_%H:%M:%S', "))
wps_dates += '\n end_date = ' + _per_domain(
    forecast_end.strftime("'%Y-%m-%d_%H:%M:%S', "))
with open('namelist.wps', 'w') as namelist:
    namelist.write(NAMELIST_WPS.replace('%DATES%', wps_dates))

# WRF namelist: the same dates broken into per-field columns.
wrf_dates = ' start_year = ' + _per_domain(forecast_start.strftime('%Y, '))
wrf_dates += '\n start_month = ' + _per_domain(forecast_start.strftime('%m, '))
wrf_dates += '\n start_day = ' + _per_domain(forecast_start.strftime('%d, '))
wrf_dates += '\n start_hour = ' + _per_domain(forecast_start.strftime('%H, '))
wrf_dates += '\n start_minute = ' + _per_domain('00, ')
wrf_dates += '\n start_second = ' + _per_domain('00, ')
wrf_dates += '\n end_year = ' + _per_domain(forecast_end.strftime('%Y, '))
wrf_dates += '\n end_month = ' + _per_domain(forecast_end.strftime('%m, '))
wrf_dates += '\n end_day = ' + _per_domain(forecast_end.strftime('%d, '))
wrf_dates += '\n end_hour = ' + _per_domain(forecast_end.strftime('%H, '))
wrf_dates += '\n end_minute = ' + _per_domain('00, ')
wrf_dates += '\n end_second = ' + _per_domain('00, ')
with open('namelist.input', 'w') as namelist:
    namelist.write(NAMELIST_WRF.replace('%DATES%', wrf_dates))

# Download the GFS GRIB files for forecast hours 0..72, 3-hourly.
start_date = run_date.strftime('%Y%m%d%H')
local_gfs = DIR_GFS + 'gfs.' + start_date
remote_gfs = FTP_PATH % start_date
startTime = int(time())
for hour in range(0, 75, 3):
    hr = '%03d' % hour
    #file_name = 'gfs.t%sz.pgrbf%s.grib2' % (start_date[-2:], hr)
    file_name = 'gfs.t%sz.pgrb2.1p00.f%s' % (start_date[-2:], hr)
    # BUG FIX: ftplib.FTP has no getMulti(); fetch each file directly over
    # FTP via urllib instead.
    urlretrieve(remote_gfs + file_name, local_gfs + file_name)
# Link the grib files
system(CMD_LINK_GRIB)
print('GFS retrieved in: %d' % (int(time()) - startTime))


def _timed(label, command):
    """Run a shell command and report its wall-clock duration."""
    begin = int(time())
    system(command)
    print('%s ran in: %d' % (label, int(time()) - begin))


_timed('Geogrid', CMD_GEOGRID)
_timed('Ungrib', CMD_UNGRIB)
_timed('Metgrid', CMD_METGRID)
_timed('Real', CMD_REAL)

# Preserve real.exe logs before the directory is moved.
mkdir(DIR_LOG + 'real/')
system(CMD_CP % (DIR_LOCAL_TMP + 'rsl.*', DIR_LOG + 'real/'))

# Move the run to shared storage and execute WRF there.
startTime = int(time())
system(CMD_MV % (DIR_LOCAL_TMP, DIR_REMOTE_TMP))
chdir(DIR_REMOTE_TMP)
system(CMD_CHMOD % ('777', DIR_REMOTE_TMP))
print('Files copied in: %d' % (int(time()) - startTime))

_timed('WRF', CMD_WRF)

# Collect model output and logs back to the launch directory.
system('; '.join([
    CMD_CP % (DIR_REMOTE_TMP + 'wrfout*', DIR_OUT),
    CMD_CP % (DIR_REMOTE_TMP + '*.log rsl.*', DIR_LOG),
]))
#rmtree(DIR_REMOTE_TMP)
# ---------- file boundary (dataset concatenation artifact) ----------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 <NAME> <thiebaud at weksteen dot fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# In OpenBSD 6.1, softraid crypto switched to bcrypt PBKDF instead of PKCS5
# PBKDF2.
#
# References,
#
# http://thiébaud.fr/openbsd_softraid.html
# http://www.openbsd.org/faq/upgrade61.html
# https://github.com/openbsd/src/blob/master/sys/dev/softraid.c
# https://github.com/openbsd/src/blob/master/sys/dev/softraidvar.h
import sys
import struct
import os.path
from binascii import hexlify
# softraid key-derivation function types (sys/dev/softraidvar.h).
SR_CRYPTOKDFT_PKCS5_PBKDF2 = 1
SR_CRYPTOKDFT_KEYDISK = 2
SR_CRYPTOKDFT_BCRYPT_PBKDF = 3


def process_file(filename):
    """Extract a John-the-Ripper "$openbsd-softraid$" hash from a disk image.

    Writes one hash line to stdout on success; diagnostic messages go to
    stderr. Offsets below index into the softraid metadata block
    (sr_metadata / sr_meta_crypto structures).
    """
    # BUG FIX: the metadata is binary — read as bytes. The original opened
    # the image in text mode and compared str against raw data, which on
    # Python 3 either fails to decode or never matches.
    with open(filename, "rb") as f:
        headers = f.read()[:0xaa0 + 81920]
    # The metadata block may not start at offset 0; scan for the magic.
    start = headers.find(b"marcCRAM")
    if start != -1:
        headers = headers[start:]
    if headers[:8] != b"marcCRAM":
        sys.stderr.write(filename + " : Wrong magic\n")
        return
    if headers[72:81] != b"SR CRYPTO":
        sys.stderr.write(filename + " : Wrong RAID type\n")
        return
    # Indexing bytes yields an int on Python 3, so compare against ints.
    if headers[260] != 0x01:
        sys.stderr.write(filename + " : Wrong optional header type\n")
        return
    if headers[284] != 0x02:
        sys.stderr.write(filename + " : Wrong encryption type\n")
        return
    sr_crypto_genkdf_type = struct.unpack("<I", headers[2416:2420])[0]
    if sr_crypto_genkdf_type not in (
        SR_CRYPTOKDFT_PKCS5_PBKDF2,
        SR_CRYPTOKDFT_BCRYPT_PBKDF,
    ):
        sys.stderr.write("%s : kdf of type '%s' is not supported yet!\n" %
                         (os.path.basename(filename), sr_crypto_genkdf_type))
        return
    sys.stdout.write(os.path.basename(filename) + ":$openbsd-softraid$")
    # num_iterations and salt come from the "scm_kdfhint" field
    num_iterations = struct.unpack("<I", headers[2420:2424])[0]
    sys.stdout.write(str(num_iterations) + "$")
    # hexlify returns bytes on Python 3; decode for str concatenation.
    sys.stdout.write(hexlify(headers[2424:2552]).decode() + "$")  # salt
    # masked keys, sr_meta_crypto structure
    sys.stdout.write(hexlify(headers[364:2412]).decode() + "$")
    # HMAC, chk_hmac_sha1 field
    sys.stdout.write(hexlify(headers[2676:2696]).decode())
    sys.stdout.write("$%s\n" % sr_crypto_genkdf_type)
if __name__ == "__main__":
    # Require at least one image path; otherwise print usage and bail out.
    if len(sys.argv) < 2:
        sys.stdout.write("Usage: openbsd_softraid2john [disk image]\n")
        sys.exit(-1)
    for image_path in sys.argv[1:]:
        process_file(image_path)
# ---------- file boundary (dataset concatenation artifact) ----------
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
def sample_program_configs(draw):
    """Draw a test program: flatten_contiguous_range feeding an fc op.

    The flatten output becomes the fc "Input"; weight and bias shapes are
    derived from the flattened shape so the matmul is well-formed.
    """
    in_shape = draw(
        st.lists(
            st.integers(
                min_value=1, max_value=8), min_size=4, max_size=4))
    in_num_col_dims = draw(st.integers(min_value=1, max_value=1))
    # Drawn only to keep the strategy's draw sequence stable; the fc op
    # under test requires padding_weights == False (set in attrs below).
    padding_weights = draw(st.integers(min_value=1, max_value=1))
    start_axis = draw(st.integers(min_value=0, max_value=len(in_shape) - 1))
    stop_axis = draw(
        st.integers(
            min_value=start_axis, max_value=len(in_shape) - 1))
    assume((stop_axis - start_axis) == 2)
    # The case is pinned to start_axis == 1, so the flattened shape always
    # keeps the batch dimension. (Removed the dead `if start_axis == 0`
    # branch that could never execute after this assignment.)
    # NOTE(review): stop_axis keeps its drawn value, so when the draw was
    # (0, 2) the flatten op collapses axes 1..2 while the shapes below
    # assume axes 1..3 — confirm this combination is intended.
    start_axis = 1
    flatten_out_shape = [
        in_shape[0], in_shape[1] * in_shape[2] * in_shape[3]
    ]
    weights_0 = 1
    weights_1 = 1
    for i in range(len(flatten_out_shape)):
        if (i < in_num_col_dims):
            weights_1 = weights_1 * flatten_out_shape[i]
        else:
            weights_0 = weights_0 * flatten_out_shape[i]
    weights_shape = [weights_0, weights_1]
    bias_shape = [weights_1]
    flatten_op = OpConfig(
        type='flatten_contiguous_range',
        inputs={"X": ["input_data_x"]},
        outputs={"Out": ["flatten_output_data"],
                 "XShape": ["xshape_data"]},
        attrs={
            "data_format": 'nchw',
            "start_axis": start_axis,
            "stop_axis": stop_axis
        })
    fc_inputs = {}
    program_inputs = {}

    def generate_weights(*args, **kwargs):
        # Uniform in [-1, 1).
        return (np.random.random(weights_shape).astype(np.float32) - 0.5) * 2

    def generate_bias(*args, **kwargs):
        return (np.random.random(bias_shape).astype(np.float32) - 0.5) * 2

    with_bias = draw(st.sampled_from([True]))  #pass require with_bias as True
    act_type = ""
    if (with_bias and np.random.random() > 0.5):
        act_type = "relu"
    if (with_bias):
        fc_inputs = {
            "Input": ["flatten_output_data"],
            "W": ["weights_data"],
            "Bias": ["bias_data"]
        }
        program_inputs = {
            "input_data_x": TensorConfig(shape=in_shape),
            "weights_data": TensorConfig(data_gen=partial(generate_weights)),
            "bias_data": TensorConfig(data_gen=partial(generate_bias))
        }
    else:
        fc_inputs = {"Input": ["flatten_output_data"], "W": ["weights_data"]}
        program_inputs = {
            "input_data_x": TensorConfig(shape=in_shape),
            "weights_data": TensorConfig(data_gen=partial(generate_weights))
        }
    fc_op = OpConfig(
        type='fc',
        inputs=fc_inputs,
        outputs={"Out": ["output_data"]},
        attrs={
            "in_num_col_dims": in_num_col_dims,
            # BUG FIX: "padding_weights" appeared twice in this dict; the
            # second, hard-coded False silently overwrote the drawn value.
            # Keep the effective value (False) exactly once.
            "padding_weights": False,
            "activation_type": act_type,
            "use_mkldnn": False,
            "use_quantizer": False,
            "Scale_in": float(1),
            "Scale_weights": [float(1)],
            "Scale_out": float(1)
        })
    ops = [flatten_op, fc_op]
    program_config = ProgramConfig(
        ops=ops,
        weights={"xshape_data": TensorConfig(shape=in_shape)},
        inputs=program_inputs,
        outputs=["output_data"])
    return program_config
# ---------- file boundary (dataset concatenation artifact) ----------
# repository: NixGD/ergo (gh_stars: 0) — dataset metadata artifact
import math
import os
from types import SimpleNamespace
from typing import cast
from dotenv import load_dotenv
import jax.numpy as np
import pytest
import ergo
from ergo.distributions import Logistic, LogisticMixture, Truncate
from ergo.scale import LogScale, Scale, TimeScale
def three_sd_scale(loc, s):
    """Scale spanning +/- 3 standard deviations of a logistic(loc, s)."""
    std_dev = s * math.pi / math.sqrt(3)
    lower, upper = loc - 3 * std_dev, loc + 3 * std_dev
    return Scale(lower, upper)
def easyLogistic(loc, scale):
    """Logistic distribution with an auto-derived +/-3-sd Scale."""
    bounds = three_sd_scale(loc, scale)
    return Logistic(loc, scale, bounds)
@pytest.fixture(scope="module")
def normalized_logistic_mixture():
    """Bimodal mixture already normalized to the unit scale."""
    unit = Scale(0, 1)
    components = [
        Logistic(loc=0.15, s=0.037034005, scale=unit),
        Logistic(loc=0.85, s=0.032395907, scale=unit),
    ]
    return LogisticMixture(components=components, probs=[0.6, 0.4])
@pytest.fixture(scope="module")
def logistic_mixture():
    """Mixture on (0, 150000), weighted 80/20 toward the low component."""
    bounds = Scale(0, 150000)
    low = Logistic(loc=10000, s=1000, scale=bounds)
    high = Logistic(loc=100000, s=10000, scale=bounds)
    return LogisticMixture(components=[low, high], probs=[0.8, 0.2])
@pytest.fixture(scope="module")
def logistic_mixture10():
    """Symmetric two-component mixture centred at 10 on (-20, 40)."""
    bounds = Scale(-20, 40)
    shared_s = 2.3658268
    components = [
        Logistic(loc=15, s=shared_s, scale=bounds),
        Logistic(loc=5, s=shared_s, scale=bounds),
    ]
    return LogisticMixture(components=components, probs=[0.5, 0.5])
@pytest.fixture(scope="module")
def logistic_mixture_p_uneven():
    """Mixture whose first component has (numerically) zero weight."""
    bounds = Scale(-10, 20)
    components = [
        Logistic(loc=10, s=3, scale=bounds),
        Logistic(loc=5, s=5, scale=bounds),
    ]
    return LogisticMixture(components=components, probs=[1.8629593e-29, 1.0])
@pytest.fixture(scope="module")
def truncated_logistic_mixture():
    """Mixture of two truncated logistics sharing floor and ceiling."""
    bounds = Scale(5000, 120000)

    def clipped(loc, s):
        # Both components are truncated to the same support.
        return Truncate(
            Logistic(loc=loc, s=s, scale=bounds), floor=5000, ceiling=500000
        )

    return LogisticMixture(
        components=[clipped(10000, 1000), clipped(100000, 10000)],
        probs=[0.8, 0.2],
    )
@pytest.fixture(scope="module")
def logistic_mixture_p_overlapping():
    """Two nearly identical components with equal weight."""
    bounds = three_sd_scale(4000000.035555004, 200000.02)
    first = Logistic(4000000.035555004, 200000.02, bounds)
    second = Logistic(4000000.0329152746, 200000.0, bounds)
    return LogisticMixture(components=[first, second], probs=[0.5, 0.5])
@pytest.fixture(scope="module")
def logistic_mixture_norm_test():
    """Well-separated narrow and wide components on (-50, 50)."""
    bounds = Scale(-50, 50)
    components = [Logistic(-40, 1, bounds), Logistic(50, 10, bounds)]
    return LogisticMixture(components=components, probs=[0.5, 0.5])
@pytest.fixture(scope="module")
def logistic_mixture15():
    """Symmetric mixture centred at 15 on (-10, 40)."""
    bounds = Scale(-10, 40)
    spread = 3.658268
    components = [
        Logistic(loc=10, s=spread, scale=bounds),
        Logistic(loc=20, s=spread, scale=bounds),
    ]
    return LogisticMixture(components=components, probs=[0.5, 0.5])
@pytest.fixture(scope="module")
def logistic_mixture_samples(logistic_mixture, n=1000):
    """n independent draws from the logistic_mixture fixture."""
    draws = [logistic_mixture.sample() for _ in range(0, n)]
    return np.array(draws)
@pytest.fixture(scope="module")
def log_question_data():
    """Raw Metaculus payload for a log-scaled continuous question."""
    possibilities = {
        "type": "continuous",
        "scale": {"deriv_ratio": 10, "min": 1, "max": 10},
    }
    return {
        "id": 0,
        "possibilities": possibilities,
        "title": "question_title",
    }
@pytest.fixture(scope="module")
def metaculus():
    """Log in to Metaculus using credentials from the environment.

    Requires METACULUS_USERNAME, METACULUS_PASSWORD and METACULUS_USER_ID
    in the environment (loaded from a .env file if present). The user id
    is cross-checked against the logged-in session.
    """
    load_dotenv()
    uname = cast(str, os.getenv("METACULUS_USERNAME"))
    # BUG FIX: this read os.getenv("<PASSWORD>") — an anonymization
    # artifact. The error message below names the intended variable.
    pwd = cast(str, os.getenv("METACULUS_PASSWORD"))
    user_id_str = cast(str, os.getenv("METACULUS_USER_ID"))
    if None in [uname, pwd, user_id_str]:
        raise ValueError(
            ".env is missing METACULUS_USERNAME, METACULUS_PASSWORD, or METACULUS_USER_ID"
        )
    user_id = int(user_id_str)
    metaculus = ergo.Metaculus(uname, pwd)
    assert metaculus.user_id == user_id
    return metaculus
@pytest.fixture(scope="module")
def metaculus_questions(metaculus, log_question_data):
    """Bundle of live Metaculus questions used across the test suite."""
    q = SimpleNamespace()
    q.continuous_linear_closed_question = metaculus.get_question(3963)
    q.continuous_linear_open_question = metaculus.get_question(3962)
    q.continuous_linear_date_open_question = metaculus.get_question(4212)
    q.continuous_log_open_question = metaculus.get_question(3961)
    q.closed_question = metaculus.get_question(3965)
    q.binary_question = metaculus.get_question(3966)
    q.log_question = metaculus.make_question_from_data(log_question_data)
    return q
@pytest.fixture(scope="module")
def date_samples(metaculus_questions, normalized_logistic_mixture):
    """1000 mixture samples denormalized onto the date question's scale."""
    question = metaculus_questions.continuous_linear_date_open_question
    samples = np.array(
        [normalized_logistic_mixture.sample() for _ in range(0, 1000)]
    )
    return question.denormalize_samples(samples)
@pytest.fixture(scope="module")
def point_densities():
    """Fixture wrapper around make_point_densities()."""
    return make_point_densities()
def make_point_densities():
    """Fixed (xs, densities) arrays for density-fitting tests."""
    return {
        "xs": np.array(
            [
                -0.22231131421566422,
                0.2333153619512007,
                0.6889420381180656,
                1.1445687142849306,
                1.6001953904517954,
                2.0558220666186604,
                2.5114487427855257,
                2.9670754189523905,
            ]
        ),
        "densities": np.array(
            [
                0.05020944540593859,
                0.3902426887736647,
                0.5887675161478794,
                0.19516571803813396,
                0.33712516238248535,
                0.4151935926066581,
                0.16147625748938946,
                0.03650993407810862,
            ]
        ),
    }
# Representative scales exercised by round-trip/normalization tests:
# linear (unit, wide, signed), log (base 10 and base 2), and time scales
# (bounds are epoch seconds in the first, presumably also in the second —
# the 2000 lower bound looks inconsistent; TODO confirm).
scales_to_test = [
    Scale(0, 1),
    Scale(0, 10000),
    Scale(-1, 1),
    LogScale(0.01, 100, 10),
    LogScale(0.01, 1028, 2),
    TimeScale(631152000, 946684800),
    TimeScale(2000, 2051222400),
]
# ---------- file boundary (dataset concatenation artifact) ----------
import sys
import json
import bson
import yaml
import os
import math
import numpy as np
from modelspec.base_types import print_
from modelspec.base_types import EvaluableExpression
# Module-wide debug flag: when True, the parsing helpers below print traces.
verbose = False
def load_json(filename):
    """Load a generic JSON file, normalizing keys/values via ascii_encode_dict."""
    with open(filename) as handle:
        return json.load(handle, object_hook=ascii_encode_dict)
def load_yaml(filename):
    """Load a generic YAML file using the safe loader."""
    with open(filename) as handle:
        return yaml.load(handle, Loader=yaml.SafeLoader)
def load_bson(filename):
    """Load a generic BSON file."""
    with open(filename, "rb") as handle:
        return bson.decode(handle.read())
def save_to_json_file(info_dict, filename, indent=4):
    """Serialize info_dict to filename as indented JSON."""
    with open(filename, "w") as handle:
        handle.write(json.dumps(info_dict, indent=indent))
def save_to_yaml_file(info_dict, filename, indent=4):
    """Serialize info_dict to filename as YAML (key order preserved on Py3)."""
    if sys.version_info[0] == 2:
        # Python 2 PyYAML has no sort_keys parameter.
        text = yaml.dump(info_dict, indent=indent, default_flow_style=False)
    else:
        text = yaml.dump(info_dict, indent=indent, sort_keys=False)
    with open(filename, "w") as handle:
        handle.write(text)
def ascii_encode_dict(data):
    """Return a copy of *data* with keys and values ASCII-encoded on Python 2.

    On Python 3 the short-circuit makes this an identity mapping.
    """
    def _encode(item):
        if sys.version_info[0] == 2 and isinstance(item, unicode):  # noqa: F821
            return item.encode("ascii")
        return item

    return {_encode(key): _encode(value) for key, value in data.items()}
def _parse_element(dict_format, to_build):
    """Populate *to_build* from a single-entry dict of {id: attributes}.

    The key becomes to_build.id and the value dict is applied via
    _parse_attributes. Returns the populated object.
    """
    if verbose:
        print("Parse for element: [%s]" % dict_format)
    for k in dict_format.keys():
        if verbose:
            # BUG FIX: the original formatted type.__name__ (the literal
            # string "type"); report the actual class of to_build instead.
            print(
                " Setting id: {} in {} ({})".format(
                    k, type(to_build).__name__, to_build
                )
            )
        to_build.id = k
        to_build = _parse_attributes(dict_format[k], to_build)
    return to_build
def _parse_attributes(dict_format, to_build):
    """Recursively apply a dict of attributes to *to_build*.

    Dispatches on the target: plain dicts get item assignment; objects are
    expected to expose `allowed_children` (name -> (..., child type)) for
    list-valued children and `allowed_fields` (name -> (..., field type))
    for nested objects. Scalars are set directly via __setattr__.
    Returns the populated object.
    """
    for key in dict_format:
        value = dict_format[key]
        # NOTE(review): new_format is always True, so the `else` branch at
        # the bottom is currently dead — presumably kept for an older
        # serialization format; confirm before removing.
        new_format = True
        if verbose:
            print(
                " Setting {}={} ({}) in {}".format(key, value, type(value), to_build)
            )
        if new_format:
            if type(to_build) == dict:
                to_build[key] = value
            elif key in to_build.allowed_children:
                # List-valued child: instantiate one child object per entry
                # and append it to the attribute named `key`.
                type_to_use = to_build.allowed_children[key][1]
                for v in value:
                    ff = type_to_use()
                    if verbose:
                        print(f" Type for {key}: {type_to_use} ({ff})")
                    ff = _parse_element({v: value[v]}, ff)
                    # exec is used because `key` is a runtime attribute name.
                    exec("to_build.%s.append(ff)" % key)
            else:
                if (
                    type(value) == str
                    or type(value) == int
                    or type(value) == float
                    or type(value) == bool
                    or type(value) == list
                    or value is None
                ):
                    # Plain scalar/list: set directly.
                    to_build.__setattr__(key, value)
                else:
                    # Nested object field: look up its declared type.
                    type_to_use = to_build.allowed_fields[key][1]
                    if verbose:
                        print(
                            "type_to_use: {} ({})".format(
                                type_to_use, type(type_to_use)
                            )
                        )
                        print(f"- {key} = {value}")
                    if type_to_use == EvaluableExpression:
                        # Expressions are stored as a (currently empty) dict.
                        vv = {}
                        to_build.__setattr__(key, vv)
                    else:
                        ff = type_to_use()
                        ff = _parse_attributes(value, ff)
                        exec("to_build.%s = ff" % key)
        else:
            # Legacy format handling (unreachable while new_format is True).
            if type(to_build) == dict:
                to_build[key] = value
            elif type(value) == str or type(value) == int or type(value) == float:
                to_build.__setattr__(key, value)
            elif type(value) == list:
                type_to_use = to_build.allowed_children[key][1]
                for vl in value:
                    ff = type_to_use()
                    ff = _parse_element(vl, ff)
                    exec("to_build.%s.append(ff)" % key)
            else:
                type_to_use = to_build.allowed_fields[key][1]
                ff = type_to_use()
                ff = _parse_attributes(value, ff)
                exec("to_build.%s = ff" % key)
    return to_build
def locate_file(f, base_dir):
    """Resolve *f* against *base_dir* and return the canonical absolute path.

    When no base directory is given, the filename is returned untouched.
    """
    if base_dir is None:
        return f
    return os.path.realpath(os.path.join(base_dir, f))
def _val_info(param_val):
if type(param_val) == np.ndarray:
pp = "%s" % (np.array2string(param_val, threshold=4, edgeitems=1))
pp = pp.replace("\n", "")
pp += f" (NP {param_val.shape} {param_val.dtype})"
elif type(param_val).__name__ == "EagerTensor":
pp = "%s" % param_val
pp = pp.replace("\n", "")
# pp+=' (TF %s %s)'%(param_val.shape,param_val.dtype)
elif type(param_val) == tuple:
# If param_val is a tuple, recursively print its elements
# separated by commas and wrapped in parentheses
pp = "(" + ", ".join([_val_info(el) for el in param_val]) + ")"
else:
pp = "%s" % param_val
t = type(param_val)
if not (t == int or t == float):
pp += "(%s)" % (t if type(t) == str else t.__name__)
return pp
def _params_info(parameters, multiline=False):
    """
    Short info on names, values and types in parameter list.

    BUG FIX: the old implementation stripped the last two characters
    unconditionally, which corrupted the result to "]" when the dict
    contained only "__builtins__" (nothing appended), and left a spurious
    trailing comma in multiline mode.
    """
    entries = []
    if parameters is not None and len(parameters) > 0:
        for p in parameters:
            if not p == "__builtins__":
                entries.append("{}={}".format(p, _val_info(parameters[p])))
    separator = ", \n" if multiline else ", "
    return "[" + separator.join(entries) + "]"
# Ideas in development...
# Identifiers for the array backend used by evaluate(): plain numpy arrays,
# or tensorflow tensors (tensorflow is imported lazily only when requested).
FORMAT_NUMPY = "numpy"
FORMAT_TENSORFLOW = "tensorflow"
def evaluate(expr, parameters=None, rng=None, array_format=FORMAT_NUMPY, verbose=False):
    """
    Evaluate a general string like expression (e.g. "2 * weight") using a dict
    of parameters (e.g. {'weight':10}). Returns floats, ints, etc. if that's what's
    given in expr.

    BUG FIX: the default for ``parameters`` used to be a shared mutable dict
    ({}); because this function injects keys ("rng", "math", "numpy") into it,
    state leaked between calls that omitted the argument. A fresh dict is now
    created per call; callers passing their own dict see unchanged behavior.

    NOTE(review): falls back to eval() on the expression — only pass trusted
    expressions.
    """
    if parameters is None:
        parameters = {}
    if array_format == FORMAT_TENSORFLOW:
        import tensorflow as tf
    print_(
        " > Evaluating: [%s] which is a: %s, vs parameters: %s (using %s arrays)..."
        % (expr, type(expr).__name__, _params_info(parameters), array_format),
        verbose,
    )
    try:
        if type(expr) == str and expr in parameters:
            expr = parameters[
                expr
            ]  # replace with the value in parameters & check whether it's float/int...
            print_("Using for that param: %s" % _val_info(expr), verbose)
        if type(expr) == str:
            # Try to coerce numeric strings; failures fall through silently
            try:
                if array_format == FORMAT_TENSORFLOW:
                    expr = tf.constant(int(expr))
                else:
                    expr = int(expr)
            except:
                pass
            try:
                if array_format == FORMAT_TENSORFLOW:
                    expr = tf.constant(float(expr))
                else:
                    expr = float(expr)
            except:
                pass
        if type(expr) == list:
            print_("Returning a list in format: %s" % array_format, verbose)
            if array_format == FORMAT_TENSORFLOW:
                return tf.constant(expr, dtype=tf.float64)
            else:
                return np.array(expr)
        if type(expr) == np.ndarray:
            print_("Returning a numpy array in format: %s" % array_format, verbose)
            if array_format == FORMAT_TENSORFLOW:
                return tf.convert_to_tensor(expr, dtype=tf.float64)
            else:
                return np.array(expr)
        if "Tensor" in type(expr).__name__:
            print_(
                "Returning a tensorflow Tensor in format: %s" % array_format, verbose
            )
            if array_format == FORMAT_NUMPY:
                return expr.numpy()
            else:
                return expr
        if int(expr) == expr:
            print_("Returning int: %s" % int(expr), verbose)
            return int(expr)
        else:  # will have failed if not number
            print_("Returning float: %s" % expr, verbose)
            return float(expr)
    except:
        # Not directly a number/array: evaluate as a Python expression
        try:
            if rng:
                expr = expr.replace("random()", "rng.random()")
                parameters["rng"] = rng
            if type(expr) == str and "math." in expr:
                parameters["math"] = math
            if type(expr) == str and "numpy." in expr:
                parameters["numpy"] = np
            print_(
                "Trying to eval [%s] with Python using %s..."
                % (expr, parameters.keys()),
                verbose,
            )
            v = eval(expr, parameters)
            print_("Evaluated with Python: {} = {}".format(expr, _val_info(v)), verbose)
            if (type(v) == float or type(v) == str) and int(v) == v:
                print_("Returning int: %s" % int(v), verbose)
                if array_format == FORMAT_TENSORFLOW:
                    return tf.constant(int(v))
                else:
                    return int(v)
            return v
        except Exception as e:
            print_(f"Returning without altering: {expr} (error: {e})", verbose)
            return expr
"""
Translates a string like '3', '[0,2]' to a list
"""
def parse_list_like(list_str):
if isinstance(list_str, int):
return [list_str]
elif isinstance(list_str, float):
return [list_str]
elif isinstance(list_str, list):
return list_str
elif type(list_str) == str:
try:
expr = int(list_str)
return [expr]
except:
pass
try:
expr = float(list_str)
return [expr]
except:
pass
if "[" in list_str:
return eval(list_str)
|
<filename>mod/UJlib.py
import collections
import copy
import datetime
import math
import os
import pickle
import re
from collections import ChainMap
import pandas as pd
import json
import shutil
import progressbar
def get_dictlist_findidx(dictlist, fkey):
    """Return the index of the first dict containing *fkey*, or -1 if absent.

    Expects a list of single-key dicts such as [{'11': 100}, ...].
    """
    for position, entry in enumerate(dictlist):
        if fkey in entry:
            return position
    return -1
def get_dictlist_dict(dictlist, fkey):
    """Return the first dict in the list containing *fkey*, or False when absent.

    IMPROVEMENT: the old code performed the linear search twice (once for the
    test, once for the lookup); the index is now computed once.
    """
    idx = get_dictlist_findidx(dictlist, fkey)
    if idx > -1:
        return dictlist[idx]
    return False
def get_dictlist_value_int(dictlist, fkey, DEBUGMODE=False):
    """Look up *fkey* in a dict-list like [{'11': 100}, ...] and return its value as int.

    Returns 0 when the key is not found.
    """
    found = get_dictlist_dict(dictlist, fkey)
    if DEBUGMODE:
        print(found)
    return 0 if found == False else int(found[fkey])
def get_dictlist_value_str(dictlist, fkey, DEBUGMODE=False):
    """Look up *fkey* in a dict-list like [{'11': 100}, ...] and return its value as str.

    Returns '' when the key is not found.
    """
    found = get_dictlist_dict(dictlist, fkey)
    if DEBUGMODE:
        print(found)
    return '' if found == False else str(found[fkey])
def get_dictlist_sorted(dictlist):
    """De-duplicate a list of single-key dicts by key and sort by key.

    Input is of the form [{'11': 100}, ...]; when a key occurs more than once
    the first occurrence wins (the ChainMap semantics the original code hinted
    at in its commented-out draft).

    BUG FIX: the previous implementation was marked broken by its author — it
    sorted with a constant key (a no-op) and never removed duplicates, and it
    printed debug output.
    """
    merged = dict(ChainMap(*dictlist))
    return [{key: merged[key]} for key in sorted(merged)]
def get_dictlist_org_findidx(dictlist, fkey, val):
    """Index of the first record dict whose *fkey* field equals *val*, else -1."""
    for position, record in enumerate(dictlist):
        if record[fkey] == val:
            return position
    return -1
def get_dictlist_org_only_sorted(dictlist, fkey):
    """Sort a list of record dicts like [{'code': '11', 'money': 100}, ...] by *fkey*."""
    from operator import itemgetter
    return sorted(dictlist, key=itemgetter(fkey))
def get_work_end(hms):
    """Return True once the current time is past today's "%H:%M:%S" cutoff *hms*."""
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    cutoff = datetime.datetime.strptime(
        "{} {}".format(today, hms), "%Y-%m-%d %H:%M:%S")
    return datetime.datetime.now() > cutoff
def get_work_end_day(day, aftday=0):
    """True when *aftday* days have fully elapsed since the end of date *day* ("%Y-%m-%d")."""
    deadline = datetime.datetime.strptime(day + " 23:59:59", "%Y-%m-%d %H:%M:%S")
    deadline += datetime.timedelta(days=aftday)
    return deadline <= datetime.datetime.now()
def list_SaveToFile(slist, file_name="sample"):
    """Pickle *slist* to "<file_name>.pkl" in the current directory.

    IMPROVEMENT: uses a context manager so the file handle is closed even if
    pickling raises.
    """
    with open("{}.pkl".format(file_name), "wb") as open_file:
        pickle.dump(slist, open_file)
def list_LoadFromFile(file_name="sample"):
    """Unpickle and return the object stored in "<file_name>.pkl".

    Returns [] when the file does not exist.
    IMPROVEMENT: uses a context manager so the file handle is closed even if
    unpickling raises.
    """
    path = "{}.pkl".format(file_name)
    if not os.path.isfile(path):
        return []
    with open(path, "rb") as open_file:
        return pickle.load(open_file)
def dictlist_update(source, dict, fname=""):
    """Insert or replace the entry in *source* matching *dict*'s key; persist when *fname* is given.

    Works on lists of single-key dicts such as [{'11': 100}, ...].
    (Parameter name ``dict`` shadows the builtin but is kept for keyword callers.)
    """
    position = get_dictlist_findidx(source, "".join(dict.keys()))
    if position == -1:
        # Key not present yet: append (dictlist_append also persists when fname given)
        dictlist_append(source, dict, fname)
    else:
        source[position] = dict
    if fname != "":
        list_SaveToFile(source, fname)
    return source
def dictlist_append(source, appenddict, fname=""):
    """Append *appenddict* to the dict-list in place and persist it when *fname* is given.

    Returns the (mutated) source list.
    """
    source.append(appenddict)
    if fname != "":
        list_SaveToFile(source, fname)
    return source
def dictlist_delete(source, idx=-1, key="", fname=""):
    """Remove an entry from the dict-list by index, or by key when idx is -1.

    Persists the list when *fname* is given; returns the (mutated) source list.
    """
    target = idx
    if target == -1 and key != "":
        target = get_dictlist_findidx(source, key)
    if target > -1:
        source.pop(target)
    if fname != "":
        list_SaveToFile(source, fname)
    return source
def get_stock_diff(oldstocks, stocks):
    """Diff two stock lists, returning (disappeared, appeared).

    Special cases: both empty -> ("0", []); only new -> ("+", stocks);
    only old -> ("-", oldstocks); otherwise a pair of lists (removed, added).
    """
    if not oldstocks:
        return ("0", []) if not stocks else ("+", stocks)
    if not stocks:
        return "-", oldstocks
    removed = [item for item in oldstocks if item not in stocks]
    added = [item for item in stocks if item not in oldstocks]
    return removed, added
def get_trading_val(money, basemoney, trading, start_time="09:00:00", timegugan=6):
    """Scale a trading volume by the session time elapsed since *start_time* today.

    When *money* > 0 the raw volume is normalised to *basemoney* (x1.6 factor);
    within the first minute the unscaled value is returned, and after
    *timegugan* hours the full value is returned.

    BUG FIX: removed a leftover debug ``print(tds)``.
    """
    if money > 0:
        tr = trading / (money * 1) * basemoney * 1.6
    else:
        tr = trading
    session_start = datetime.datetime.strptime(
        datetime.datetime.now().strftime("%Y-%m-%d ") + start_time,
        "%Y-%m-%d %H:%M:%S")
    elapsed = datetime.datetime.now() - session_start
    if elapsed < datetime.timedelta(minutes=1):
        return tr
    elapsed_minutes = math.trunc(elapsed.total_seconds() / 60)
    if elapsed_minutes > (timegugan * 60):
        return int(tr)
    else:
        return int(tr * (elapsed_minutes / (timegugan * 60)))
def get_now(type="ymd"):
    """Current timestamp as a string: "ymd" -> date, "hms" -> time, anything else -> full datetime.

    (Parameter name ``type`` shadows the builtin but is kept for keyword callers.)
    """
    formats = {
        "ymd": '%Y-%m-%d',
        "hms": '%H:%M:%S',
        "all": '%Y-%m-%d %H:%M:%S',
    }
    pattern = formats.get(type, '%Y-%m-%d %H:%M:%S')
    return datetime.datetime.now().strftime(pattern)
def RemoveChar(sSrc, CharList='~`!@#$%^&*()-+|\/<>{} '):
    """Remove every character of *CharList* from *sSrc* (space counts as special).

    IMPROVEMENT: replaces a quadratic character-by-character string
    concatenation loop with a single ''.join pass.
    """
    return ''.join(ch for ch in sSrc if ch not in CharList)
def fileMove(ifile, tDir, enc=False):
    """Move the file at path *ifile* into directory *tDir*.

    With enc=True the file is instead copied under a new, timestamp-prefixed
    name (original is kept). Returns (tDir, final_file_name).

    BUG FIX: paths are now built with os.path.join instead of ``tDir + tfile``,
    so *tDir* no longer has to end with a path separator (trailing separators
    still work).
    """
    if enc:
        # Timestamp prefix guarantees a unique name; original file is kept
        tfile = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f") + os.path.basename(ifile)
        shutil.copy(ifile, os.path.join(tDir, tfile))
    else:
        tfile = os.path.basename(ifile)
        shutil.move(ifile, os.path.join(tDir, tfile))
    return tDir, tfile
def ToDictlist(dfs):
    """Convert a pandas DataFrame into a list of row dicts; pass anything else through.

    Usage of the result: ``for v in dictlist: v["code"]``.
    """
    if isinstance(dfs, pd.DataFrame):
        return dfs.to_dict('records')
    return dfs
# Module-level progress bar shared by successive show_progress() calls;
# reset to None once a download completes.
pbar = None
def show_progress(block_num, block_size, total_size):
    """Download report-hook (e.g. for urlretrieve) that drives a progressbar."""
    global pbar
    if pbar is None:
        # First callback: create and start the bar sized to the full download
        pbar = progressbar.ProgressBar(maxval=total_size)
        pbar.start()
    downloaded = block_num * block_size
    if downloaded < total_size:
        pbar.update(downloaded)
    else:
        # Finished: close the bar and reset for the next download
        pbar.finish()
        pbar = None
def ExtractFileExt(filename):
    """Return the extension of *filename*, including the leading dot ('' if none)."""
    _, extension = os.path.splitext(filename)
    return extension
def FileExists(filename):
    """Return True if *filename* refers to an existing regular file."""
    return os.path.isfile(filename)
|
# Copyright 2020 <NAME> (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from evaluate import calculate_total_pckh
from save_result_as_image import save_result_image
from configparser import ConfigParser
import getopt
import sys
from common import get_time_and_step_interval
import numpy as np
import os
import datetime
import tensorflow as tf
# Connect to the Colab TPU runtime; COLAB_TPU_ADDR is set by the Colab
# environment when a TPU accelerator is attached (KeyError otherwise).
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
# Fix the global RNG seed for reproducibility
tf.random.set_seed(3)
print("tensorflow version :", tf.__version__)  # 2.1.0
print("keras version :", tf.keras.__version__)  # 2.2.4-tf
# Distribution strategy used to replicate train_step across TPU cores
strategy = tf.distribute.TPUStrategy(resolver)
"""
python train.py --dataset_config=config/dataset/coco2017-gpu.cfg --experiment_config=config/training/experiment01.cfg
python train.py --dataset_config=config/dataset/ai_challenger-gpu.cfg --experiment_config=config/training/experiment01.cfg
"""
# Parse command-line options: -h prints usage, -d/-e select the config files.
argv = sys.argv[1:]
try:
    # BUG FIX: 'h' was missing from the optstring, so passing -h raised
    # GetoptError instead of reaching the help branch below.
    opts, args = getopt.getopt(
        argv, "hd:e:", ["dataset_config=", "experiment_config="])
except getopt.GetoptError:
    print('train_hourglass.py --dataset_config <inputfile> --experiment_config <outputfile>')
    sys.exit(2)
# Defaults used when the options are not supplied
dataset_config_file_path = "config/dataset/ai_challenger-colab.cfg"
experiment_config_file_path = "config/training/experiment04-cpm-sg4-colab.cfg"
for opt, arg in opts:
    if opt == '-h':
        print('train_middlelayer.py --dataset_config <inputfile> --experiment_config <outputfile>')
        sys.exit()
    elif opt in ("-d", "--dataset_config"):
        dataset_config_file_path = arg
    elif opt in ("-e", "--experiment_config"):
        experiment_config_file_path = arg
parser = ConfigParser()
# get dataset config
# NOTE(review): every config value is passed through eval(), i.e. the .cfg
# files are executed as Python expressions — only load trusted config files.
print(dataset_config_file_path)
parser.read(dataset_config_file_path)
config_dataset = {}
for key in parser["dataset"]:
    config_dataset[key] = eval(parser["dataset"][key])
# get training config
print(experiment_config_file_path)
parser.read(experiment_config_file_path)
# Each section of the experiment config becomes its own plain dict
config_preproc = {}
for key in parser["preprocessing"]:
    config_preproc[key] = eval(parser["preprocessing"][key])
config_model = {}
for key in parser["model"]:
    config_model[key] = eval(parser["model"][key])
config_extra = {}
for key in parser["extra"]:
    config_extra[key] = eval(parser["extra"][key])
config_training = {}
for key in parser["training"]:
    config_training[key] = eval(parser["training"][key])
config_output = {}
for key in parser["output"]:
    config_output[key] = eval(parser["output"][key])
# Resolve dataset and output locations from the parsed configuration.
# "/Volumes/tucan-SSD/datasets"
dataset_root_path = config_dataset["dataset_root_path"]
# "coco_dataset"
dataset_directory_name = config_dataset["dataset_directory_name"]
dataset_path = os.path.join(dataset_root_path, dataset_directory_name)
# "/home/outputs"  # "/Volumes/tucan-SSD/ml-project/outputs"
output_root_path = config_output["output_root_path"]
output_experiment_name = config_output["experiment_name"]  # "experiment01"
sub_experiment_name = config_output["sub_experiment_name"]  # "basic"
# Timestamp prefix keeps runs started at different times distinct
current_time = datetime.datetime.now().strftime("%m%d%H%M")
model_name = config_model["model_name"]  # "simplepose"
model_subname = config_model["model_subname"]
output_name = f"{current_time}_{model_name}_{sub_experiment_name}"
output_path = os.path.join(
    output_root_path, output_experiment_name, dataset_directory_name)
output_log_path = os.path.join(output_path, "logs", output_name)
# =================================================
# ============== prepare training =================
# =================================================
# Writer used for all tensorboard scalars emitted during training
train_summary_writer = tf.summary.create_file_writer(output_log_path)
@tf.function
def train_step(model, images, labels):
    """Run one optimization step and return (total_loss, last_layer_loss, max_val).

    The model is expected to return one prediction tensor per output layer;
    the same label tensor is compared against each, and the per-layer losses
    are summed. Uses the module-level `loss_object`, `optimizer` and
    `train_loss` metric.
    """
    with tf.GradientTape() as tape:
        model_output = model(images)
        predictions_layers = model_output
        losses = [loss_object(labels, predictions)
                  for predictions in predictions_layers]
        total_loss = tf.math.add_n(losses)
    # Peak activation of the final layer, reported for monitoring only
    max_val = tf.math.reduce_max(predictions_layers[-1])
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(total_loss)
    return total_loss, losses[-1], max_val
def val_step(step, images, heamaps):
    """Run inference on validation images and save comparison result images.

    Uses the module-level `model`. (Parameter name "heamaps" (sic) is kept
    unchanged for any keyword callers.)
    """
    predictions = model(images, training=False)
    predictions = np.array(predictions)
    save_image_results(step, images, heamaps, predictions)
@tf.function
def valid_step(model, images, labels):
    """Compute the validation loss for one batch and record it in `valid_loss`."""
    predictions = model(images, training=False)
    v_loss = loss_object(labels, predictions)
    valid_loss(v_loss)
    # valid_accuracy(labels, predictions)
    return v_loss
def save_image_results(step, images, true_heatmaps, predicted_heatmaps):
    """Render and save one comparison jpg per validation image.

    Images are written under <output_path>/<output_name>/val_image_results/,
    comparing ground-truth heatmaps with the last prediction layer.

    IMPROVEMENT: the chain of exists()/mkdir() calls is replaced with a single
    race-safe os.makedirs(..., exist_ok=True) that also creates parents.
    """
    val_image_results_directory = "val_image_results"
    os.makedirs(os.path.join(output_path, output_name,
                             val_image_results_directory), exist_ok=True)
    for i in range(images.shape[0]):
        image = images[i, :, :, :]
        heamap = true_heatmaps[i, :, :, :]
        # Only the final prediction layer is rendered
        prediction = predicted_heatmaps[-1][i, :, :, :]
        result_image_path = os.path.join(
            output_path, output_name, val_image_results_directory, f"result{i}-{step:0>6d}.jpg")
        save_result_image(result_image_path, image, heamap,
                          prediction, title=f"step:{int(step/1000)}k")
def save_model(model, step=None, label=None):
    """Save *model* under <output_path>/<output_name>/saved_model[-step][-label].

    Returns the directory the model was saved to.

    IMPROVEMENT: the chain of exists()/mkdir() calls is replaced with a single
    race-safe os.makedirs(..., exist_ok=True) that also creates parents.
    """
    saved_model_directory = "saved_model"
    if step is not None:
        saved_model_directory = saved_model_directory + f"-{step:0>6d}"
    if label is not None:
        saved_model_directory = saved_model_directory + "-" + label
    saved_model_path = os.path.join(
        output_path, output_name, saved_model_directory)
    os.makedirs(saved_model_path, exist_ok=True)
    print("-"*20 + " MODEL SAVE!! " + "-"*20)
    print("saved model path: " + saved_model_path)
    model.save(saved_model_path)
    print("-"*18 + " MODEL SAVE DONE!! " + "-"*18)
    return saved_model_path
if __name__ == '__main__':
    # ================================================
    # ============= load hyperparams =================
    # ================================================
    # config_dataset = ...
    # config_model = ...
    # config_output = ...
    # ================================================
    # =============== load dataset ===================
    # ================================================
    from data_loader.data_loader import DataLoader
    # dataloader instance gen
    train_images = config_dataset["train_images"]
    train_annotation = config_dataset["train_annotation"]
    train_images_dir_path = os.path.join(dataset_path, train_images)
    train_annotation_json_filepath = os.path.join(
        dataset_path, train_annotation)
    print(">> LOAD TRAIN DATASET FORM:", train_annotation_json_filepath)
    dataloader_train = DataLoader(
        images_dir_path=train_images_dir_path,
        annotation_json_path=train_annotation_json_filepath,
        config_training=config_training,
        config_model=config_model,
        config_preproc=config_preproc)
    valid_images = config_dataset["valid_images"]
    valid_annotation = config_dataset["valid_annotation"]
    valid_images_dir_path = os.path.join(dataset_path, valid_images)
    valid_annotation_json_filepath = os.path.join(
        dataset_path, valid_annotation)
    print(">> LOAD VALID DATASET FORM:", valid_annotation_json_filepath)
    dataloader_valid = DataLoader(
        images_dir_path=valid_images_dir_path,
        annotation_json_path=valid_annotation_json_filepath,
        config_training=config_training,
        config_model=config_model,
        config_preproc=config_preproc)
    number_of_keypoints = dataloader_train.number_of_keypoints  # 17
    # train dataset
    dataset_train = dataloader_train.input_fn()
    dataset_valid = dataloader_valid.input_fn()
    # validation images
    # Fixed batch of validation images reused for periodic image dumps
    val_images, val_heatmaps = dataloader_valid.get_images(
        0, batch_size=25)  # from 22 index 6 images and 6 labels
    # ================================================
    # =============== build model ====================
    # ================================================
    from model_provider import get_model
    model = get_model(model_name=model_name,
                      model_subname=model_subname,
                      number_of_keypoints=number_of_keypoints,
                      config_extra=config_extra)
    loss_object = tf.keras.losses.MeanSquaredError()
    optimizer = tf.keras.optimizers.Adam(
        config_training["learning_rate"], epsilon=config_training["epsilon"])
    train_loss = tf.keras.metrics.Mean(name="train_loss")
    valid_loss = tf.keras.metrics.Mean(name="valid_loss")
    valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        name="valid_accuracy")
    # ================================================
    # ============== train the model =================
    # ================================================
    num_epochs = config_training["number_of_epoch"]  # 550
    number_of_echo_period = config_training["period_echo"]  # 100
    # NOTE(review): hard-coded to None, so periodic validation-image dumps
    # are currently disabled regardless of config.
    number_of_validimage_period = None  # 1000
    number_of_modelsave_period = config_training["period_save_model"]  # 5000
    tensorbaord_period = config_training["period_tensorboard"]  # 100
    # validation_period = 2  # 1000
    valid_check = False
    valid_pckh = config_training["valid_pckh"]  # True
    pckh_distance_ratio = config_training["pckh_distance_ratio"]  # 0.5
    step = 1
    # TRAIN!!
    get_time_and_step_interval(step, is_init=True)
    for epoch in range(num_epochs):
        print("-" * 10 + " " + str(epoch + 1) + " EPOCH " + "-" * 10)
        for images, heatmaps in dataset_train:
            # print(images.shape)  # (32, 128, 128, 3)
            # print(heatmaps.shape)  # (32, 32, 32, 17)
            # NOTE(review): strategy.run returns per-replica values; the
            # f-string formatting below assumes they behave like scalars.
            total_loss, last_layer_loss, max_val = strategy.run(
                train_step, args=(model, images, heatmaps))
            step += 1
            # Periodic console echo of timing and loss values
            if number_of_echo_period is not None and step % number_of_echo_period == 0:
                total_interval, per_step_interval = get_time_and_step_interval(
                    step)
                echo_textes = []
                if step is not None:
                    echo_textes.append(f"step: {step}")
                if total_interval is not None:
                    echo_textes.append(f"total: {total_interval}")
                if per_step_interval is not None:
                    echo_textes.append(f"per_step: {per_step_interval}")
                if total_loss is not None:
                    echo_textes.append(f"total loss: {total_loss:.6f}")
                if last_layer_loss is not None:
                    echo_textes.append(f"last loss: {last_layer_loss:.6f}")
                print(">> " + ", ".join(echo_textes))
            # validation phase
            if number_of_validimage_period is not None and step % number_of_validimage_period == 0:
                val_step(step, val_images, val_heatmaps)
            # Periodic checkpoint, optionally followed by a PCKh evaluation
            if number_of_modelsave_period is not None and step % number_of_modelsave_period == 0:
                saved_model_path = save_model(model, step=step)
                if valid_pckh:
                    # print("calcuate pckh")
                    pckh_score = calculate_total_pckh(saved_model_path=saved_model_path,
                                                      annotation_path=valid_annotation_json_filepath,
                                                      images_path=valid_images_dir_path,
                                                      distance_ratio=pckh_distance_ratio)
                    with train_summary_writer.as_default():
                        tf.summary.scalar(
                            f'pckh@{pckh_distance_ratio:.1f}_score', pckh_score * 100, step=step)
            # Periodic tensorboard scalars
            if tensorbaord_period is not None and step % tensorbaord_period == 0:
                with train_summary_writer.as_default():
                    tf.summary.scalar(
                        "total_loss", total_loss.numpy(), step=step)
                    tf.summary.scalar(
                        "max_value - last_layer_loss", max_val.numpy(), step=step)
                    if last_layer_loss is not None:
                        tf.summary.scalar("last_layer_loss",
                                          last_layer_loss.numpy(), step=step)
            # if not valid_check:
            #     continue
            # for v_images, v_heatmaps in dataloader_valid:
            #     v_loss = valid_step(model, sv_images, v_heatmaps)
    # last model save
    saved_model_path = save_model(model, step=step, label="final")
    # last pckh
    pckh_score = calculate_total_pckh(saved_model_path=saved_model_path,
                                      annotation_path=valid_annotation_json_filepath,
                                      images_path=valid_images_dir_path,
                                      distance_ratio=pckh_distance_ratio)
    with train_summary_writer.as_default():
        tf.summary.scalar(
            f'pckh@{pckh_distance_ratio:.1f}_score', pckh_score * 100, step=step)
|
"""This module encapsulates methods for running time course simulations.
The main method provided is the :func:`run_time_course` method, which will
simulate the given model (or the current :func:`.get_current_model`).
Examples:
To run a time course for the duration of 10 time units use
>>> run_time_course(10)
To run a time course for the duration of 10 time units, in 50 simulation steps use
>>> run_time_course(10, 50)
To run a time course from 0, for the duration of 10 time units, in 50 simulation steps use:
>>> run_time_course(0, 10, 50)
all parameters can also be given as key value pairs.
"""
import COPASI
import pandas as pd
import basico
from . import model_io
from . import model_info
import pandas
import numpy
import logging
def __build_result_from_ts(time_series, use_concentrations=True, use_sbml_id=False, model=None):
    # type: (COPASI.CTimeSeries, bool, bool, object) -> pandas.DataFrame
    """Convert a recorded COPASI time series into a DataFrame indexed by 'Time'.

    :param time_series: the COPASI time series to read
    :param use_concentrations: if True read concentration data, otherwise raw data
    :param use_sbml_id: if True, label columns with SBML ids where available
    :param model: model used to resolve SBML ids (defaults to the current model)
    """
    col_count = time_series.getNumVariables()
    row_count = time_series.getRecordedSteps()
    if use_sbml_id and model is None:
        model = model_io.get_current_model()
    column_names = []
    column_keys = []
    for i in range(col_count):
        column_keys.append(time_series.getKey(i))
        name = time_series.getTitle(i)
        # The 'Time' column always keeps its title so it can become the index
        if use_sbml_id and name != 'Time':
            sbml_id = time_series.getSBMLId(i, model)
            if sbml_id:
                name = sbml_id
        column_names.append(name)
    # Copy all recorded values into a dense matrix (rows = recorded steps)
    concentrations = numpy.empty([row_count, col_count])
    for i in range(row_count):
        for j in range(col_count):
            if use_concentrations:
                concentrations[i, j] = time_series.getConcentrationData(i, j)
            else:
                concentrations[i, j] = time_series.getData(i, j)
    df = pandas.DataFrame(data=concentrations, columns=column_names)
    df = df.set_index('Time')
    return df
def __method_name_to_type(method_name):
    """Map a user-facing method name (case-insensitive) to a COPASI task method enum.

    Unknown names fall back to the deterministic (LSODA) method.
    """
    methods = {
        'deterministic': COPASI.CTaskEnum.Method_deterministic,
        'lsoda': COPASI.CTaskEnum.Method_deterministic,
        'hybridode45': COPASI.CTaskEnum.Method_hybridODE45,
        'hybridlsoda': COPASI.CTaskEnum.Method_hybridLSODA,
        'adaptivesa': COPASI.CTaskEnum.Method_adaptiveSA,
        'tauleap': COPASI.CTaskEnum.Method_tauLeap,
        'stochastic': COPASI.CTaskEnum.Method_stochastic,
        'directmethod': COPASI.CTaskEnum.Method_directMethod,
        'radau5': COPASI.CTaskEnum.Method_RADAU5,
        # "RunkeKutta" spelling matches the COPASI enum name as published
        'sde': COPASI.CTaskEnum.Method_stochasticRunkeKuttaRI5,
    }
    return methods.get(method_name.lower(), COPASI.CTaskEnum.Method_deterministic)
def run_time_course_with_output(output_selection, *args, **kwargs):
    """Simulates the current model, returning only the data specified in the output_selection array

    :param output_selection: selection of elements to return, for example ['Time', '[ATP]', 'ATP.Rate'] to
        return the time column, ATP concentration and the rate of change of ATP. The strings can be either
        the Display names as can be found in COPASI, or the CN's of the elements.
    :param args: positional arguments

        * 1 argument: the duration to simulate the model
        * 2 arguments: the duration and number of steps to take
        * 3 arguments: start time, duration, number of steps

    :param kwargs: additional arguments

        - | `model`: to specify the data model to be used (if not specified
          | the one from :func:`.get_current_model` will be taken)
        - `use_initial_values` (bool): whether to use initial values
        - `scheduled` (bool): sets whether the task is scheduled or not
        - `update_model` (bool): sets whether the model should be updated, or reset to initial conditions.
        - | `method` (str): sets the simulation method to use (otherwise the previously set method will be used)
          | support methods:
          |
          | * `deterministic` / `lsoda`: the LSODA implementation
          | * `stochastic`: the Gibson & Bruck Gillespie implementation
          | * `directMethod`: Gillespie Direct Method
          | * others: `hybridode45`, `hybridlsoda`, `adaptivesa`, `tauleap`, `radau5`, `sde`
        - `duration` (float): the duration in time units for how long to simulate
        - `automatic` (bool): whether to use automatic determined steps (True), or the specified interval / number of steps
        - `output_event` (bool): if true, output will be collected at the time a discrete event occurs.
        - | `values` ([float]): if given, output will only returned at the output points specified
          | for example use `values=[0, 1, 4]` to return output only for those three times
        - | `start_time` (float): the output start time. If the model is not at that start time, a simulation
          | will be performed in one step, to reach it before starting to collect output.
        - | `step_number` or `intervals` (int): the number of output steps. (will only be used if `automatic`
          | or `stepsize` is not used.
        - | `stepsize` (float): the output step size (will only be used if `automatic` is False).
        - | `seed` (int): set the seed that will be used if `use_seed` is true, using this stochastic trajectories can
          | be repeated
        - | 'use_seed' (bool): if true, the specified seed will be used.
        - | `a_tol` (float): the absolute tolerance to be used
        - | `r_tol` (float): the relative tolerance to be used
        - | `max_steps` (int): the maximum number of internal steps the integrator is allowed to use.

    :return: data frame with the selected output columns
    """
    model = kwargs.get('model', model_io.get_current_model())
    # Register a data handler that captures exactly the requested columns
    dh, columns = create_data_handler(output_selection, model=model)
    task, use_initial_values = _setup_timecourse(args, kwargs)
    model.addInterface(dh)
    result = task.initializeRaw(COPASI.CCopasiTask.OUTPUT_UI)
    if not result:
        logging.error("Error while initializing the simulation: " +
                      COPASI.CCopasiMessage.getLastMessage().getText())
    else:
        result = task.processRaw(use_initial_values)
        if not result:
            logging.error("Error while running the simulation: " +
                          COPASI.CCopasiMessage.getLastMessage().getText())
    # Collect whatever was recorded (possibly partial on error), then detach
    df = get_data_from_data_handler(dh, columns)
    model.removeInterface(dh)
    return df
def get_data_from_data_handler(dh, columns):
    """Collect every row recorded 'during' the task into a DataFrame.

    :param dh: the data handler (provides getNumRowsDuring / getNthRow)
    :param columns: the column labels for the resulting frame
    :return: pandas.DataFrame with one row per recorded step
    """
    rows = [list(dh.getNthRow(i)) for i in range(dh.getNumRowsDuring())]
    return pd.DataFrame(data=rows, columns=columns)
def create_data_handler(output_selection, during=None, after=None, before=None, model=None):
    """Creates an output handler for the given selection

    :param output_selection: list of display names or cns, of elements to capture
    :type output_selection: [str]
    :param during: optional list of elements from the output selection, that should be collected
        during the run of the task
    :type during: [str]
    :param after: optional list of elements from the output selection, that should be collected
        after the run of the task
    :type after: [str]
    :param before: optional list of elements from the output selection, that should be collected
        before the run of the task
    :type before: [str]
    :param model: the model in which to resolve the display names
    :return: tuple of the data handler from which to retrieve output later, and their columns
    :rtype: (COPASI.CDataHandler, [])
    """
    if model is None:
        model = basico.get_current_model()
    dh = COPASI.CDataHandler()
    columns = []
    for name in output_selection:
        if name.startswith('CN='):
            # Already a CN: resolve it only to obtain a display name for the column
            obj = model.getObject(COPASI.CCommonName(name))
            if not obj:
                logging.warning('no object for cn {0}'.format(name))
                continue
            cn = name
            columns.append(obj.getObjectDisplayName())
        else:
            # Display name: resolve to the object and use its CN
            obj = model.findObjectByDisplayName(name)
            if not obj:
                logging.warning('no object for name {0}'.format(name))
                continue
            if isinstance(obj, COPASI.CModel):
                obj = obj.getValueReference()
            cn = obj.getCN().getString()
            columns.append(name)
        # When no 'during' filter is given, every element is collected during the run
        if during is None or (during is not None and name in during):
            dh.addDuringName(COPASI.CRegisteredCommonName(cn))
        if after and name in after:
            dh.addAfterName(COPASI.CRegisteredCommonName(cn))
        if before and name in before:
            # BUG FIX: 'before' elements were previously registered via
            # addAfterName, so nothing was ever collected before the run.
            dh.addBeforeName(COPASI.CRegisteredCommonName(cn))
    return dh, columns
def run_time_course(*args, **kwargs):
    """Simulates the current or given model, returning a data frame with the results

    :param args: positional arguments

         * 1 argument: the duration to simulate the model
         * 2 arguments: the duration and number of steps to take
         * 3 arguments: start time, duration, number of steps

    :param kwargs: additional arguments

         - | `model`: to specify the data model to be used (if not specified
           | the one from :func:`.get_current_model` will be taken)
         - `use_initial_values` (bool): whether to use initial values
         - `scheduled` (bool): sets whether the task is scheduled or not
         - `update_model` (bool): sets whether the model should be updated, or reset to initial conditions.
         - | `method` (str): sets the simulation method to use (otherwise the previously set method will be used)
           | supported methods: `deterministic` / `lsoda`, `stochastic` (Gibson & Bruck),
           | `directMethod` (Gillespie), plus `hybridode45`, `hybridlsoda`, `adaptivesa`,
           | `tauleap`, `radau5`, `sde`
         - `duration` (float): the duration in time units for how long to simulate
         - `automatic` (bool): whether to use automatic determined steps (True), or the specified interval / number of steps
         - `output_event` (bool): if true, output will be collected at the time a discrete event occurs.
         - | `values` ([float]): if given, output will only returned at the output points specified
           | for example use `values=[0, 1, 4]` to return output only for those three times
         - | `start_time` (float): the output start time. If the model is not at that start time, a simulation
           | will be performed in one step, to reach it before starting to collect output.
         - | `step_number` or `intervals` (int): the number of output steps. (will only be used if `automatic`
           | or `stepsize` is not used.
         - `stepsize` (float): the output step size (will only be used if `automatic` is False).
         - | `seed` (int): set the seed that will be used if `use_seed` is true, using this stochastic trajectories can
           | be repeated
         - `use_seed` (bool): if true, the specified seed will be used.
         - `a_tol` (float): the absolute tolerance to be used
         - `r_tol` (float): the relative tolerance to be used
         - `max_steps` (int): the maximum number of internal steps the integrator is allowed to use.
         - `use_concentrations` (bool): whether to return just the concentrations (default)
         - `use_numbers` (bool): return all elements collected

    :return: data frame with simulation results
    :rtype: pandas.DataFrame
    """
    model = kwargs.get('model', model_io.get_current_model())
    task, use_initial_values = _setup_timecourse(args, kwargs)

    # initialize first; only attempt the run when initialization succeeded
    if not task.initializeRaw(COPASI.CCopasiTask.OUTPUT_UI):
        logging.error("Error while initializing the simulation: " +
                      COPASI.CCopasiMessage.getLastMessage().getText())
    elif not task.processRaw(use_initial_values):
        logging.error("Error while running the simulation: " +
                      COPASI.CCopasiMessage.getLastMessage().getText())

    # a truthy `use_numbers` overrides `use_concentrations`
    use_concentrations = kwargs.get('use_concentrations', True) and not kwargs.get('use_numbers', False)
    use_sbml_id = kwargs.get('use_sbml_id', False)
    return __build_result_from_ts(task.getTimeSeries(), use_concentrations, use_sbml_id, model)
def _setup_timecourse(args, kwargs):
    """Configure the model's 'Time-Course' task from positional / keyword arguments.

    Positional arguments are applied last, so they override start time,
    duration and step number given as keywords.

    :param args: () | (duration,) | (duration, steps) | (start_time, duration, steps)
    :param kwargs: options as documented in :func:`run_time_course`
    :return: tuple (COPASI.CTrajectoryTask, bool use_initial_values)
    """
    model = kwargs.get('model', model_io.get_current_model())
    num_args = len(args)
    use_initial_values = kwargs.get('use_initial_values', True)
    task = model.getTask('Time-Course')
    assert (isinstance(task, COPASI.CTrajectoryTask))
    if 'scheduled' in kwargs:
        task.setScheduled(kwargs['scheduled'])
    if 'update_model' in kwargs:
        task.setUpdateModel(kwargs['update_model'])
    if 'method' in kwargs:
        task.setMethodType(__method_name_to_type(kwargs['method']))
    problem = task.getProblem()
    assert (isinstance(problem, COPASI.CTrajectoryProblem))
    if 'duration' in kwargs:
        problem.setDuration(kwargs['duration'])
        # an explicit duration implies interval-based output, not a value list
        problem.setUseValues(False)
    if 'automatic' in kwargs:
        problem.setAutomaticStepSize(kwargs['automatic'])
    if 'output_event' in kwargs:
        problem.setOutputEvent(kwargs['output_event'])
    if 'start_time' in kwargs:
        problem.setOutputStartTime(kwargs['start_time'])
    if 'step_number' in kwargs:
        problem.setStepNumber(kwargs['step_number'])
    if 'intervals' in kwargs:
        # `intervals` is an alias for `step_number`; when both are given,
        # `intervals` wins because it is applied second
        problem.setStepNumber(kwargs['intervals'])
    if 'stepsize' in kwargs:
        problem.setStepSize(kwargs['stepsize'])
    if 'values' in kwargs:
        vals = kwargs['values']
        if type(vals) != str:
            # COPASI expects the explicit output times as one space-separated string
            new_vals = ''
            for val in vals:
                new_vals += ' ' + str(val)
            vals = new_vals.strip()
        problem.setValues(vals)
        problem.setUseValues(True)
    if 'use_values' in kwargs:
        problem.setUseValues(kwargs['use_values'])
    # positional arguments override the keyword settings above and always
    # switch back to interval-based output
    if num_args == 3:
        problem.setOutputStartTime(args[0])
        problem.setDuration(args[1])
        problem.setStepNumber(args[2])
        problem.setUseValues(False)
    elif num_args == 2:
        problem.setDuration(args[0])
        problem.setStepNumber(args[1])
        problem.setUseValues(False)
    elif num_args > 0:
        problem.setDuration(args[0])
    problem.setTimeSeriesRequested(True)
    method = task.getMethod()
    # method parameters are only set when the currently active method exposes them
    if 'seed' in kwargs and method.getParameter('Random Seed'):
        method.getParameter('Random Seed').setIntValue(int(kwargs['seed']))
    if 'use_seed' in kwargs and method.getParameter('Random Seed'):
        method.getParameter('Use Random Seed').setBoolValue(bool(kwargs['use_seed']))
    if 'a_tol' in kwargs and method.getParameter('Absolute Tolerance'):
        method.getParameter('Absolute Tolerance').setDblValue(float(kwargs['a_tol']))
    if 'r_tol' in kwargs and method.getParameter('Relative Tolerance'):
        method.getParameter('Relative Tolerance').setDblValue(float(kwargs['r_tol']))
    if 'max_steps' in kwargs and method.getParameter('Max Internal Steps'):
        method.getParameter('Max Internal Steps').setIntValue(int(kwargs['max_steps']))
    if 'settings' in kwargs:
        # raw task settings are applied last so they can override everything above
        model_info.set_task_settings(task, kwargs['settings'])
    return task, use_initial_values
|
<reponame>ever391/base-crawler<gh_stars>1-10
# coding:utf8
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from basecrawler import BaseCrawler, BeautifulSoup
import pymysql
from collections import OrderedDict
import re
import pymongo
class LianJia(BaseCrawler):
    """Crawler for Lianjia (bj.lianjia.com) residential-community listings.

    Walks the paginated list pages, scrapes every community detail page and
    persists each record to MongoDB and MySQL.
    """

    def __init__(self):
        # charset='utf8mb4' on the connection replaces the previous trio of
        # redundant "SET NAMES utf8mb4" statements and the no-op
        # `self.mysqldb.charset = "utf8"` assignment
        self.mysqldb = pymysql.connect("127.0.0.1", "root", "", "xiaoqu", charset="utf8mb4")
        self.cursor = self.mysqldb.cursor()
        self.mongo = pymongo.MongoClient(['127.0.0.1:27017'], maxPoolSize=10)
        self.mongodb = self.mongo["wechat"]
        super(LianJia, self).__init__()

    def run(self):
        """Crawl list pages 1..399 and scrape every community found on them."""
        page = 1
        while 1:
            url = "https://bj.lianjia.com/xiaoqu/pg%s/" % page
            print(page)
            self.get_content_urls(url)
            page += 1
            if page == 400:
                break

    def get_content_urls(self, url):
        """Fetch one list page and scrape every detail link on it."""
        try:
            resp = self.requests_get(url)
        except Exception as e:
            # %s placeholder added: the exception text used to be dropped
            self.logger.error("req failure: %s", str(e))
            return
        soup = BeautifulSoup(resp.text, 'lxml')
        for node_a in soup.select('div.info div.title a'):
            try:
                content_url = node_a["href"]
            except Exception as e:
                print(str(e))
                continue
            print(content_url)
            data = self.get_content(content_url)
            if not data:
                data = OrderedDict()
            data["list_url"] = url
            data["content_url"] = content_url
            self.insert_data(data)

    def _first_text(self, soup, selector):
        """Stripped text of the first node matching selector (IndexError when absent)."""
        return soup.select(selector)[0].get_text().strip()

    def get_content(self, url):
        """Scrape one community detail page; every field is filled best-effort."""
        try:
            resp = self.requests_get(url)
        except Exception as e:
            self.logger.error("req content page failure: %s", str(e))
            return
        soup = BeautifulSoup(resp.text, 'lxml')
        data = OrderedDict()
        try:
            data["name"] = self._first_text(soup, "h1.detailTitle")
        except Exception as e:
            self.logger.error("get name field failure: %s", str(e))
        try:
            data["address"] = self._first_text(soup, "div.detailDesc")
        except Exception as e:
            self.logger.error("get address field failure: %s", str(e))
        try:
            data["price"] = self._first_text(soup, "span.xiaoquUnitPrice")
        except Exception as e:
            self.logger.error("get price field failure: %s", str(e))
        try:
            create_year = self._first_text(soup, "div.xiaoquInfo > div:nth-of-type(1) > span.xiaoquInfoContent")
            data["create_year"] = self.get_number(create_year)
        except Exception as e:
            self.logger.error("get create_year field failure: %s", str(e))
        try:
            data["developer"] = self._first_text(soup, "div.xiaoquInfo > div:nth-of-type(5) > span.xiaoquInfoContent")
        except Exception as e:
            self.logger.error("get developer field failure: %s", str(e))
        try:
            buildings = self._first_text(soup, "div.xiaoquInfo > div:nth-of-type(6) > span.xiaoquInfoContent")
            data["buildings"] = self.get_number(buildings)
        except Exception as e:
            self.logger.error("get buildings field failure: %s", str(e))
        try:
            total = self._first_text(soup, "div.xiaoquInfo > div:nth-of-type(7) > span.xiaoquInfoContent")
            data["total"] = self.get_number(total)
        except Exception as e:
            self.logger.error("get total field failure: %s", str(e))
        try:
            data["province"] = self._first_text(soup, "div.fl.l-txt a:nth-of-type(2)")
        except Exception as e:
            self.logger.error("get province field failure: %s", str(e))
        try:
            data["city"] = self._first_text(soup, "div.fl.l-txt a:nth-of-type(3)")
        except Exception as e:
            self.logger.error("get city field failure: %s", str(e))
        return data

    def get_number(self, text):
        """Extract the first run of digits from text.

        Raises AttributeError when no digits are present; callers wrap this
        in try/except and simply skip the field.
        """
        return re.search(r'\d+', text, flags=re.S).group()

    def insert_data(self, data):
        """Persist one scraped record to MongoDB and MySQL."""
        try:
            # NOTE(review): Collection.insert is deprecated in pymongo 3;
            # kept for behaviour (it also injects _id into `data`)
            self.mongodb["xiaoqu"].insert(data)
        except Exception as e:
            self.logger.error("mongo insert failure: %s", str(e))
        # parameterised query: the previous str.format interpolation broke on
        # quotes in scraped text and was an SQL-injection hazard
        sql = (u'insert into xiaoqu(id, province, city, `name`, total, price, create_year, '
               u'developer, buildings, list_url, content_url, address) '
               u'VALUES (null, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')
        params = (
            data.get("province", u"北京"),
            data.get("city", u""),
            data.get("name", u""),
            data.get("total", 0),
            data.get("price", 0),
            data.get("create_year", 0),
            data.get("developer", u""),
            data.get("buildings", 0),
            data.get("list_url", u""),
            data.get("content_url", u""),
            data.get("address", u""),
        )
        self.cursor.execute(sql, params)
        self.mysqldb.commit()
if __name__ == "__main__":
    # entry point: crawl all list pages (stray trailing "|" artifact removed —
    # it made the file unparseable)
    lj = LianJia()
    lj.run()
#!/usr/bin/env python3
import pysam
import numpy as np
import os
import sys
import logging
class ReconGene:
    """Reconstruct a candidate fusion-gene sequence from split reads.

    Given the reads aligning to gene A and gene B, infer the fusion
    breakpoint on each gene from soft-clip patterns, then stitch the two
    reference sequences together at those breakpoints.
    """

    def __init__(self, tupleOfReadsA, tupleOfReadsB, fusion_idx_dir):
        self.readsA = tupleOfReadsA
        self.readsB = tupleOfReadsB
        self.GeneAName = tupleOfReadsA[0].reference_name
        self.GeneBName = tupleOfReadsB[0].reference_name
        # +1: A--->B orientation, -1: B--->A, None: undetermined
        self.fusionOrient = None
        # breakpoints, from 0, [start, stop): start 0-based inclusive, stop exclusive
        self.GeneAPointPos = 0
        self.GeneBPointPos = 0
        self.Aseq = ""
        self.Bseq = ""
        self.fusion_idx_dir = fusion_idx_dir
        # fusion name ("A-B" / "B-A") -> reconstructed sequence
        self.reconstructed_seqs = {}

    @staticmethod
    def _breakpoint_candidates(reads):
        """Collect breakpoint candidates from the soft-clipped side of each read.

        A positive candidate marks a breakpoint at the end of the aligned
        segment (read clipped on the right); a negative candidate marks one at
        the start (clipped on the left).  When a short (<5bp) match followed by
        a skip sits next to the clip, the breakpoint is moved past that
        spurious anchor.  Nearby candidates are merged by Merge_breakpointer.
        """
        candidates = []
        for read in reads:
            if read.reference_start is None or read.reference_end is None:
                continue
            cigar = read.cigartuples
            left_clip = read.query_alignment_start
            right_clip = read.infer_read_length() - read.query_alignment_end
            if left_clip > right_clip:  # e.g. 11S64M0S: clipped on the left
                if len(cigar) >= 4 and cigar[1][0] == 0 and cigar[2][0] == 3 and cigar[1][1] < 5:
                    candidates.append(-int(read.reference_start + cigar[1][1] + cigar[2][1]))
                else:
                    candidates.append(-int(read.reference_start))
            else:  # e.g. 62M13S: clipped on the right
                if len(cigar) >= 4 and cigar[-2][0] == 0 and cigar[-3][0] == 3 and cigar[-2][1] < 5:
                    candidates.append(read.reference_end - cigar[-2][1] + cigar[-3][1])
                else:
                    candidates.append(read.reference_end)
        return Merge_breakpointer(dict(zip(*np.unique(candidates, return_counts=True))))

    @staticmethod
    def _most_supported(candidates):
        """Position with the highest read support.

        Uses sorted(...)[-1] instead of max() on purpose, to keep the original
        tie-breaking (the last of equally-supported positions wins).
        """
        return (sorted(candidates.items(), key=lambda item: item[1])[-1])[0]

    def find_fusionpoint(self, readsA, readsB):
        """Infer fusion orientation and the breakpoint on each gene.

        :return: (orientation flag 1/-1 or None, breakpoint on gene A,
                  breakpoint on gene B); (None, 0, 0) when no consistent
                  breakpoint pair is found.
        """
        posA = self._breakpoint_candidates(readsA)
        posB = self._breakpoint_candidates(readsB)
        plusA = sum(count for pos, count in posA.items() if pos > 0)
        minusA = sum(count for pos, count in posA.items() if pos < 0)
        plusB = sum(count for pos, count in posB.items() if pos > 0)
        minusB = sum(count for pos, count in posB.items() if pos < 0)

        # bug fix: the second operand used to test posA twice, letting an
        # empty posB through to the selection below
        if len(posA) > 0 and len(posB) > 0:
            if (plusA + minusB) > (plusB + minusA):
                orient_flag = 1  # A is the 5' partner, B the 3' partner
            else:
                orient_flag = -1  # B is the 5' partner, A the 3' partner
            if orient_flag == 1:
                five_prime = dict(item for item in posA.items() if item[0] > 0)
                three_prime = dict(item for item in posB.items() if item[0] < 0)
                if len(five_prime) > 0 and len(three_prime) > 0:
                    return orient_flag, self._most_supported(five_prime), -int(self._most_supported(three_prime))
            else:
                three_prime = dict(item for item in posA.items() if item[0] < 0)
                five_prime = dict(item for item in posB.items() if item[0] > 0)
                if len(three_prime) > 0 and len(five_prime) > 0:
                    return orient_flag, -int(self._most_supported(three_prime)), self._most_supported(five_prime)
        return None, 0, 0

    def get_paired_point(self):
        """Compute and store orientation and breakpoints for this read pair."""
        self.fusionOrient, self.GeneAPointPos, self.GeneBPointPos = self.find_fusionpoint(self.readsA, self.readsB)

    @staticmethod
    def _read_named_seq(path, gene_name):
        """Return the sequence text recorded under a header containing gene_name.

        NOTE(review): only the first line after each matching header is
        captured (the flag resets after one line), which assumes the reference
        fasta stores each sequence on a single line -- confirm.
        """
        fasta = ''
        record = False
        with open(path) as handle:
            for line in handle:
                if line.startswith('>'):
                    record = gene_name in line
                    continue
                if record:
                    fasta += line
                    record = False
        return fasta

    def readseq(self):
        """Read the reference sequences of gene A (U index) and gene B (V index)."""
        Afasta = self._read_named_seq(
            os.path.join(self.fusion_idx_dir, "fusiongenes_ref_U.fa"), self.GeneAName)
        Bfasta = self._read_named_seq(
            os.path.join(self.fusion_idx_dir, "fusiongenes_ref_V.fa"), self.GeneBName)
        # strip embedded newlines before returning
        return ''.join(Afasta.split('\n')), ''.join(Bfasta.split('\n'))

    def write_inferred_fusion_gene(self):
        """Reconstruct both fusion orientations and append them to temp/inferred_fusion.fa."""
        self.Aseq, self.Bseq = (seq.upper() for seq in self.readseq())
        self.reconstructed_seqs[self.GeneAName + '-' + self.GeneBName] = connect_sequence(
            self.Aseq[:self.GeneAPointPos], self.Bseq[self.GeneBPointPos:])
        self.reconstructed_seqs[self.GeneBName + '-' + self.GeneAName] = connect_sequence(
            self.Bseq[:self.GeneBPointPos], self.Aseq[self.GeneAPointPos:])
        with open("temp/inferred_fusion.fa", 'a') as f:
            records = ''
            for fusiongene in self.reconstructed_seqs.keys():
                if self.reconstructed_seqs[fusiongene] != "":
                    records += '\n'.join(['>' + fusiongene, self.reconstructed_seqs[fusiongene], ''])
            f.write(records)
def connect_sequence(seq_head, seq_tail):
    """Join two sequence fragments, collapsing a duplicated junction overlap.

    The head is upper-cased and the tail lower-cased so the junction stays
    visible in the output.  When the tail begins with characters mirroring the
    head's tail end, that overlapping prefix (probed up to half the shorter
    fragment) is dropped from the tail.

    :param seq_head: 5' fragment
    :param seq_tail: 3' fragment
    :return: joined sequence, or "" when either fragment is empty
    """
    if seq_tail == "" or seq_head == "":
        return ""
    seq_head = seq_head.upper()
    seq_tail = seq_tail.lower()
    if seq_head[-1] != seq_tail[0].upper():
        # no overlap at the junction: plain concatenation
        return seq_head + seq_tail
    # bug fix: common_len is pre-set to 1 (one char is known to overlap), so
    # short fragments — where the probe range below is empty — no longer
    # raise NameError on the slice
    common_len = 1
    for common_len in range(1, int(0.5 * min(len(seq_head), len(seq_tail)))):
        if seq_head[-1 - common_len] != seq_tail[0 + common_len].upper():
            break
    return seq_head + seq_tail[common_len:]
def common_seq(seq_head, seq_tail):
    """Return the overlapping junction stretch between head and tail, or ".".

    Mirrors the overlap detection of :func:`connect_sequence`: when the tail
    begins with characters mirroring the head's tail end, the shared stretch
    (taken from the head) is returned; "." signals no overlap or empty input.
    """
    if seq_tail == "" or seq_head == "":
        return "."
    seq_head = seq_head.upper()
    seq_tail = seq_tail.lower()
    if seq_head[-1] != seq_tail[0].upper():
        return "."
    # bug fix: common_len pre-set to 1 — short fragments used to raise
    # NameError when the probe range below was empty
    common_len = 1
    for common_len in range(1, int(0.5 * min(len(seq_head), len(seq_tail)))):
        if seq_head[-1 - common_len] != seq_tail[0 + common_len].upper():
            break
    return seq_head[-common_len:]
def selectReads(queryNameList, Reads):
    """Return the reads whose query name is in queryNameList, as a tuple.

    :param queryNameList: container of read (query) names to keep
    :param Reads: iterable of aligned reads
    """
    return tuple(read for read in Reads if read.query_name in queryNameList)
def Merge_breakpointer(dict_var):
    """Merge breakpoint candidates that lie within 20bp of each other.

    For each close pair, the position with the larger current support absorbs
    the other's count; positions whose count drops to zero are dropped from
    the result.  NOTE: mutates dict_var in place, as before.

    :param dict_var: mapping position -> supporting read count
    :return: new mapping with merged counts and zero entries removed
    """
    positions = list(dict_var.keys())
    for i, pos_i in enumerate(positions):
        for pos_j in positions[i + 1:]:
            if abs(int(pos_i) - int(pos_j)) > 20:
                continue
            # ties go to the earlier position, matching the >= in the original
            if dict_var[pos_i] >= dict_var[pos_j]:
                winner, loser = pos_i, pos_j
            else:
                winner, loser = pos_j, pos_i
            dict_var[winner] += dict_var[loser]
            dict_var[loser] = 0
    return {pos: count for pos, count in dict_var.items() if count != 0}
def build_fusion(fasta_path):
    """Build a hisat2 index for fasta_path (index prefix == the fasta path)."""
    command = "hisat2-build -q {fasta} {idx}".format(fasta=fasta_path, idx=fasta_path)
    os.system(command)
def reconstruct(filtered_U_sam, filtered_V_sam, Gene_coordinate, fusion_idx_dir=''):
    """Reconstruct candidate fusion-gene sequences from filtered alignments.

    :param filtered_U_sam: SAM file of reads aligned against the U gene index
    :param filtered_V_sam: SAM file of reads aligned against the V gene index
    :param Gene_coordinate: path to a tab-separated coordinate file
        (chrom, strand, start, end, ..., gene); the name is re-bound to the
        parsed mapping below
    :param fusion_idx_dir: directory holding fusion_table.tsv and the
        fusiongenes_ref_U/V fasta files
    :return: 0 when candidate fusions were reconstructed and indexed, else None
    """
    # materialise both alignments so they can be iterated multiple times
    Usam = tuple(pysam.AlignmentFile(filtered_U_sam, 'r'))
    Vsam = tuple(pysam.AlignmentFile(filtered_V_sam, 'r'))
    # reference (gene) name -> set of read names supporting it
    refNameDic = {}
    for Uread in Usam:
        # NOTE(review): a '*' reference_name that is not yet in the dict would
        # hit the else-branch and raise KeyError — presumably the filtered
        # input never contains unmapped records; confirm.
        if Uread.reference_name not in refNameDic and Uread.reference_name != '*':
            refNameDic[Uread.reference_name] = set([Uread.query_name])
        else:
            refNameDic[Uread.reference_name].add(Uread.query_name)
    for Vread in Vsam:
        if Vread.reference_name not in refNameDic and Vread.reference_name != '*':
            refNameDic[Vread.reference_name] = set([Vread.query_name])
        else:
            refNameDic[Vread.reference_name].add(Vread.query_name)
    # fusion name ("A-B") -> read names supporting both partner genes
    fusionnameDic = {}
    with open(os.path.join(fusion_idx_dir, "fusion_table.tsv"), 'r') as f:
        for line in f:
            if not line.startswith('#'):
                temp = line.rstrip().split('\t')[0].split('-')
                start = temp[0]
                end = temp[1]
                # gene names may themselves contain '-' (three split parts)
                if len(temp) == 3:
                    # HLA-A
                    start = temp[0] + '-' + temp[1]
                    end = temp[2]
                if start in refNameDic and end in refNameDic:
                    fusionnameDic[line.split('\t')[0]] = refNameDic[start] & refNameDic[end]
    # parse the coordinate file; the parameter name is re-bound from the file
    # path to the parsed {gene: (chrom, strand, start, end)} mapping
    with open(Gene_coordinate, 'r') as f:
        Gene_coordinate = {}
        for line in f:
            Line = ((line.strip('\n')).strip()).split('\t')
            Chr, Strand, Start, End = Line[0], Line[1], Line[2], Line[3]
            Gene = Line[-1]
            Gene_coordinate[Gene] = (Chr, Strand, Start, End)
    # start from a clean slate for this run's output files
    if os.path.isfile("temp/inferred_fusion.fa"):
        os.system("rm -rf temp/inferred_fusion.fa")
    if os.path.isfile("temp/inferred_fusion_results.tsv"):
        os.system("rm -rf temp/inferred_fusion_results.tsv")
    for key in fusionnameDic.keys():
        # require at least two reads supporting the candidate fusion
        if len(fusionnameDic[key]) >= 2:
            recgene = ReconGene(selectReads(fusionnameDic[key], Usam), selectReads(fusionnameDic[key], Vsam),
                                fusion_idx_dir)
            recgene.get_paired_point()
            # NOTE(review): find_fusionpoint signals failure as (None, 0, 0),
            # so the == -1 comparisons below look unreachable; confirm.
            if recgene.fusionOrient == None or recgene.GeneAPointPos == -1 or recgene.GeneBPointPos == -1:  # Judge the breakpoints
                continue
            if Gene_coordinate[recgene.GeneAName][0] != Gene_coordinate[recgene.GeneBName][
                0]:  # A , B in different Chromosomes
                Translocation_flag = True
            else:
                # same chromosome: compute the gap between the two genes
                if int(Gene_coordinate[recgene.GeneAName][2]) < int(Gene_coordinate[recgene.GeneBName][2]):
                    Dist = abs(int(Gene_coordinate[recgene.GeneAName][3]) - int(Gene_coordinate[recgene.GeneBName][2]))
                else:
                    Dist = abs(int(Gene_coordinate[recgene.GeneAName][2]) - int(Gene_coordinate[recgene.GeneBName][3]))
                if Dist > 100000:  # A, B on the same chromosome but far enough apart to count
                    Translocation_flag = True
                else:
                    Translocation_flag = False
            if Translocation_flag == True:
                recgene.write_inferred_fusion_gene()
                # record both orientations (A-B and B-A) of the candidate
                with open("temp/inferred_fusion_results.tsv", 'a') as f:
                    f.write(
                        "{fusion5}\t{fusion3}\t{f5_chr}\t{f5_strand}\t{f5_coords}\t{f5_pos}\t{f3_chr}\t{f3_strand}\t{f3_coords}\t{f3_pos}\t{f5_seq}\t{f3_seq}\t{f53_com}\t{seqlen}\n".format(
                            fusion5=recgene.GeneAName,
                            fusion3=recgene.GeneBName,
                            f5_chr=Gene_coordinate[recgene.GeneAName][0],
                            f5_strand=Gene_coordinate[recgene.GeneAName][1],
                            f5_coords=Gene_coordinate[recgene.GeneAName][2]+':'+Gene_coordinate[recgene.GeneAName][-1],
                            f5_pos=recgene.GeneAPointPos,
                            f3_chr=Gene_coordinate[recgene.GeneBName][0],
                            f3_strand=Gene_coordinate[recgene.GeneBName][1],
                            f3_coords=Gene_coordinate[recgene.GeneBName][2]+':'+Gene_coordinate[recgene.GeneBName][-1],
                            f3_pos=recgene.GeneBPointPos,
                            f5_seq=recgene.Aseq[recgene.GeneAPointPos - 10:recgene.GeneAPointPos],
                            f3_seq=recgene.Bseq[recgene.GeneBPointPos:recgene.GeneBPointPos + 10],
                            f53_com=common_seq(recgene.Aseq[recgene.GeneAPointPos - 10:recgene.GeneAPointPos],
                                               recgene.Bseq[recgene.GeneBPointPos:recgene.GeneBPointPos + 10]),
                            seqlen=str(len(recgene.reconstructed_seqs['-'.join((recgene.GeneAName, recgene.GeneBName))]))))
                    # start:f5_end f3_start:f3_end
                    f.write(
                        "{fusion5}\t{fusion3}\t{f5_chr}\t{f5_strand}\t{f5_coords}\t{f5_pos}\t{f3_chr}\t{f3_strand}\t{f3_coords}\t{f3_pos}\t{f5_seq}\t{f3_seq}\t{f53_com}\t{seqlen}\n".format(
                            fusion5=recgene.GeneBName,
                            fusion3=recgene.GeneAName,
                            f5_chr=Gene_coordinate[recgene.GeneBName][0],
                            f5_strand=Gene_coordinate[recgene.GeneBName][1],
                            f5_coords=Gene_coordinate[recgene.GeneBName][2]+':'+Gene_coordinate[recgene.GeneBName][-1],
                            f5_pos=recgene.GeneBPointPos,
                            f3_chr=Gene_coordinate[recgene.GeneAName][0],
                            f3_strand=Gene_coordinate[recgene.GeneAName][1],
                            f3_coords=Gene_coordinate[recgene.GeneAName][2]+':'+Gene_coordinate[recgene.GeneAName][-1],
                            f3_pos=recgene.GeneAPointPos,
                            f5_seq=recgene.Bseq[recgene.GeneBPointPos - 10:recgene.GeneBPointPos],
                            f3_seq=recgene.Aseq[recgene.GeneAPointPos:recgene.GeneAPointPos + 10],
                            f53_com=common_seq(recgene.Bseq[recgene.GeneBPointPos - 10:recgene.GeneBPointPos],
                                               recgene.Aseq[recgene.GeneAPointPos:recgene.GeneAPointPos + 10]),
                            seqlen=str(
                                len(recgene.reconstructed_seqs['-'.join((recgene.GeneBName, recgene.GeneAName))]))))
    if fusionnameDic != {}:
        # index the reconstructed sequences for the follow-up alignment step
        build_fusion("temp/inferred_fusion.fa")
        return 0
    return None
if __name__ == "__main__":
    # CLI entry point; expects: filtered_U_sam filtered_V_sam
    # gene_coordinate_file fusion_idx_dir
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
        datefmt='%Y-%m-%d %H:%M:%S %a')
    reconstruct(*sys.argv[1:5])
|
import datetime
from django.core.cache import cache
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.core.models import Page, Site
from wagtail.tests.utils import WagtailTestUtils
from tests.app.models import NewsIndex, NewsItem
def dt(*args):
    """Build a timezone-aware datetime localised to Django's current timezone."""
    tz = timezone.get_current_timezone()
    return datetime.datetime(*args, tzinfo=tz)
def noop(value):
    """Identity transform, used as assertQuerysetEqual's `transform`."""
    return value
class TestNewsList(TestCase, WagtailTestUtils):
    """Tests for the news index listing and its date-based archive views."""

    def setUp(self):
        super(TestNewsList, self).setUp()
        default_site = Site.objects.get(is_default_site=True)
        self.index = NewsIndex(title='News', slug='news')
        default_site.root_page.add_child(instance=self.index)

    def _make_item(self, title, *date_args):
        """Create a NewsItem under the test index, dated via dt()."""
        return NewsItem.objects.create(
            newsindex=self.index, title=title, date=dt(*date_args))

    def _get_subpage(self, view_name, **kwargs):
        """GET a routable subpage of the index and return the response."""
        return self.client.get(
            self.index.url + self.index.reverse_subpage(view_name, kwargs=kwargs))

    def _assert_listed(self, response, expected):
        """Assert the response lists exactly the expected news items."""
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'], expected, transform=noop)

    def test_index(self):
        first = self._make_item('One post', 2015, 8, 24, 0, 0, 0)
        second = self._make_item('Two post', 2015, 8, 24, 0, 0, 0)
        response = self.client.get(self.index.url)
        self._assert_listed(response, [first, second])

    def test_archive_year(self):
        self._make_item('2015', 2015, 8, 24, 0, 0, 0)
        target = self._make_item('2014', 2014, 8, 24, 0, 0, 0)
        self._make_item('2013', 2013, 8, 24, 0, 0, 0)
        response = self._get_subpage('year', year='2014')
        self._assert_listed(response, [target])

    def test_archive_month(self):
        self._make_item('2015-08-24', 2015, 8, 24, 0, 0, 0)
        target = self._make_item('2015-07-24', 2015, 7, 24, 0, 0, 0)
        self._make_item('2015-06-24', 2015, 6, 24, 0, 0, 0)
        self._make_item('2014-07-24', 2014, 7, 24, 0, 0, 0)
        response = self._get_subpage('month', year='2015', month='7')
        self._assert_listed(response, [target])

    def test_archive_day(self):
        self._make_item('2015-08-24', 2015, 8, 24, 12, 0, 0)
        target = self._make_item('2015-08-23', 2015, 8, 23, 12, 0, 0)
        self._make_item('2015-08-22', 2015, 8, 22, 12, 0, 0)
        self._make_item('2015-07-23', 2015, 7, 23, 12, 0, 0)
        response = self._get_subpage('day', year='2015', month='8', day='23')
        self._assert_listed(response, [target])
@override_settings(ALLOWED_HOSTS=['localhost', 'site-a.com', 'site-b.org'])
class TestMultipleSites(TestCase, WagtailTestUtils):
    """News indices hosted on two separate sites must remain independent."""

    def setUp(self):
        super(TestMultipleSites, self).setUp()
        root = Page.objects.get(pk=1)

        root_a = Page(title='Home A', slug='home-a')
        root.add_child(instance=root_a)
        root_b = Page(title='Home B', slug='home-b')
        root.add_child(instance=root_b)

        self.index_a = NewsIndex(title='News A', slug='news-a')
        root_a.add_child(instance=self.index_a)
        self.index_b = NewsIndex(title='News B', slug='news-b')
        root_b.add_child(instance=self.index_b)

        self.site_a = Site.objects.create(hostname='site-a.com', root_page=root_a)
        self.site_b = Site.objects.create(hostname='site-b.org', root_page=root_b)

        self.item_a = NewsItem.objects.create(
            newsindex=self.index_a, title='Post A', date=dt(2015, 8, 1))
        self.item_b = NewsItem.objects.create(
            newsindex=self.index_b, title='Post B', date=dt(2015, 8, 2))

    @classmethod
    def tearDownClass(cls):
        super(TestMultipleSites, cls).tearDownClass()
        # Clear site cache when the tests finish to prevent other tests being
        # polluted by a stale cache.
        cache.delete('wagtail_site_root_paths')

    def _get_for_site(self, url, site):
        """GET a URL with the Host header set for the given site."""
        return self.client.get(url, HTTP_HOST=site.hostname)

    def test_index(self):
        for index, site, item in (
                (self.index_a, self.site_a, self.item_a),
                (self.index_b, self.site_b, self.item_b)):
            response = self._get_for_site(index.url, site)
            self.assertIn('newsitem_list', response.context)
            self.assertQuerysetEqual(
                response.context['newsitem_list'], [item], transform=noop)

    def test_item_url(self):
        for site, index, item, day in (
                (self.site_a, self.index_a, self.item_a, 1),
                (self.site_b, self.index_b, self.item_b, 2)):
            expected = 'http://{}/{}/2015/8/{}/{}-{}/'.format(
                site.hostname, index.slug, day, item.pk, item.get_nice_url())
            self.assertEqual(item.url(), expected)

    def test_item(self):
        for site, item in ((self.site_a, self.item_a), (self.site_b, self.item_b)):
            response = self._get_for_site(item.url(), site)
            self.assertEqual(response.status_code, 200)
            self.assertIn('newsitem', response.context)
            self.assertEqual(response.context['newsitem'], item)
|
<gh_stars>0
import traceback, logging
from datetime import datetime, timedelta, date
from decimal import Decimal
from dateutil.relativedelta import relativedelta
from django.urls import reverse
from django.test import TestCase, override_settings
from django.utils.timezone import localtime, now
from django.contrib.auth.models import User
from nadine.models.billing import BillingBatch, UserBill, Payment
from nadine.models.membership import MembershipPackage, SubscriptionDefault
from nadine.models.membership import Membership, ResourceSubscription
from nadine.models.organization import Organization
from nadine.models.resource import Resource, Room
from nadine.models.usage import CoworkingDay, Event
# Reference dates shared by the billing tests, all relative to "today" in the
# local timezone.  timedelta handles fixed day offsets; relativedelta handles
# calendar-aware month offsets.
today = localtime(now()).date()
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
one_week_from_now = today + timedelta(days=7)
one_month_from_now = today + relativedelta(months=1)
one_month_ago = today - relativedelta(months=1)
two_months_ago = today - relativedelta(months=2)
two_weeks_ago = today - timedelta(days=14)
two_weeks_from_now = today + timedelta(days=14)
two_months_from_now = today + relativedelta(months=2)
def print_all_bills(user):
    """Dump every UserBill belonging to `user` to stdout (debug helper)."""
    for user_bill in UserBill.objects.filter(user=user):
        print_bill(user_bill)
def print_bill(bill):
    """Pretty-print one UserBill and its line items to stdout (debug helper)."""
    print("UserBill %d" % bill.id)
    print(" user: %s" % bill.user)
    print(" due_date: %s" % bill.due_date)
    print(" period_start: %s" % bill.period_start)
    print(" period_end: %s" % bill.period_end)
    if bill.is_closed:
        print(" closed_ts: %s" % bill.closed_ts)
    print(" amount: $%s" % bill.amount)
    print(" line_items:")
    for line_item in bill.line_items.all().order_by('id'):
        print(" %s: $%s" % (line_item.description, line_item.amount))
@override_settings(SUSPEND_MEMBER_ALERTS=True)
class BillingTestCase(TestCase):
def setUp(self):
    """Build the catalog of membership packages shared by every billing test."""
    # Turn on logging for nadine models
    # logging.getLogger('nadine.models').setLevel(logging.DEBUG)
    logging.getLogger('nadine.models').setLevel(logging.INFO)

    day = Resource.objects.day_resource

    def package_with_defaults(name, *defaults):
        # Create a MembershipPackage plus one SubscriptionDefault per kwargs dict.
        package = MembershipPackage.objects.create(name=name)
        for kwargs in defaults:
            SubscriptionDefault.objects.create(package=package, **kwargs)
        return package

    # Day-pass packages: monthly rate, included day allowance, per-day overage.
    self.advocatePackage = package_with_defaults(
        'Advocate',
        dict(resource=day, monthly_rate=30, overage_rate=20),
    )
    self.basicPackage = package_with_defaults(
        'Basic',
        dict(resource=day, monthly_rate=50, allowance=3, overage_rate=15),
    )
    self.pt5Package = package_with_defaults(
        'PT5',
        dict(resource=day, monthly_rate=75, allowance=5, overage_rate=20),
    )
    self.pt10Package = package_with_defaults(
        'PT10',
        dict(resource=day, monthly_rate=180, allowance=10, overage_rate=20),
    )
    self.pt15Package = package_with_defaults(
        'PT15',
        dict(resource=day, monthly_rate=225, allowance=15, overage_rate=20),
    )
    # Resident bundles a dedicated desk with five included coworking days.
    self.residentPackage = package_with_defaults(
        'Resident',
        dict(resource=Resource.objects.desk_resource, monthly_rate=475, allowance=1, overage_rate=0),
        dict(resource=day, monthly_rate=0, allowance=5, overage_rate=20),
    )
    # Team packages (T20 / T40) carry large day allowances for the lead.
    self.t20Package = package_with_defaults(
        'T20',
        dict(resource=day, monthly_rate=360, allowance=20, overage_rate=20),
    )
    self.t40Package = package_with_defaults(
        'T40',
        dict(resource=day, monthly_rate=720, allowance=40, overage_rate=20),
    )
    # Team members ride on their lead's package; the subscription itself is free.
    self.teamPackage = package_with_defaults(
        'Team',
        dict(resource=day, monthly_rate=0, allowance=0, overage_rate=0),
    )
    self.eventPackage = package_with_defaults(
        'Events',
        dict(resource=Resource.objects.event_resource, monthly_rate=100, allowance=10, overage_rate=20),
    )
def test_drop_in_on_billing_date_is_associated_with_correct_bill(self):
    """A coworking day on a bill-period boundary lands on the new period's bill.

    PT5 runs 5/20/2010 - 6/19/2010, then Basic starts 6/20/2010, with daily
    activity 6/11/2010 through 6/24/2010 (range(11, 25) creates days 11-24).
    """
    user = User.objects.create(username='member_eight', first_name='Member', last_name='Eight')
    membership = Membership.objects.for_user(user)
    membership.bill_day = 20
    membership.save()
    membership.set_to_package(self.pt5Package, start_date=date(2010, 5, 20), end_date=date(2010, 6, 19))
    membership.set_to_package(self.basicPackage, start_date=date(2010, 6, 20))
    days = {}
    for day in range(11, 25):
        visit_date = date(2010, 6, day)
        days[visit_date] = CoworkingDay.objects.create(user=user, visit_date=visit_date, payment='Bill')
    # Run the billing batch
    batch = BillingBatch.objects.run(start_date=date(2010, 5, 20), end_date=date(2010, 7, 20))
    self.assertTrue(batch.successful)
    self.assertEqual(3, batch.bills.count())
    # May 20th bill = PT5 with 9 days: Total = $75 + 4 * $20 = $155
    self.assertEqual(membership.matching_package(date(2010, 5, 20)), self.pt5Package)
    may_20_bill = user.bills.get(period_start=date(2010, 5, 20))
    self.assertIsNotNone(may_20_bill)
    self.assertEqual(155.00, may_20_bill.amount)
    self.assertEqual(9, may_20_bill.coworking_days().count())
    self.assertIn(days[date(2010, 6, 11)], may_20_bill.coworking_days())
    self.assertIn(days[date(2010, 6, 19)], may_20_bill.coworking_days())
    self.assertNotIn(days[date(2010, 6, 20)], may_20_bill.coworking_days())
    self.assertTrue(may_20_bill.is_closed)
    # June 20th bill = Basic + 2 overage: Total = $50 + 2 * $15 = $80
    june_20_bill = user.bills.get(period_start=date(2010, 6, 20))
    self.assertIsNotNone(june_20_bill)
    self.assertEqual(80, june_20_bill.amount)
    self.assertEqual(5, june_20_bill.coworking_days().count())
    self.assertNotIn(days[date(2010, 6, 19)], june_20_bill.coworking_days())
    self.assertIn(days[date(2010, 6, 20)], june_20_bill.coworking_days())
    self.assertIn(days[date(2010, 6, 24)], june_20_bill.coworking_days())
    self.assertTrue(june_20_bill.is_closed)
def test_guest_membership_bills(self):
    """Guest members' subscriptions and activity roll up onto the host's bill.

    Users 6 & 7 hold PT5 starting 1/1/2008; user 7 is paid by (a guest of)
    user 6. User 8 has no membership but one hosted visit.
    """
    user6 = User.objects.create(username='member_six', first_name='Member', last_name='Six')
    user6_membership = Membership.objects.for_user(user6)
    user6_membership.set_to_package(self.pt5Package, start_date=date(2008, 1, 1), end_date=None, bill_day=1)
    user7 = User.objects.create(username='member_seven', first_name='Member', last_name='Seven')
    user7_membership = Membership.objects.for_user(user7)
    user7_membership.set_to_package(self.pt5Package, start_date=date(2008, 1, 1), paid_by=user6, bill_day=1)
    user8 = User.objects.create(username='member_eight', first_name='Member', last_name='Eight')
    # User 7 has daily activity 6/1/2010 through 6/15/2010
    days = {}
    for day in range(1, 16):
        visit_date = date(2010, 6, day)
        days[visit_date] = CoworkingDay.objects.create(user=user7, visit_date=visit_date, payment='Bill')
    # User 8 has one visit on 6/1/2010 as a guest of User 6
    # (previous comment said 6/10; the code creates the visit on 6/1)
    user8_visit = CoworkingDay.objects.create(user=user8, paid_by=user6, visit_date=date(2010, 6, 1), payment='Bill')
    # Run the billing batch for June only
    batch = BillingBatch.objects.run(start_date=date(2010, 6, 1), end_date=date(2010, 6, 30))
    self.assertTrue(batch.successful)
    # User 7 is a guest of User 6
    self.assertTrue(user7.profile.is_guest())
    self.assertIn(user6, user7.profile.hosts())
    self.assertIn(user7, user6.profile.guests())
    # Two PT5 packages plus 6 overage days: $75 * 2 + 6 * $20 = $270
    bill = user6.bills.get(period_start=date(2010, 6, 1))
    self.assertEqual(270, bill.amount)
    self.assertEqual(10, bill.resource_allowance(Resource.objects.day_resource))
    self.assertEqual(16, bill.coworking_days().count())
    self.assertIn(user8_visit, bill.coworking_days())
    # User 6 owes the full $270; the guests owe nothing
    self.assertEqual(270, user6.profile.outstanding_amount)
    self.assertEqual(0, user7.profile.outstanding_amount)
    self.assertEqual(0, user8.profile.outstanding_amount)
def test_change_bill_day(self):
    """Moving the bill day re-buckets open-period activity onto the new period."""
    # PT5 from 1/10/2010 billed on the 10th
    user = User.objects.create(username='test_user', first_name='Test', last_name='User')
    membership = Membership.objects.for_user(user)
    membership.bill_day = 10
    # NOTE(review): sibling tests call membership.save() before set_to_package();
    # presumably set_to_package() persists bill_day itself -- confirm.
    membership.set_to_package(self.pt5Package, start_date=date(2010, 1, 10))
    self.assertEqual(10, membership.bill_day)
    # Three days of activity on 5/9, 5/15, and 6/2
    day1 = CoworkingDay.objects.create(user=user, visit_date=date(2010, 5, 9), payment='Bill')
    day2 = CoworkingDay.objects.create(user=user, visit_date=date(2010, 5, 15), payment='Bill')
    day3 = CoworkingDay.objects.create(user=user, visit_date=date(2010, 6, 2), payment='Bill')
    # Generate bills for the three days we created
    batch = BillingBatch.objects.run(start_date=date(2010, 5, 9), end_date=date(2010, 6, 2))
    self.assertTrue(batch.successful)
    self.assertEqual(2, batch.bills.count())
    print_all_bills(user)
    # Day 1 ended up on April 10th bill
    april_10_bill = user.bills.get(period_start=date(2010, 4, 10))
    self.assertIn(day1, april_10_bill.coworking_days())
    self.assertNotIn(day2, april_10_bill.coworking_days())
    self.assertNotIn(day3, april_10_bill.coworking_days())
    # Days 2 and 3 ended up on May 10th bill
    may_10_bill = user.bills.get(period_start=date(2010, 5, 10))
    self.assertNotIn(day1, may_10_bill.coworking_days())
    self.assertIn(day2, may_10_bill.coworking_days())
    self.assertIn(day3, may_10_bill.coworking_days())
    # April bill is closed, May bill is still open
    self.assertTrue(april_10_bill.is_closed)
    self.assertTrue(may_10_bill.is_open)
    # Change the bill date to the 1st
    membership.change_bill_day(date(2010, 6, 1))
    self.assertEqual(1, membership.bill_day)
    # Generate the bills again
    batch = BillingBatch.objects.run(start_date=date(2010, 5, 1), end_date=date(2010, 6, 2))
    self.assertTrue(batch.successful)
    self.assertEqual(1, batch.bills.count())
    print_all_bills(user)
    # Make sure the 6/2 day got on the new June bill
    june_1_bill = user.bills.get(period_start=date(2010, 6, 1))
    self.assertNotIn(day1, june_1_bill.coworking_days())
    self.assertNotIn(day2, june_1_bill.coworking_days())
    self.assertIn(day3, june_1_bill.coworking_days())
def test_start_package(self):
    """A new user who starts PT5 on their join date gets a $75 monthly bill."""
    # New user joins and starts a PT5 membership the same day
    user = User.objects.create(username='member_one', first_name='Member', last_name='One')
    membership = Membership.objects.for_user(user)
    membership.bill_day = 1
    membership.set_to_package(self.pt5Package, start_date=date(2017, 6, 1))
    self.assertEqual(1, membership.bill_day)
    # NOTE(review): the magic number 1 is presumably the day resource's ID;
    # sibling tests pass Resource.objects.day_resource -- confirm and align.
    self.assertEqual(5, membership.allowance_by_resource(resource=1))
    self.assertEqual('PT5', membership.package_name())
    # Generate the bill at start
    batch = BillingBatch.objects.run(start_date=date(2017, 6, 1), end_date=date(2017, 7, 1))
    self.assertTrue(batch.successful)
    july_bill = user.bills.get(period_start=date(2017, 7, 1))
    print_bill(july_bill)
    self.assertIsNotNone(july_bill)
    self.assertEqual(75, july_bill.amount)
def test_backdated_new_user_and_membership(self):
    """An Advocate membership backdated two weeks bills from the backdated start."""
    user = User.objects.create(username='member_two', first_name='Member', last_name='Two')
    membership = Membership.objects.for_user(user)
    membership.bill_day = two_weeks_ago.day
    membership.save()
    membership.set_to_package(self.advocatePackage, start_date=two_weeks_ago)
    self.assertEqual('Advocate', membership.package_name())
    next_start_date = membership.next_period_start()
    # Generate bill at start of membership
    batch = BillingBatch.objects.run(start_date=two_weeks_ago, end_date=two_weeks_ago)
    self.assertTrue(batch.successful)
    bill_today = user.bills.get(period_start=two_weeks_ago)
    self.assertEqual(30, bill_today.amount)
    # The due date is the last day of the current bill period
    self.assertEqual(membership.next_period_start() - timedelta(days=1), bill_today.due_date)
    self.assertTrue(bill_today.is_open)
    # Generate the next month's bill
    batch = BillingBatch.objects.run(start_date=next_start_date, end_date=next_start_date)
    self.assertTrue(batch.successful)
    next_bill = user.bills.get(period_start=next_start_date)
    self.assertEqual(bill_today.amount, next_bill.amount)
    self.assertEqual(30, next_bill.amount)
def test_new_user_new_membership_with_end_date(self):
    """A membership with an end date bills this period but never after it ends."""
    user = User.objects.create(username='member_three', first_name='Member', last_name='Three')
    membership = Membership.objects.for_user(user)
    self.assertIsNone(membership.package_name())
    # Set end date one month from now
    end = one_month_from_now - timedelta(days=1)
    membership.bill_day = today.day
    self.assertEqual(today.day, membership.bill_day)
    membership.set_to_package(self.pt10Package, start_date=today, end_date=end)
    self.assertEqual(10, membership.allowance_by_resource(Resource.objects.day_resource))
    self.assertIsNotNone(membership.end_date)
    # No bill generated for the previous month
    last_months_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(last_months_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=one_month_ago).count())
    # Test for current bill
    end_of_this_period = one_month_from_now - timedelta(days=1)
    current_month_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(current_month_batch.successful)
    current_bill = user.bills.get(period_start=today)
    self.assertEqual(end_of_this_period, current_bill.due_date)
    self.assertEqual(180, current_bill.amount)
    # Due to end_date, there should be no bill next month
    next_month_batch = BillingBatch.objects.run(start_date=one_month_from_now, end_date=one_month_from_now)
    self.assertTrue(next_month_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=one_month_from_now).count())
def test_backdated_new_membership_with_end_date(self):
    """A backdated one-month membership bills once and stops at its end date."""
    # Membership start date of two weeks ago and ending in two weeks
    start = two_weeks_ago
    end = (start + relativedelta(months=1)) - timedelta(days=1)
    user = User.objects.create(username='member_four', first_name='Member', last_name='Four')
    membership = Membership.objects.for_user(user)
    self.assertIsNone(membership.package_name())
    # Start PT5 membership two weeks ago
    membership.bill_day = start.day
    membership.set_to_package(self.pt5Package, start_date=start, end_date=end)
    self.assertEqual('PT5', membership.package_name())
    self.assertEqual(5, membership.allowance_by_resource(Resource.objects.day_resource))
    # No previous bill since there was no membership
    last_month_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(last_month_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=one_month_ago).count())
    # Test current bill: full $75 for the one-month period
    current_batch = BillingBatch.objects.run(start_date=start, end_date=end)
    self.assertTrue(current_batch.successful)
    current_bill = user.bills.get(period_start=start)
    self.assertEqual(75, current_bill.amount)
    self.assertEqual(1, current_bill.line_items.all().count())
    # Test next month's bill: none, because the membership ended
    next_months_batch = BillingBatch.objects.run(start_date=end, end_date=one_month_from_now)
    self.assertTrue(next_months_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=one_month_from_now).count())
def test_new_membership_package_paid_by_other_member(self):
    """Subscriptions paid by another member appear only on the payer's bill."""
    user = User.objects.create(username='member_five', first_name='Member', last_name='Five')
    payer = User.objects.create(username='member_nine', first_name='Member', last_name='Nine')
    membership = Membership.objects.for_user(user)
    payer_membership = Membership.objects.for_user(payer)
    # Payer's bill date is the 12th and has no active subscriptions
    payer_membership.bill_day = 12
    payer_membership.save()
    self.assertEqual(0, payer_membership.active_subscriptions().count())
    # Set Resident membership package for user to be paid by another member 'payer'
    membership.bill_day = 15
    membership.save()
    self.assertIsNone(membership.package_name())
    membership.set_to_package(self.residentPackage, start_date=date(2010, 6, 15), paid_by=payer)
    self.assertEqual('Resident', membership.package_name())
    self.assertEqual(15, membership.bill_day)
    # Payer pays for both of the user's active subscriptions
    # (Resident = desk + day resources, i.e. 2 subscriptions, not 3 as the
    # previous comment claimed)
    users_subscriptions = membership.active_subscriptions()
    self.assertEqual(2, users_subscriptions.count())
    for subscription in users_subscriptions:
        self.assertEqual(payer, subscription.paid_by)
    # Generate the bills from the 12th through the 15th
    run_bill_batch = BillingBatch.objects.run(start_date=date(2010, 6, 12), end_date=date(2010, 6, 15))
    self.assertTrue(run_bill_batch.successful)
    # There should be 0 bills for user, but 1 for payer
    self.assertEqual(0, user.bills.count())
    self.assertEqual(1, payer.bills.count())
    print_all_bills(payer)
    payer_bill = payer.bills.get(period_start=date(2010, 6, 15))
    for s in payer_bill.subscriptions():
        self.assertEqual('Resident', s.package_name)
        # Bill is for user membership and not that of payer
        self.assertNotEqual(payer_membership.id, s.membership.id)
        self.assertEqual(membership.id, s.membership.id)
def test_new_t40_team_member(self):
    """A team member's free subscription is billed to the T40 team lead."""
    # Create team lead with T40 package
    team_lead = User.objects.create(username='Team_Lead', first_name='Team', last_name='Lead')
    lead_membership = Membership.objects.for_user(team_lead)
    lead_membership.bill_day = today.day
    lead_membership.set_to_package(self.t40Package, start_date=one_month_ago)
    self.assertEqual('T40', lead_membership.package_name())
    user = User.objects.create(username='Member_Test', first_name='Member', last_name='Test')
    user_membership = Membership.objects.for_user(user)
    user_membership.bill_day = today.day
    self.assertIsNotNone(user_membership.bill_day)
    user_membership.set_to_package(self.teamPackage, start_date=today, paid_by=team_lead)
    self.assertEqual(0, user_membership.allowance_by_resource(Resource.objects.day_resource))
    users_subscriptions = user_membership.active_subscriptions()
    # The team lead pays for the user's single active subscription.
    # BUG FIX: assertTrue(1, count) always passed because the count was
    # treated as the assertion *message*; assertEqual actually checks it.
    self.assertEqual(1, users_subscriptions.count())
    for subscription in users_subscriptions:
        self.assertEqual(team_lead, subscription.paid_by)
    # The bill goes to team_lead (not user) for the full $720
    current_bill_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(current_bill_batch.successful)
    team_lead_bills = team_lead.bills.filter(period_start=today)
    self.assertEqual(1, len(team_lead_bills))
    total = sum(b.amount for b in team_lead_bills)
    self.assertEqual(720, total)
def test_alter_future_subscriptions(self):
    """Re-running billing after altering a future membership updates its bill."""
    start = one_week_from_now
    user = User.objects.create(username='member_future', first_name='Member', last_name='Future')
    # Set membership package of PT5 to start in one week
    membership = Membership.objects.for_user(user)
    membership.bill_day = start.day
    membership.set_to_package(self.pt5Package, start_date=start)
    self.assertEqual(0, membership.active_subscriptions().count())
    # No bill today, but the future bill will be for $75
    todays_bill_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(todays_bill_batch.successful)
    self.assertEqual(0, todays_bill_batch.bills.count())
    self.assertEqual(0, user.bills.count())
    future_bill_batch = BillingBatch.objects.run(start_date=start, end_date=start)
    self.assertTrue(future_bill_batch.successful)
    self.assertEqual(1, future_bill_batch.bills.count())
    future_bill = user.bills.get(period_start=start)
    self.assertEqual(75, future_bill.amount)
    self.assertEqual(1, future_bill.line_items.count())
    # Add a key to the future membership plan
    ResourceSubscription.objects.create(resource=Resource.objects.key_resource, membership=membership, allowance=1, start_date=start, monthly_rate=100, overage_rate=0)
    self.assertEqual(2, membership.active_subscriptions(target_date=start).count())
    self.assertTrue(membership.has_key(target_date=start))
    # Re-billing the start date should now total $75 + $100 = $175
    altered_batch = BillingBatch.objects.run(start_date=start, end_date=start)
    self.assertTrue(altered_batch.successful)
    self.assertEqual(1, altered_batch.bills.count())
    bill_with_key = user.bills.get(period_start=start)
    self.assertEqual(175, bill_with_key.amount)
    self.assertEqual(2, bill_with_key.line_items.count())
def test_returning_member_with_future_subscriptions_and_end_dates(self):
    """A future, fixed-length membership bills a prorated amount at its start."""
    user = User.objects.create(username='member_returning', first_name='Member', last_name='Returning')
    membership = Membership.objects.for_user(user)
    # Start membership package in one week for a length of 2 weeks
    start = date(2010, 6, 7)
    end = date(2010, 6, 21)
    membership.bill_day = start.day
    self.assertIsNone(membership.package_name())
    membership.set_to_package(self.advocatePackage, start_date=start, end_date=end)
    # Subscription is inactive before its start and after its end.
    # BUG FIX: these previously used `is` to compare integers, which only
    # works via CPython's small-int cache (and raises SyntaxWarning on 3.8+).
    self.assertEqual(0, len(membership.active_subscriptions()))
    self.assertEqual(1, len(membership.active_subscriptions(target_date=start)))
    self.assertEqual(0, len(membership.active_subscriptions(target_date=one_month_from_now)))
    # Test bills
    todays_bill_batch = BillingBatch.objects.run(start_date=date(2010, 6, 1), end_date=date(2010, 6, 1))
    self.assertTrue(todays_bill_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=today).count())
    batch_on_start_date = BillingBatch.objects.run(start_date=start, end_date=end - timedelta(days=1))
    self.assertTrue(batch_on_start_date.successful)
    start_date_bill = user.bills.get(period_start=start)
    self.assertIsNotNone(start_date_bill)
    # Bill should be prorated below the full monthly rate
    self.assertLess(start_date_bill.amount, self.advocatePackage.monthly_rate())
def test_current_pt5_adds_key(self):
    """Adding a key subscription mid-stream raises the next PT5 bill to $175."""
    # Create user with PT5 membership package started 2 months ago
    user = User.objects.create(username='member_pt5', first_name='Member', last_name='PT5')
    membership = Membership.objects.for_user(user)
    membership.bill_day = today.day
    membership.set_to_package(self.pt5Package, start_date=two_months_ago)
    # Confirm last month's bill for PT5
    start = today
    last_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=yesterday)
    last_months_bill = user.bills.get(period_start=one_month_ago)
    self.assertTrue(last_batch.successful)
    self.assertEqual(75, last_months_bill.amount)
    for s in last_months_bill.subscriptions():
        self.assertEqual('PT5', s.package_name)
    # Add key subscription today
    ResourceSubscription.objects.create(resource=Resource.objects.key_resource, membership=membership, package_name='PT5', allowance=1, start_date=start, monthly_rate=100, overage_rate=0)
    # BUG FIX: replaced `is 2` identity checks on integers with assertEqual
    # (integer identity is a CPython caching accident, not a guarantee).
    self.assertEqual(2, len(membership.active_subscriptions()))
    self.assertIn(ResourceSubscription.objects.get(resource=Resource.objects.key_resource), membership.active_subscriptions())
    # Test new bill is $175 for PT5 with key
    adjusted_batch = BillingBatch.objects.run(start_date=start, end_date=start)
    self.assertTrue(adjusted_batch.successful)
    current_bill = user.bills.get(period_start=start)
    self.assertEqual(175, current_bill.amount)
    self.assertEqual(2, current_bill.line_items.all().count())
def test_resident_adds_5_coworking_days_today(self):
    """Swapping the 5-day subscription for a 10-day one keeps the bill at $475."""
    # Create user with Resident membership package started 2 months ago
    user = User.objects.create(username='member_ten', first_name='Member', last_name='Ten')
    membership = Membership.objects.for_user(user)
    membership.bill_day = today.day
    membership.set_to_package(self.residentPackage, start_date=two_months_ago)
    day_subscription = ResourceSubscription.objects.get(membership=membership, resource=Resource.objects.day_resource)
    self.assertEqual(5, day_subscription.allowance)
    # Test previous bill to be $475
    previous_bill_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(previous_bill_batch.successful)
    past_bill = user.bills.get(period_start=one_month_ago)
    self.assertEqual(475, past_bill.amount)
    # Change the coworking day subscription from 5 to 10 days by ending the
    # old subscription yesterday and creating a replacement starting today.
    day_subscription.end_date = today - timedelta(days=1)
    day_subscription.save()
    self.assertIsNotNone(day_subscription.end_date)
    ResourceSubscription.objects.create(resource=Resource.objects.day_resource, membership=membership, package_name='Resident', allowance=10, start_date=today, monthly_rate=0, overage_rate=0)
    new_day_subscription = ResourceSubscription.objects.get(membership=membership, resource=Resource.objects.day_resource, end_date=None)
    # BUG FIX: replaced `is 2` identity checks on integers with assertEqual;
    # also removed a stray no-op `day_subscription` expression statement.
    self.assertEqual(2, len(membership.active_subscriptions()))
    self.assertEqual(10, new_day_subscription.allowance)
    # Test billing with updated subscriptions
    todays_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(todays_batch.successful)
    self.assertEqual(1, todays_batch.bills.count())
    current_bill = user.bills.get(period_start=today)
    self.assertEqual(475, current_bill.amount)
    self.assertEqual(2, current_bill.line_items.all().count())
    # Next month's bill stays at $475 as well
    future_batch = BillingBatch.objects.run(start_date=one_month_from_now, end_date=one_month_from_now)
    self.assertEqual(1, future_batch.bills.count())
    future_bill = user.bills.get(period_start=one_month_from_now)
    self.assertEqual(475, future_bill.amount)
def test_team_lead_changes_package(self):
    """Switching the lead from T40 to T20 rebills the whole team at $360."""
    # Create team lead with T40 package started 2 months ago
    lead = User.objects.create(username='member_fourteen', first_name='Member', last_name='Fourteen')
    lead_membership = Membership.objects.for_user(lead)
    lead_membership.bill_day = today.day
    lead_membership.set_to_package(self.t40Package, start_date=two_months_ago)
    # Create team memberships paid by the lead
    team_1 = User.objects.create(username='member_fifteen', first_name='Member', last_name='Fifteen')
    team_1_membership = Membership.objects.for_user(team_1)
    team_1_membership.bill_day = today.day
    team_1_membership.set_to_package(self.teamPackage, start_date=two_months_ago, paid_by=lead)
    team_2 = User.objects.create(username='member_sixteen', first_name='Member', last_name='Sixteen')
    team_2_membership = Membership.objects.for_user(team_2)
    team_2_membership.bill_day = today.day
    team_2_membership.set_to_package(self.teamPackage, start_date=two_months_ago, paid_by=lead)
    # Generate bills for the lead for one month ago:
    # should be $720 with 3 line items under the lead's billing
    last_months_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(last_months_batch.successful)
    self.assertEqual(1, last_months_batch.bills.count())
    lead_original_bills = UserBill.objects.get(user=lead)
    self.assertEqual(3, lead_original_bills.line_items.count())
    self.assertEqual(720, lead_original_bills.amount)
    # Change lead's membership package to T20 and check billing
    lead_membership.end_all()
    lead_membership.set_to_package(self.t20Package, start_date=today)
    self.assertEqual('T20', lead_membership.package_name())
    # Should have a bill with 3 line items and a total of $360
    updated_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    # Consistency fix: assert batch success like the sibling tests do
    self.assertTrue(updated_batch.successful)
    lead_new_bills = UserBill.objects.get(user=lead, period_start=today)
    self.assertEqual(3, lead_new_bills.line_items.count())
    self.assertEqual(360, lead_new_bills.amount)
def test_team_lead_ends_package(self):
    """Ending lead and team subscriptions leaves only the resident's $475 billed to the lead."""
    # Create team lead with T20 package started 2 months ago
    # (earlier comment said t40, but the code sets t20Package)
    lead = User.objects.create(username='member_seventeen', first_name='Member', last_name='Seventeen')
    lead_membership = Membership.objects.for_user(lead)
    lead_membership.bill_day = today.day
    lead_membership.set_to_package(self.t20Package, start_date=two_months_ago)
    # Create team membership paid by the lead
    team_1 = User.objects.create(username='member_eighteen', first_name='Member', last_name='Eighteen')
    team_1_membership = Membership.objects.for_user(team_1)
    team_1_membership.bill_day = today.day
    team_1_membership.set_to_package(self.teamPackage, start_date=two_months_ago, paid_by=lead)
    # A resident whose membership is also paid by the lead
    resident = User.objects.create(username='member_nineteen', first_name='Member', last_name='Nineteen')
    resident_membership = Membership.objects.for_user(resident)
    resident_membership.bill_day = today.day
    resident_membership.set_to_package(self.residentPackage, start_date=two_months_ago, paid_by=lead)
    # Generate last months bills
    # Test for total of 360 + 0 + 475 = $835
    last_months_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(last_months_batch.successful)
    lead_original_bills = UserBill.objects.get(user=lead)
    self.assertEqual(4, lead_original_bills.line_items.count())
    self.assertEqual(835, lead_original_bills.amount)
    # End lead and team members' subscriptions but keep resident
    lead_membership.end_all()
    team_1_membership.end_all()
    # Lead and team member should have 0 active_subscriptions while Resident has 2
    self.assertEqual(0, lead_membership.active_subscriptions().count())
    self.assertEqual(0, team_1_membership.active_subscriptions().count())
    self.assertEqual(2, resident_membership.active_subscriptions().count())
    # Rerun billing - should only be $475 for the resident paid by lead
    updated_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(updated_batch.successful)
    lead_new_bills = UserBill.objects.get(user=lead, period_start=today)
    self.assertEqual(475, lead_new_bills.amount)
def test_pt10_adds_key_next_bill_period(self):
    """Adding a key to PT10 bumps this and future monthly bills to $280."""
    # Create user with PT10 package started 2 months ago
    user = User.objects.create(username='member_twenty', first_name='Member', last_name='Twenty')
    membership = Membership.objects.for_user(user)
    membership.bill_day = today.day
    membership.set_to_package(self.pt10Package, start_date=two_months_ago)
    self.assertEqual('PT10', membership.package_name())
    # Last month's bill should be $180 for the bare PT10 package
    last_months_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(last_months_batch.successful)
    last_months_bill = user.bills.get(period_start=one_month_ago)
    self.assertEqual(180, last_months_bill.amount)
    # Add key subscription (a stray no-op `day_subscription` statement was removed)
    ResourceSubscription.objects.create(resource=Resource.objects.key_resource, membership=membership, package_name='PT10', allowance=1, start_date=today, monthly_rate=100, overage_rate=0)
    self.assertEqual(2, membership.active_subscriptions().count())
    day_subscription = ResourceSubscription.objects.get(membership=membership, resource=Resource.objects.day_resource)
    self.assertEqual(10, day_subscription.allowance)
    # Generate new bill with the key: $280 = $180 + $100
    new_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(new_batch.successful)
    user_bill_with_key = user.bills.get(period_start=today)
    self.assertEqual(280, user_bill_with_key.amount)
    # Next month's bill carries the key as well
    next_batch = BillingBatch.objects.run(start_date=one_month_from_now, end_date=one_month_from_now)
    self.assertTrue(next_batch.successful)
    bill_next_month = user.bills.get(period_start=one_month_from_now)
    self.assertEqual(280, bill_next_month.amount)
def test_pt10_adds_key_halfway_through_bill_period(self):
    """A key added mid-period is prorated onto the already-paid open bill."""
    # PT10 package started two weeks ago, billed on that day of the month
    member = User.objects.create(username='member_twentyone', first_name='Member', last_name='Twentyone')
    membership = Membership.objects.for_user(member)
    membership.bill_day = two_weeks_ago.day
    membership.set_to_package(self.pt10Package, start_date=two_weeks_ago)
    self.assertEqual('PT10', membership.package_name())
    self.assertEqual(1, membership.active_subscriptions().count())
    # The opening bill is $180 for the PT10 membership, then paid in full
    opening_batch = BillingBatch.objects.run(start_date=two_weeks_ago, end_date=two_weeks_ago)
    self.assertTrue(opening_batch.successful)
    opening_bill = member.bills.get(period_start=two_weeks_ago)
    self.assertEqual(180, opening_bill.amount)
    Payment.objects.create(bill=opening_bill, user=member, amount=opening_bill.amount, created_by=member)
    self.assertEqual(0, opening_bill.total_owed)
    self.assertTrue(opening_bill.is_open)
    # Add a key subscription starting today
    ResourceSubscription.objects.create(resource=Resource.objects.key_resource, membership=membership, package_name='PT10', allowance=1, start_date=today, monthly_rate=100, overage_rate=0)
    self.assertEqual(2, membership.active_subscriptions(today).count())
    # Re-billing leaves only the prorated key amount (roughly $50) owed on
    # the still-open two-weeks-ago bill
    rebill_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(rebill_batch.successful)
    rebilled = member.bills.get(period_start=two_weeks_ago)
    self.assertEqual(rebilled.amount - 180, rebilled.total_owed)
def test_change_some_end_dates_when_end_dates_already_exist(self):
    """Ending all but the key subscription leaves a $100 key-only bill."""
    # Resident package with key, started one month ago, with an end date at
    # the close of the next bill period
    start = one_month_ago
    end = (start + relativedelta(months=2)) - timedelta(days=1)
    user = User.objects.create(username='member_twentytwo', first_name='Member', last_name='Twentytwo')
    membership = Membership.objects.for_user(user)
    membership.bill_day = one_month_ago.day
    membership.set_to_package(self.residentPackage, start_date=start, end_date=end)
    ResourceSubscription.objects.create(resource=Resource.objects.key_resource, membership=membership, package_name='Resident', allowance=1, start_date=start, end_date=end, monthly_rate=100, overage_rate=0)
    self.assertEqual('Resident', membership.package_name())
    self.assertEqual(3, membership.active_subscriptions().count())
    self.assertEqual(0, len(membership.active_subscriptions(target_date=two_months_from_now)))
    # Bill at the start date: $575 = $475 (Resident) + $100 (key)
    start_date_batch = BillingBatch.objects.run(start_date=start, end_date=start)
    self.assertTrue(start_date_batch.successful)
    start_bill = user.bills.get(period_start=start)
    self.assertEqual(575, start_bill.amount)
    # No bill after the currently set end_date
    original_end_batch = BillingBatch.objects.run(start_date=two_months_from_now, end_date=two_months_from_now)
    self.assertTrue(original_end_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=two_months_from_now).count())
    # End everything except the key subscription as of yesterday
    for subscription in membership.active_subscriptions():
        if subscription.resource != Resource.objects.key_resource:
            subscription.end_date = yesterday
            subscription.save()
    key_subscription = ResourceSubscription.objects.get(membership=membership, resource=Resource.objects.key_resource)
    day_subscription = ResourceSubscription.objects.get(membership=membership, resource=Resource.objects.day_resource)
    desk_subscription = ResourceSubscription.objects.get(membership=membership, resource=Resource.objects.desk_resource)
    self.assertEqual(end, key_subscription.end_date)
    self.assertNotEqual(key_subscription.end_date, desk_subscription.end_date)
    self.assertNotEqual(key_subscription.end_date, day_subscription.end_date)
    self.assertEqual(desk_subscription.end_date, day_subscription.end_date)
    # Today's bill should total $100 with a single key line item
    new_today_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    # Consistency fix: assert batch success like the sibling tests do
    self.assertTrue(new_today_batch.successful)
    new_today_bill = user.bills.get(period_start=today)
    self.assertEqual(100, new_today_bill.amount)
    self.assertEqual('Resident', membership.package_name())
    self.assertEqual(1, new_today_bill.line_items.all().count())
    # Still no bill after the original end_date
    new_end_batch = BillingBatch.objects.run(start_date=two_months_from_now, end_date=two_months_from_now)
    self.assertTrue(new_end_batch.successful)
    self.assertEqual(0, user.bills.filter(period_start=two_months_from_now).count())
def test_change_all_end_dates_when_end_dates_already_exist(self):
    """Ending every subscription of a membership that already has end dates
    stops all future billing.

    A Resident member (bill day one month ago, subscriptions ending after
    two bill periods) is billed for the first period, then ``end_all`` is
    called for yesterday and no further bills must be generated.
    """
    # Resident package with key, started one month ago, with end_date at
    # the end of the next bill period.
    start = one_month_ago
    end = (start + relativedelta(months=2)) - timedelta(days=1)
    user = User.objects.create(username='member_twentythree', first_name='Member', last_name='Twentythree')
    membership = Membership.objects.for_user(user)
    membership.bill_day = one_month_ago.day
    membership.set_to_package(self.residentPackage, start_date=start, end_date=end)
    ResourceSubscription.objects.create(resource=Resource.objects.key_resource, membership=membership, package_name='Resident', allowance=1, start_date=start, end_date=end, monthly_rate=100, overage_rate=0)
    self.assertEqual('Resident', membership.package_name())
    self.assertEqual(3, membership.active_subscriptions().count())
    self.assertEqual(0, len(membership.active_subscriptions(target_date=two_months_from_now)))
    # First-period bill: $575 = $475 (Resident package) + $100 (key).
    original_bill_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=yesterday)
    self.assertTrue(original_bill_batch.successful)
    original_bill = user.bills.get(period_start=one_month_ago)
    self.assertEqual(575, original_bill.amount)
    # Running the batch past the currently set end_date yields no bill.
    original_end_batch = BillingBatch.objects.run(start_date=two_months_from_now, end_date=two_months_from_now)
    self.assertTrue(original_end_batch.successful)
    self.assertEqual(0, original_end_batch.bills.count())
    original_end_bill = user.bills.filter(period_start=two_months_from_now)
    # BUG FIX: this queryset was fetched but never asserted on.
    self.assertEqual(0, len(original_end_bill))
    # End all resource subscriptions as of yesterday.
    membership.end_all(target_date=yesterday)
    self.assertEqual(0, membership.active_subscriptions().count())
    # There should now be no bill for today.
    new_end_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(new_end_batch.successful)
    new_end_bill = user.bills.filter(period_start=today)
    self.assertEqual(0, len(new_end_bill))
def test_ending_package_yesterday(self):
    """Ending all subscriptions yesterday prevents a new bill today."""
    # Advocate package with start date of one month ago.
    start = one_month_ago
    user = User.objects.create(username='member_twentyfour', first_name='Member', last_name='Twentyfour')
    membership = Membership.objects.for_user(user)
    membership.bill_day = one_month_ago.day
    membership.set_to_package(self.advocatePackage, start_date=start)
    self.assertEqual(1, membership.active_subscriptions().count())
    # Bill the first period while there is no end date ($30 Advocate).
    original_bill_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=one_month_ago)
    self.assertTrue(original_bill_batch.successful)
    original_bill = user.bills.get(period_start=one_month_ago)
    self.assertEqual(30, original_bill.amount)
    # End all subscriptions as of yesterday.
    membership.end_all(target_date=yesterday)
    # IDIOM: assertEqual gives a useful diff on failure, unlike
    # assertTrue(x == y).
    self.assertEqual(0, membership.active_subscriptions().count())
    # Rerun billing now that subscriptions have been ended:
    # there should be NO new bill to be paid.
    ended_bill_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(ended_bill_batch.successful)
    self.assertEqual(0, ended_bill_batch.bills.count())
    new_end_bill = user.bills.filter(period_start=today)
    self.assertEqual(0, len(new_end_bill))
def test_ending_package_at_end_of_bill_period(self):
    """Ending a package on the last day of the bill period keeps the
    current bill but produces no bill for the next period."""
    # PT10 package with start date of one month ago.
    start = one_month_ago
    end = (today + relativedelta(months=1)) - timedelta(days=1)
    user = User.objects.create(username='member_twentyfive', first_name='Member', last_name='Twentyfive')
    membership = Membership.objects.for_user(user)
    membership.bill_day = one_month_ago.day
    membership.set_to_package(self.pt10Package, start_date=start)
    self.assertEqual(1, membership.active_subscriptions().count())
    # Generate today's bill ($180 PT10).
    original_bill_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(original_bill_batch.successful)
    original_bill = user.bills.get(period_start=today)
    self.assertEqual(180, original_bill.amount)
    # End all subscriptions at end of bill period; they are still active today.
    membership.end_all(target_date=end)
    self.assertEqual(1, membership.active_subscriptions().count())
    # Rerun billing now that subscriptions have been ended: still one $180 bill.
    ended_bill_batch = BillingBatch.objects.run(start_date=today, end_date=today)
    self.assertTrue(ended_bill_batch.successful)
    new_bill = user.bills.get(period_start=today)
    self.assertEqual(180, new_bill.amount)
    # There should be no future bill.
    # BUG FIX: was `future_bill_batch = ended_bill_batch = ...`, which
    # silently rebound ended_bill_batch; the future batch's success was
    # also never asserted.
    future_bill_batch = BillingBatch.objects.run(start_date=one_month_from_now, end_date=one_month_from_now)
    self.assertTrue(future_bill_batch.successful)
    end_bill = user.bills.filter(period_start=one_month_from_now)
    self.assertEqual(0, len(end_bill))
def test_ending_package_today(self):
    """Ending a package today means no bill is generated tomorrow."""
    # PT15 package started one month ago + 1 day, so the next billing
    # period starts tomorrow.
    start = one_month_ago + timedelta(days=1)
    user = User.objects.create(username='member_twentysix', first_name='Member', last_name='Twentysix')
    membership = Membership.objects.for_user(user)
    membership.bill_day = start.day
    membership.set_to_package(self.pt15Package, start_date=start)
    self.assertEqual(1, membership.active_subscriptions().count())
    # Generate the first period's bill ($225 PT15).
    today_bill_batch = BillingBatch.objects.run(start_date=start, end_date=start)
    self.assertTrue(today_bill_batch.successful)
    original_bill = user.bills.get(period_start=start)
    self.assertEqual(225, original_bill.amount)
    # Set end date to today: nothing is active tomorrow.
    membership.end_all(target_date=today)
    self.assertEqual(0, membership.active_subscriptions(target_date=tomorrow).count())
    # Check that no bill is generated tomorrow.
    ended_bill_batch = BillingBatch.objects.run(start_date=tomorrow, end_date=tomorrow)
    # CONSISTENCY FIX: sibling tests assert the batch succeeded; this one
    # silently ignored the batch result.
    self.assertTrue(ended_bill_batch.successful)
    bill_after_end_date = user.bills.filter(period_start=tomorrow)
    self.assertEqual(0, len(bill_after_end_date))
def test_room_booking_hours_user_less_than_allowance(self):
    """A booking within the room-hour allowance adds an event line item
    without raising the bill total.

    The event package includes a 10-hour room-booking allowance (per the
    test name); a 6-hour event stays under it, so the bill stays at $100
    with a zero-charge event line.
    """
    # Create subscription for 10 room booking hours.
    start = one_month_ago + timedelta(days=2)
    user = User.objects.create(username='member_twentyseven', first_name='Member', last_name='Twentyseven')
    membership = Membership.objects.for_user(user)
    membership.bill_day = start.day
    membership.set_to_package(self.eventPackage, start_date=start)
    self.assertEqual(1, membership.active_subscriptions().count())
    # Create an event lasting 6 hours (under the allowance).
    event1 = Event.objects.create(user=user, start_ts=localtime(now()) - timedelta(hours=6), end_ts=localtime(now()), room=Room.objects.create(name="Room 1", has_phone=False, has_av=False, floor=1, seats=4, max_capacity=10, default_rate=20.00, members_only=False))
    # Bill one full period: one line for the subscription, one for the event.
    new_bill_batch = BillingBatch.objects.run(start_date=start, end_date=(start + relativedelta(months=1) - timedelta(days=1)))
    self.assertTrue(new_bill_batch.successful)
    user_bill = user.bills.get(period_start=start, period_end=start + relativedelta(months=1) - timedelta(days=1))
    self.assertEqual(100, user_bill.amount)
    # Should have 2 line items: one for the subscription & one for the event.
    self.assertEqual(2, user_bill.line_items.all().count())
def test_room_booking_hour_overage(self):
    """Room-booking hours beyond the allowance are billed as overage.

    A 12-hour event against a 10-hour allowance yields a $140 bill:
    $100 base plus $40 overage (2 extra hours — compare with the
    under-allowance sibling test, which stays at $100).
    """
    # Create subscription for 10 room booking hours.
    start = one_month_ago + timedelta(days=2)
    user = User.objects.create(username='member_twentyeight', first_name='Member', last_name='Twentyeight')
    membership = Membership.objects.for_user(user)
    membership.bill_day = start.day
    membership.set_to_package(self.eventPackage, start_date=start)
    self.assertEqual(1, membership.active_subscriptions().count())
    # Create an event lasting 12 hours (2 hours over the allowance).
    event1 = Event.objects.create(user=user, start_ts=localtime(now()) - timedelta(hours=12), end_ts=localtime(now()), room=Room.objects.create(name="Room 1", has_phone=False, has_av=False, floor=1, seats=4, max_capacity=10, default_rate=20.00, members_only=False))
    # Run billing batch over one full period.
    new_bill_batch = BillingBatch.objects.run(start_date=start, end_date=(start + relativedelta(months=1) - timedelta(days=1)))
    self.assertTrue(new_bill_batch.successful)
    user_bill = user.bills.get(period_start=start, period_end=start + relativedelta(months=1) - timedelta(days=1))
    # The event must be linked to this bill, with a $40 overage included.
    self.assertEqual(event1.bill, user_bill)
    self.assertEqual(140, user_bill.amount)
    # Should have 2 line items: one for the subscription & one for the event.
    self.assertEqual(2, user_bill.line_items.all().count())
# Not ready yet
# def test_room_booking_for_inactive_member(self):
# user = User.objects.create(username='member_twentynine', first_name='Member', last_name='Twentynine')
#
# event1 = Event.objects.create(user=user, start_ts=localtime(now()) - timedelta(hours=36), end_ts=localtime(now())-timedelta(hours=34), room=Room.objects.create(name="Room 1", has_phone=False, has_av=False, floor=1, seats=4, max_capacity=10, default_rate=20.00, members_only=False))
#
# new_bill_batch = BillingBatch.objects.run(start_date=one_month_ago, end_date=today)
# self.assertTrue(new_bill_batch.successful)
#
# user_bill = user.bills.get(period_start=today, period_end=today)
# print_bill(user_bill)
# self.assertEqual(event1.bill, user_bill)
# self.assertEqual(40, user_bill.amount)
# # Should have 1 line items. Just one for the event
# self.assertEqual(1, user_bill.line_items.all().count())
# Copyright 2019 Office Nomads LLC (http://www.officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
<reponame>BCI-NET/FUCONE
"""
==============================================================
Cho2017 - Parameters optimization: Frequency band - FUCONE
===============================================================
This module is designed to select the frequency bands that enhance the accuracy.
"""
# Authors: <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import (
LogisticRegression,
)
from pyriemann.tangentspace import TangentSpace
from moabb.paradigms import LeftRightImagery
from moabb.datasets import (
Cho2017,
)
from fc_pipeline import (
FunctionalTransformer,
EnsureSPD,
WithinSessionEvaluationFCDR,
)
##
# When launched from the repository root ("FUCONE"), descend into Database/
# so all relative output paths resolve there.
cwd_name = os.path.basename(os.getcwd())
if cwd_name == 'FUCONE':
    os.chdir('Database')
basedir = os.getcwd()
##
# NOTE(review): `threshold` and `nb_nodes` appear unused in this script —
# presumably consumed by companion dimension-reduction scripts; confirm.
threshold = [0.05, 0.01, 0.005]
nb_nodes = [5, 10, 15]
# Dataset under study (MOABB Cho2017 left/right motor imagery).
datasets = [Cho2017()]
# list of pre-selected subjects - done
subj = [14, 43, 50, 35, 3, 29, 7, 17, 40, 38]
print( "#################" + "\n"
    "List of pre-selected subjects from Cho2017 (5 best and 5 least performant): " + "\n"
    + str(subj) + "\n"
    "#################")
# list of pre-selected FC (functional connectivity) metrics
spectral_met = ["coh", "imcoh", "plv", "wpli2_debiased", 'instantaneous', 'lagged']
print( "#################" + "\n"
    "List of pre-selected FC metrics: " + "\n"
    + str(spectral_met) + "\n"
    "#################")
# Frequency bands (Hz) to compare; "defaultBand" is the broadband baseline.
freqbands = {
    "delta": [2, 4],
    "theta": [4, 8],
    "alpha": [8, 12],
    "beta": [15, 30],
    "gamma": [30, 45],
    "defaultBand": [8, 35],
}
# For each dataset and frequency band, evaluate every pre-selected FC metric
# with an elastic-net classifier and dump the per-band results to CSV.
for d in datasets:
    d.subject_list = subj
    d.n_sessions = 1
    # Per-dataset directories: CSV results, npz data, channel selections,
    # and figures.
    path_csv_root = basedir + "/1_Dataset-csv/" + d.code
    path_data_root = basedir + "/2_Dataset-npz/" + d.code
    path_data_root_chan = path_data_root + "/Chan_select/"
    path_figures_root = basedir + "/0_Figures/" + d.code
    os.chdir(path_data_root)
    for f in freqbands:
        fmin = freqbands[f][0]
        fmax = freqbands[f][1]
        # Band-pass the paradigm to the current frequency band.
        paradigm = LeftRightImagery(fmin=fmin, fmax=fmax)
        pipelines = {}
        # One pipeline per FC metric: FC estimation -> SPD projection ->
        # tangent-space mapping -> elastic-net logistic regression.
        for sm in spectral_met:
            ft = FunctionalTransformer(
                delta=1, ratio=0.5, method=sm, fmin=fmin, fmax=fmax
            )
            pname_preDR = sm + "+elasticnet"
            pipelines[pname_preDR] = Pipeline(
                steps=[
                    ("sm", ft),
                    ("spd", EnsureSPD()),
                    ("tg", TangentSpace(metric="riemann")),
                    (
                        "LogistReg",
                        LogisticRegression(
                            penalty="elasticnet",
                            l1_ratio=0.15,
                            intercept_scaling=1000.0,
                            solver="saga",
                        ),
                    ),
                ]
            )
        # Within-session evaluation of all pipelines for this band.
        evaluation = WithinSessionEvaluationFCDR(
            fmin=fmin,
            fmax=fmax,
            paradigm=paradigm,
            datasets=[d],
            n_jobs=-1,
            random_state=42,
            return_epochs=True,
            overwrite=True,
        )
        results = evaluation.process(pipelines)
        results['FreqBand'] = f
        results.to_csv(
            path_csv_root
            + "/res_np_single_pipelines_Cho2017_preSelectedSubj_OptFreqBands_"
            + f
            + ".csv"
        )
## script to compare results between datasets & plots
paradigm = LeftRightImagery(fmin=8, fmax=35)
ch_labels = dict()  # dict that contains all the channel labels
all_res_temp = pd.DataFrame()
# Concatenate the per-band result CSVs written above into one frame.
for d in datasets:
    path_csv_root = basedir + "/1_Dataset-csv/" + d.code
    for f in freqbands:
        res = pd.read_csv(
            path_csv_root
            + "/res_np_single_pipelines_Cho2017_preSelectedSubj_OptFreqBands_"
            + f
            + ".csv"
        )
        all_res_temp = pd.concat([all_res_temp, res])
    # NOTE(review): `f` below is the leftover inner-loop variable (the last
    # band, "defaultBand") — confirm the output filename is intentional.
    all_res_temp.to_csv(
        path_csv_root
        + "/res_np_single_pipelines_Cho2017_preSelectedSubj_AllFreq_OptFreqBands_"
        + f
        + ".csv"
    )
    # Rebuild a trimmed summary: first 60 rows per band, tagged with the band.
    results = pd.DataFrame()
    for f in freqbands:
        results_f_temp = pd.read_csv("1_Dataset-csv/"+d.code+"/res_np_single_pipelines_"+d.code+"_preSelectedSubj_OptFreqBands_"+f+".csv")
        results_f = results_f_temp.head(n=60)
        # Drop the unnamed index column written by to_csv.
        results_f = results_f.drop(columns=results_f.keys()[0])
        results_f["FreqBand"] = [f]*len(results_f)
        results = pd.concat((results, results_f))
    results.to_csv("1_Dataset-csv/"+d.code+"/res_np_single_pipelines_"+d.code+"_preSelectedSubj_AllFreq_OptFreqBands_defaultBand.csv")
|
<reponame>sungcheolkim78/py_imlib
"""
fmin (scipy.optimize 1.3.1) sckim version for numba optimization
"""
import warnings

import numpy as np
from numba import njit
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has been exceeded.',
'maxiter': 'Maximum number of iterations has been exceeded.',
'pr_loss': 'Desired error not necessarily achieved due to precision loss.'}
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, initial_simplex=None):
    """
    Minimize a function using the downhill simplex algorithm.

    This algorithm only uses function values, not derivatives or second
    derivatives.

    Parameters
    ----------
    func : callable func(x,*args)
        The objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func, i.e. ``f(x,*args)``.
    xtol : float, optional
        Absolute error in xopt between iterations that is acceptable for
        convergence.
    ftol : number, optional
        Absolute error in func(xopt) between iterations that is acceptable for
        convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : number, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        Set to True if fopt and warnflag outputs are desired.
    disp : bool, optional
        Set to True to print convergence messages.
    initial_simplex : array_like of shape (N + 1, N), optional
        Initial simplex. If given, overrides `x0`.
        ``initial_simplex[j,:]`` should contain the coordinates of
        the j-th vertex of the ``N+1`` vertices in the simplex, where
        ``N`` is the dimension.

    Returns
    -------
    xopt : ndarray
        Parameter that minimizes function.
    fopt : float
        Value of function at minimum: ``fopt = func(xopt)``.
    iter : int
        Number of iterations performed.
    funcalls : int
        Number of function calls made.
    warnflag : int
        1 : Maximum number of function evaluations made.
        2 : Maximum number of iterations reached.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Nelder-Mead' `method` in particular.

    Notes
    -----
    Uses a Nelder-Mead simplex algorithm to find the minimum of function of
    one or more variables.
    This algorithm has a long history of successful use in applications.
    But it will usually be slower than an algorithm that uses first or
    second derivative information. In practice it can have poor
    performance in high-dimensional problems and is not robust to
    minimizing complicated functions. Additionally, there currently is no
    complete theory describing when the algorithm will successfully
    converge to the minimum, or how fast it will if it does. Both the ftol and
    xtol criteria must be met for convergence.

    This stripped-down port does not support the ``retall`` / ``callback``
    options of ``scipy.optimize.fmin``; the earlier documentation of those
    parameters (and the ``allvecs`` return value) was inaccurate and has
    been removed.

    Examples
    --------
    >>> def f(x):
    ...     return x**2
    >>> from scipy import optimize
    >>> minimum = optimize.fmin(f, 1)
    Optimization terminated successfully.
             Current function value: 0.000000
             Iterations: 17
             Function evaluations: 34
    >>> minimum[0]
    -8.8817841970012523e-16

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313
    .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
           Respectable", in Numerical Analysis 1995, Proceedings of the
           1995 Dundee Biennial Conference in Numerical Analysis,
           Harlow, UK, pp. 191-208.
    """
    # Translate the legacy keyword names to the modern solver options.
    opts = {'xatol': xtol,
            'fatol': ftol,
            'maxiter': maxiter,
            'maxfev': maxfun,
            'disp': disp,
            'initial_simplex': initial_simplex}

    res = _minimize_neldermead(func, x0, args, **opts)
    if full_output:
        retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
        return retlist
    else:
        return res['x']
def _minimize_neldermead(func, x0, args=(),
                         maxiter=None, maxfev=None, disp=False,
                         initial_simplex=None,
                         xatol=1e-4, fatol=1e-4, adaptive=False,
                         **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*200``, where ``N`` is the number of
        variables, if neither `maxiter` or `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    initial_simplex : array_like of shape (N + 1, N)
        Initial simplex. If given, overrides `x0`.
        ``initial_simplex[j,:]`` should contain the coordinates of
        the j-th vertex of the ``N+1`` vertices in the simplex, where
        ``N`` is the dimension.
    xatol : float, optional
        Absolute error in xopt between iterations that is acceptable for
        convergence.
    fatol : number, optional
        Absolute error in func(xopt) between iterations that is acceptable for
        convergence.
    adaptive : bool, optional
        Adapt algorithm parameters to dimensionality of problem. Useful for
        high-dimensional minimization [1]_.

    References
    ----------
    .. [1] Gao, F. and Han, L.
       Implementing the Nelder-Mead simplex algorithm with adaptive
       parameters. 2012. Computational Optimization and Applications.
       51:1, pp. 259-277
    """
    # Back-compat: accept the deprecated ftol/xtol spellings.
    if 'ftol' in unknown_options:
        warnings.warn("ftol is deprecated for Nelder-Mead,"
                      " use fatol instead. If you specified both, only"
                      " fatol is used.",
                      DeprecationWarning)
        if (np.isclose(fatol, 1e-4) and
                not np.isclose(unknown_options['ftol'], 1e-4)):
            # only ftol was probably specified, use it.
            fatol = unknown_options['ftol']
        unknown_options.pop('ftol')
    if 'xtol' in unknown_options:
        warnings.warn("xtol is deprecated for Nelder-Mead,"
                      " use xatol instead. If you specified both, only"
                      " xatol is used.",
                      DeprecationWarning)
        if (np.isclose(xatol, 1e-4) and
                not np.isclose(unknown_options['xtol'], 1e-4)):
            # only xtol was probably specified, use it.
            xatol = unknown_options['xtol']
        unknown_options.pop('xtol')

    _check_unknown_options(unknown_options)
    maxfun = maxfev

    # Wrap func so extra args are appended and evaluations are counted.
    fcalls = [0]

    def wfunc(*wrapper_args):
        # BUG FIX: the evaluation counter was never incremented (lost when
        # wrap_function was inlined), so the maxfun limit never triggered
        # and the reported nfev was always 0.
        fcalls[0] += 1
        return func(*(wrapper_args + args))

    if adaptive:
        # Dimension-dependent coefficients from Gao & Han (2012).
        dim = float(len(x0))
        rho = 1
        chi = 1 + 2/dim
        psi = 0.75 - 1/(2*dim)
        sigma = 1 - 1/dim
    else:
        # Classic Nelder-Mead reflection/expansion/contraction/shrink
        # coefficients.
        rho = 1
        chi = 2
        psi = 0.5
        sigma = 0.5

    # Relative / absolute perturbations used to build the default simplex.
    nonzdelt = 0.05
    zdelt = 0.00025

    x0 = np.asfarray(x0).flatten()

    if initial_simplex is None:
        # Build a simplex around x0 by perturbing one coordinate per vertex.
        N = len(x0)
        sim = np.zeros((N + 1, N), dtype=x0.dtype)
        sim[0] = x0
        for k in range(N):
            y = np.array(x0, copy=True)
            if y[k] != 0:
                y[k] = (1 + nonzdelt)*y[k]
            else:
                y[k] = zdelt
            sim[k + 1] = y
    else:
        sim = np.asfarray(initial_simplex).copy()
        if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
            raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
        if len(x0) != sim.shape[1]:
            raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
        N = sim.shape[1]

    # If neither are set, then set both to default
    if maxiter is None and maxfun is None:
        maxiter = N * 200
        maxfun = N * 200
    elif maxiter is None:
        # Convert remaining Nones, to np.inf, unless the other is np.inf, in
        # which case use the default to avoid unbounded iteration
        if maxfun == np.inf:
            maxiter = N * 200
        else:
            maxiter = np.inf
    elif maxfun is None:
        if maxiter == np.inf:
            maxfun = N * 200
        else:
            maxfun = np.inf

    # Core simplex loop lives in _fmin_iteration.
    sim, fsim, iterations = _fmin_iteration(fcalls, wfunc, maxfun, maxiter, sim, xatol, fatol, rho, chi, psi, sigma, N)

    x = sim[0]
    fval = np.min(fsim)
    warnflag = 0

    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print('Warning: ' + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print('Warning: ' + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % iterations)
            print("         Function evaluations: %d" % fcalls[0])

    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x, final_simplex=(sim, fsim))
    return result
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in SciPy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def wrap_function(function, args):
    """Wrap *function* so that *args* are appended to every call and the
    number of invocations is counted.

    Returns a ``[count]`` one-element list (mutated in place on each call)
    together with the wrapper, or ``([0], None)`` when *function* is None.
    """
    call_count = [0]
    if function is None:
        return call_count, None

    def wrapped(*call_args):
        call_count[0] += 1
        return function(*(call_args + args))

    return call_count, wrapped
def _fmin_iteration(fcalls, func, maxfun, maxiter, sim, xatol, fatol, rho, chi, psi, sigma, N):
    """ fmin iteration part

    Core Nelder-Mead loop: repeatedly reflect / expand / contract / shrink
    the simplex until both the ``xatol`` and ``fatol`` criteria are met or
    an iteration/evaluation limit is reached. Returns
    ``[sim, fsim, iterations]`` with ``sim`` sorted so row 0 is the best
    vertex.

    NOTE(review): ``fcalls[0]`` is only *read* here; it is expected to be
    incremented by the ``func`` wrapper. In this module the wrapper does
    not increment it, so the ``maxfun`` limit never fires — confirm
    upstream.
    """
    one2np1 = list(range(1, N + 1))

    # Evaluate the objective at every initial vertex.
    fsim = np.zeros((N + 1,), float)
    for k in range(N + 1):
        fsim[k] = func(sim[k])

    ind = np.argsort(fsim)
    fsim = np.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = np.take(sim, ind, 0)

    iterations = 1
    while (fcalls[0] < maxfun and iterations < maxiter):
        # Converged when the simplex is small in both x and f.
        if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and
                np.max(np.abs(fsim[0] - fsim[1:])) <= fatol):
            break

        # Centroid of all vertices except the worst, and its reflection.
        xbar = np.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0

        if fxr < fsim[0]:
            # Reflection beats the current best: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    # Contract on the reflection side of the centroid.
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1

                if doshrink:
                    # Shrink every vertex toward the current best.
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])

        # Re-sort so sim[0] is again the best vertex.
        ind = np.argsort(fsim)
        sim = np.take(sim, ind, 0)
        fsim = np.take(fsim, ind, 0)
        iterations += 1

    return [sim, fsim, iterations]
class OptimizeResult(dict):
    """Dictionary with attribute access, representing an optimization result.

    Common keys (solver-dependent): ``x`` (solution), ``success``,
    ``status``, ``message``, ``fun``/``jac``/``hess`` (objective value and
    derivatives), ``hess_inv``, ``nfev``/``njev``/``nhev`` (evaluation
    counts), ``nit`` (iterations), ``maxcv`` (max constraint violation).

    Since this is essentially a dict subclass with attribute accessors,
    use ``keys()`` to see which attributes a given solver populated.
    """

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    # Attribute writes/deletes go straight to the underlying dict.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __repr__(self):
        if not self.keys():
            return self.__class__.__name__ + "()"
        width = max(map(len, list(self.keys()))) + 1
        rows = [key.rjust(width) + ': ' + repr(val)
                for key, val in sorted(self.items())]
        return '\n'.join(rows)

    def __dir__(self):
        return list(self.keys())
# vim:foldmethod=indent:foldlevel=0
|
<gh_stars>0
#!/usr/bin/env python
#coding: utf-8
from riak_common import *
import riak
import time
import redis
from riak.datatypes import Set
from time import sleep
def test_set_dt_empty():
    """A never-written set datatype reads back as empty through nutcracker."""
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    riak_set = get_set_dt_object(riak_client, 'test', key)
    assert_equal(0, len(riak_set))
    cardinality = retry_read(lambda: nutcracker.scard(nc_key))
    assert_equal(0, cardinality)
def test_set_dt_delete():
    """Deleting a set key through nutcracker empties the set."""
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    retry_write(lambda: nutcracker.sadd(nc_key, distinct_value()))
    assert_equal(1, retry_read(lambda: nutcracker.scard(nc_key)))
    retry_write(lambda: nutcracker.delete(nc_key))
    assert_equal(0, retry_read(lambda: nutcracker.scard(nc_key)))
def test_set_dt_add_single():
    """Add values one at a time, remove one, and verify membership."""
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    add_values = [distinct_value(), distinct_value(), distinct_value()]
    remove_values = [add_values[1]]
    expected = []
    # Each single-value sadd reports exactly one affected element.
    for current in add_values:
        assert_equal(1, retry_write(lambda: nutcracker.sadd(nc_key, current)))
        expected.append(current)
        assert_equal(len(expected), retry_read(lambda: nutcracker.scard(nc_key)))
    # Each single-value srem likewise reports one affected element.
    for current in remove_values:
        assert_equal(1, retry_write(lambda: nutcracker.srem(nc_key, current)))
        expected.remove(current)
        assert_equal(len(expected), retry_read(lambda: nutcracker.scard(nc_key)))
    members = retry_read(lambda: nutcracker.smembers(nc_key))
    assert_equal(len(expected), len(members))
    for current in expected:
        assert retry_read(lambda: nutcracker.sismember(nc_key, current))
def test_set_dt_add_multi():
    """Multi-value sadd/srem through nutcracker update the riak set.

    NOTE: multi-value sadd/srem currently return 0 or 1 rather than the
    number of affected values (see the HACK comments below), so only a
    range is asserted on the write results.
    """
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    add_values = [ distinct_value(), distinct_value(), distinct_value() ]
    remove_values = [ add_values[1], add_values[0] ]
    values = []
    # Add all values in one sadd call.
    wrote = retry_write(lambda: nutcracker.sadd(nc_key, *add_values))
    #HACK: sadd and srem multi is returning 0 or 1, not count of affected values
    #assert_equal(len(add_values), wrote)
    assert(wrote > 0 and wrote <= len(add_values)), \
        "expected wrote: %d to be between 0 and len(add_values): %d" % \
        (wrote, len(add_values))
    for value in add_values:
        values.append(value)
    value = retry_read(lambda: nutcracker.scard(nc_key))
    assert_equal(len(values), value)
    # Remove two of the values in one srem call.
    wrote = retry_write(lambda: nutcracker.srem(nc_key, *remove_values))
    #HACK: sadd and srem multi is returning 0 or 1, not count of affected values
    #assert_equal(len(remove_values), wrote)
    assert(wrote > 0 and wrote <= len(remove_values)), \
        "expected wrote: %d to be between 0 and len(remove_values): %d" % \
        (wrote, len(remove_values))
    for value in remove_values:
        values.remove(value)
    value = retry_read(lambda: nutcracker.scard(nc_key))
    assert_equal(len(values), value)
    # The remaining values must all still be members.
    nc_values = retry_read(lambda: nutcracker.smembers(nc_key))
    assert_equal(len(values), len(nc_values))
    for value in values:
        exists = retry_read(lambda: nutcracker.sismember(nc_key, value))
        assert(exists)
def test_set_dt_ttl():
    """Writes made via nutcracker and directly via the riak Set converge.

    One value is added through nutcracker and one through the riak client;
    the nutcracker view is polled (up to ~1s) until both are visible.
    """
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    add_values = [distinct_value(), distinct_value()]
    # FIX: removed the unused `remove_values` local.
    values = []
    riak_set = get_set_dt_object(riak_client, 'test', key)
    # First value goes through nutcracker...
    value = add_values[0]
    wrote = retry_write(lambda: nutcracker.sadd(nc_key, value))
    values.append(value)
    # ...second value goes directly through the riak set datatype.
    value = add_values[1]
    riak_set.add(value)
    values.append(value)
    riak_set.store()
    # Poll: the riak-side write may take a moment to be visible via nutcracker.
    nc_values = []
    for i in range(0, 10):
        nc_values = retry_read(lambda: nutcracker.smembers(nc_key))
        if len(values) == len(nc_values):
            break
        sleep(0.1)
    assert_equal(len(values), len(nc_values))
def test_set_dt_max_add():
    """Batched sadd must work for every batch size from 1 to 99."""
    for n_to_add in range(1, 100):
        try:
            _test_set_dt_max_add(n_to_add)
        except Exception as e:
            assert False, "failed at n_to_add: %d, e: %s" % (n_to_add, e)
def _test_set_dt_max_add(n_to_add):
    """Add *n_to_add* distinct values in one sadd and verify membership."""
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    add_values = set([distinct_value() for i in range(0, n_to_add)])
    # FIX: dropped the unused `remove_values` and dead `values = []` locals.
    retry_write(lambda: nutcracker.sadd(nc_key, *add_values))
    values = add_values
    nc_values = retry_read(lambda: nutcracker.smembers(nc_key))
    assert_equal(len(values), len(nc_values))
    for value in values:
        # BUG FIX: the failure message mixed str.format placeholders with
        # the % operator ("...{0}..." % (...)), which raised TypeError
        # instead of the intended message whenever the assertion fired.
        assert value in nc_values, \
            "expected {0} to be in {1}".format(value, nc_values)
def test_set_dt_max_items():
    """Sets built one sadd at a time must hold 1..99 items correctly."""
    for n_item in range(1, 100):
        try:
            _test_set_dt_max_items(n_item)
        except Exception as e:
            assert False, "failed at n_item: %d, e: %s" % (n_item, e)
def _test_set_dt_max_items(n_items):
    """Add *n_items* values individually and verify the final cardinality."""
    (riak_client, _, nutcracker, redis) = getconn()
    key = distinct_key()
    nc_key = nutcracker_sets_key(key)
    added = 0
    for _ in range(n_items):
        added += retry_write(lambda: nutcracker.sadd(nc_key, distinct_value()))
    members = retry_read(lambda: nutcracker.smembers(nc_key))
    assert_equal(added, len(members))
def get_set_dt_object(riak_client, bucket, key):
    """Return a riak Set datatype handle for *key* in *bucket*, using the
    sets bucket-type."""
    typed_bucket = riak_client.bucket_type(bucket_type_name()).bucket(bucket)
    return Set(typed_bucket, key)
def bucket_type_name():
    """Name of the riak bucket-type used for set datatypes.

    HACK: see the riak server module code about the arbitrary
    bucket-type name.
    """
    return 'sets'
def nutcracker_sets_key(riak_key):
    """Map a riak key to its nutcracker sets key: '<bucket_type>:<nc_key>'."""
    return '%s:%s' % (bucket_type_name(), nutcracker_key(riak_key))
|
# -*- coding: utf-8 -*-
#================================================================
# Don't go gently into that good night.
#
# author: klaus
# description:
#
#================================================================
import time
from tqdm import tqdm
import torch
from torch.utils.data.distributed import DistributedSampler
from core.metrics.metrics import AverageMeter
from .utils.nested import nested_call, nested_to_device
class BaseTrainer(object):
    """Basic trainer: drives the train/test loop over the configured phases.

    Subclasses implement ``train_step``/``test_step`` (each returning
    ``(loss, batch_logs)``) and optionally the epoch hooks; this base class
    handles epoch iteration, device placement, metric averaging, progress /
    tensorboard logging and checkpoint saving.
    """

    def __init__(self,
                 cfg,
                 data_loaders,
                 models,
                 criterions,
                 optimizers,
                 schedulers,
                 checkpointer,
                 phases,
                 device,
                 local_rank=0,
                 writer=None,
                 logger=None):
        """
        Args:
            cfg: config object; reads cfg.SOLVER.START_EPOCH / NUM_EPOCHS and,
                when present, cfg.DDP.WORLD_SIZE.
            data_loaders (dict): phase name -> torch DataLoader.
            models: model or nested container of models (driven via nested_call).
            criterions: loss function(s) used by the step implementations.
            optimizers: optimizer(s), handed to the checkpointer on save.
            schedulers: LR scheduler(s); queried via get_last_lr for logging.
            checkpointer: object exposing save(epoch, models, optimizers).
            phases (list[str]): e.g. ['train', 'test']; 'train' enables grads.
            device: torch device that batches are moved to.
            local_rank (int): process rank; only rank 0 logs and saves.
            writer: optional tensorboard SummaryWriter.
            logger: optional logging.Logger for epoch summaries.
        """
        super(BaseTrainer, self).__init__()
        self.cfg = cfg
        self.models = models
        self.data_loaders = data_loaders
        self.criterions = criterions
        self.optimizers = optimizers
        self.schedulers = schedulers
        self.checkpointer = checkpointer
        self.phases = phases
        self.device = device
        self.writer = writer
        self.logger = logger
        self.local_rank = local_rank
        self.epoch_logs = {phase: {} for phase in phases}
        # DDP config is optional; fall back to a single-process world size.
        try:
            self.world_size = cfg.DDP.WORLD_SIZE
        except Exception:
            self.world_size = 1

    def train_step(self, batch, batch_idx, global_step):
        """One training batch; subclasses return (loss, batch_logs)."""
        pass

    def test_step(self, batch, batch_idx, global_step):
        """One evaluation batch; subclasses return (loss, batch_logs)."""
        pass

    def on_train_epoch_start(self, epoch):
        """Hook called before each training epoch."""
        pass

    def on_train_epoch_end(self, epoch):
        """Hook called after each training epoch."""
        pass

    def on_test_epoch_start(self, epoch):
        """Hook called before each evaluation epoch."""
        pass

    def on_test_epoch_end(self, epoch):
        """Hook called after each evaluation epoch."""
        pass

    def forward(self, x):
        # BUG FIX: the original did `raise NotImplemented('not implemented')`;
        # NotImplemented is a sentinel, not an exception, so that line raised
        # "TypeError: 'NotImplementedType' object is not callable" instead of
        # the intended abstract-method error.
        raise NotImplementedError('not implemented')

    def train(self, evaluate_freq=1):
        """Run the full training loop.

        Kwargs:
            evaluate_freq (int): evaluate frequence (epoch). Default 1.
                If -1, no evaluation (no epoch number is divisible by -1
                with remainder 0 except multiples of 1 -- in practice the
                test phase runs only when epoch % evaluate_freq == 0).
        """
        for epoch in range(self.cfg.SOLVER.START_EPOCH, self.cfg.SOLVER.NUM_EPOCHS + 1):
            # fresh running-average meters for this epoch
            losses = {phase: AverageMeter() for phase in self.phases}
            for phase in self.phases:
                training = phase == 'train'
                # evaluation phases run only every evaluate_freq epochs
                if (not training) and epoch % evaluate_freq != 0:
                    continue
                start = time.time()
                # on epoch start: reset this phase's log
                self.epoch_logs[phase] = {}
                # set model mode; during training also tell distributed
                # samplers the epoch for deterministic per-epoch shuffling
                if training:
                    nested_call(self.models, 'train')
                    for data_loader in self.data_loaders.values():
                        sampler = data_loader.sampler
                        if sampler is not None and isinstance(sampler, DistributedSampler):
                            sampler.set_epoch(epoch)
                else:
                    nested_call(self.models, 'eval')
                if training:
                    self.on_train_epoch_start(epoch)
                else:
                    self.on_test_epoch_start(epoch)
                # run all batches of this phase
                step_fn = self.train_step if training else self.test_step
                with torch.set_grad_enabled(training):
                    if self.local_rank == 0:
                        pbar = tqdm(self.data_loaders[phase],
                                    desc='{}ing epoch {}'.format(phase.capitalize(), epoch))
                    else:
                        pbar = self.data_loaders[phase]
                    for batch_idx, batch in enumerate(pbar, start=1):
                        global_step = (epoch - 1) * len(self.data_loaders[phase]) + batch_idx
                        batch = nested_to_device(batch, self.device, non_blocking=True)
                        loss, batch_logs = step_fn(batch, batch_idx, global_step)
                        losses[phase].update(loss.item())
                        # only rank 0 writes batch-level logs
                        if self.local_rank == 0:
                            self.log_batch(loss, batch_logs, pbar, global_step, phase)
                # on epoch end
                if training:
                    self.on_train_epoch_end(epoch)
                else:
                    self.on_test_epoch_end(epoch)
                end = time.time()
                epoch_loss = losses[phase].compute()
                self.epoch_logs[phase].update({'loss': epoch_loss})
                # epoch-level logging and checkpointing (rank 0 only)
                if self.local_rank == 0:
                    log = f'Epoch {epoch:03d}'
                    log += f' | {phase.capitalize()}'
                    log += f' | {self.epoch_logs[phase]}'
                    log += f' | Time cost:{end-start} sec'
                    self.logger.info(log)
                    if self.writer is not None:
                        for k, v in self.epoch_logs[phase].items():
                            self.writer.add_scalar('epoch/' + k, v, global_step=epoch)
                    # save checkpoint after the training phase of each epoch
                    if phase == 'train':
                        self.checkpointer.save(epoch, self.models, self.optimizers)

    def log_batch(self, loss, batch_logs, pbar, global_step, phase):
        """Log batch metrics to the progress bar and tensorboard.

        Args:
            loss (torch.Tensor): scalar batch loss.
            batch_logs (dict): metrics to show in the progress bar
                (mutated in place: the phase loss and LR entries are added).
            pbar (tqdm): progress bar for this phase.
            global_step (int): global batch counter.
            phase (str): current phase name; 'train' enables LR logging.
        """
        training = phase == 'train'
        batch_logs[f'{phase}/loss'] = loss.item()
        pbar.set_postfix(batch_logs)
        # mirror the metrics (and, when training, the LR) to tensorboard
        if self.writer is not None:
            logs = batch_logs
            if training and self.schedulers is not None:
                lrs = nested_call(self.schedulers, 'get_last_lr')
                if isinstance(lrs, dict):
                    logs.update({'learning_rate/' + k: v[0] for k, v in lrs.items()})
                else:
                    logs['learning_rate'] = lrs[0]
            for k, v in logs.items():
                self.writer.add_scalar(k, v, global_step=global_step)
|
<gh_stars>1-10
import glob
import os
import sys
import uuid
import arcpy
def create_wksp(path, gdb):
    """Create (if needed) and return a .gdb workspace under *path*.

    Args:
        path: parent directory for the file geodatabase.
        gdb: geodatabase name (e.g. "data.gdb").

    Returns:
        Full path to the (possibly newly created) workspace.
    """
    wksp = os.path.join(path, gdb)
    # create the workspace if it doesn't exist
    if not arcpy.Exists(wksp):
        arcpy.CreateFileGDB_management(path, gdb)
    # reuse the already-computed path instead of joining a second time
    return wksp
def wsdrefine_dem(in_wsd, in_stream, in_dem, out_wsd):
    """
    Refine a watershed polygon - extract only areas that flow to supplied stream segment.
    - in_wsd: feature class holding watershed area to be refined
    - in_stream: feature class holding stream to be used as 'pour points'
    - in_dem: DEM raster used to model flow within the watershed
    - out_wsd: output polygon feature class; returned on success, None when
      the DEM-derived watershed raster is entirely null
    """
    # get spatial analyst and set env
    if arcpy.CheckExtension("Spatial") == "Available":
        arcpy.CheckOutExtension("Spatial")
    else:
        raise EnvironmentError("Spatial Analyst license unavailable")
    arcpy.env.workspace = "IN_MEMORY"
    # environment settings
    arcpy.env.overwriteOutput = True
    extent = arcpy.Describe(in_wsd).extent
    arcpy.env.extent = extent
    # read inputs
    arcpy.MakeFeatureLayer_management(in_stream, "streams_fl")
    arcpy.MakeFeatureLayer_management(in_wsd, "wsd_fl")
    print(" - writing wsd to temp fc")
    # write the watershed to a feature class so we can get the extent
    # and create mask
    arcpy.Dissolve_management("wsd_fl", "wsd_fc_tmp")
    # set extent to wsd polygon
    arcpy.env.mask = "wsd_fc_tmp"
    extent = arcpy.Describe("wsd_fc_tmp").extent
    arcpy.env.extent = extent
    print(" - writing streams to raster")
    # for some reason the stream raster doesn't overwrite the existing output
    # as workaround, create raster using unique name
    streams_raster = "stream_" + str(uuid.uuid4())
    # NOTE(review): assumes "linear_fea" is a field on the stream layer and
    # "25" is the intended cell size -- confirm against the source data
    arcpy.FeatureToRaster_conversion("streams_fl", "linear_fea", streams_raster, "25")
    # fill the dem, calculate flow direction and create watershed raster
    print(" - filling DEM")
    fill = arcpy.sa.Fill(in_dem, 100)
    print(" - calculating flow direction")
    flow_direction = arcpy.sa.FlowDirection(fill, "NORMAL")
    print(" - creating DEM based watershed")
    wsd_grid = arcpy.sa.Watershed(flow_direction, streams_raster)
    # check to make sure there is a result - if all output raster is null,
    # do not try to create a watershed polygon output
    out_is_null = arcpy.sa.IsNull(wsd_grid)
    check_min_result = arcpy.GetRasterProperties_management(out_is_null, "MINIMUM")
    check_min = check_min_result.getOutput(0)
    check_max_result = arcpy.GetRasterProperties_management(out_is_null, "MAXIMUM")
    check_max = check_max_result.getOutput(0)
    # a "0" in the IsNull grid stats means at least one non-null watershed cell
    if "0" in (check_min, check_max):
        print(" - writing new watershed to %s" % out_wsd)
        arcpy.RasterToPolygon_conversion(wsd_grid, out_wsd, "SIMPLIFY")
        return out_wsd
    else:
        return None
def postprocess(args):
    """Run postprocessing of watershed with DEM.

    args[1] (optional) names the workspace folder; defaults to "tempfiles".
    Each subfolder holding hexgrid.shp, pourpoints.shp and dem.tif is refined.
    """
    # find input shapes
    wksp = args[1] if len(args) > 1 else "tempfiles"
    required = ("hexgrid.shp", "pourpoints.shp", "dem.tif")
    # run the dem postprocessing on every complete job folder
    for folder in glob.glob(os.path.join(wksp, "*")):
        paths = {name: os.path.join(folder, name) for name in required}
        if all(os.path.exists(p) for p in paths.values()):
            print("Postprocessing " + folder)
            # run the job
            wsdrefine_dem(
                paths["hexgrid.shp"],
                paths["pourpoints.shp"],
                paths["dem.tif"],
                os.path.join(folder, "refined.shp"),
            )
if __name__ == "__main__":
postprocess(sys.argv)
|
'''in_use_do_not_archive
constants.py module used with the CCA3
contains constants, many used in gdata.py, ndata.py, hdata.py
no classes at this time in this module
requirements.txt:
provided simply here as a roadmap to the modules in the CCA3
please check with cca4.py to make sure latest requirements
'''
##START PRAGMAS
#
#pylint: disable=line-too-long
# prefer to take advantage of longer line length of modern monitors, even with multiple windows
#pylint: disable=invalid-name
# prefer not to use snake_case style for very frequent data structure or small temp variables
#pylint: disable=bare-except
# prefer in some code areas to catch any exception rising
#pylint: disable=too-many-branches
#pylint: disable=too-many-statements
# prefer to use comfortable number of branches and statements, especially in user menu communication
#pylint: disable=too-many-instance-attributes
#pylint: disable=unused-wildcard-import
#pylint: disable=wildcard-import
# use wildcard import for constants
##END PRAGMAS
## START IMPORTS START IMPORTS
#
##standard imports -- being used by this module
try:
#import pdb
import sys
#import platform
#import os.path
#import random
#import copy
except ImportError:
print('\nprogram will end -- constants.py module of causal cog arch unable to import standard lib module')
print('please ensure correct version of python can be accessed')
sys.exit()
#
##PyPI imports -- being used by this module
try:
#import numpy as np
#import colorama # type: ignore
#import pyfiglet # type: ignore
#import termcolor
pass
except ImportError:
print('\nprogram will end -- constants.py module of the causal cog arch unable to import a PyPI module')
print('please check requirements.txt and install all required dependencies')
sys.exit()
#
##non-PyPI third-party imports -- being used by this module
try:
pass
#justification/ Awesome/LibHunt ratings for non-pypi imports: n/a
#nb. none
except ImportError:
print('program will end -- constants.py module of the causal cog arch unable to import a third-party module')
print('please check requirements.txt and install all required dependencies')
sys.exit()
#
##CCA1 module imports -- being used by this module
try:
#from constants import *
#import gdata
#import ddata
##import hdata
#import main_mech
#import eval_micro #June 2021 deprecated
#import eval_milli #June 2021 deprecated
#import palimpsest #nb without GPU will use excessive resources
pass
except ImportError:
print('program will end -- constants.py module unable to import a causal cognitive architecture module')
print('please check requirements.txt and install all required dependencies')
sys.exit()
#
#
##START CONSTANTS
#
VERSION = 'not specified'  # CCA3 release identifier; set at packaging time
HARDWARE = False  # presumably True when running against real embodiment hardware -- TODO confirm
MEMORY_CHECKING_ON_TEMP = False  # extra memory diagnostics toggle
FULL_CAUSAL = False  # full causal processing mode off by default
BINDING = True #version for CCA3 Binding Paper to avoid GPU, demonstrate equations
DEBUG = True  # enables debug behavior in dependent modules
FASTRUN = True #True causes skipping of many user inputs
AUTORUN = False #True will run whole session without user input
LIFESPAN = 10000 #max loops for main_eval()
MOD_CYCLE_REEVALUATE = 5  # presumably re-evaluate every N cycles -- verify against main loop
SAVE_RECALL_TO_FROM_STORAGE = False  # persistence of recall data disabled
TOTAL_ROWS = 6 #count EDGE squares
TOTAL_COLS = 6 #count EDGE squares
# 8-bit vector codes identifying agent goals
GOAL_RANDOM_WALK = '00000000'
GOAL_SKEWED_WALK = '00000001'
GOAL_PRECAUSAL_FIND_HIKER = '11111111'
GOAL_CAUSAL_FIND_HIKER = '11110000'
TRIES_BEFORE_DECLARE_LOCAL_MINIMUM = 2  # retries before treating position as a local minimum
# default 8-bit vectors and simulation defaults
DEFAULT_VECTOR = '00000000'
DEFAULT_GOAL = GOAL_RANDOM_WALK
DEFAULT_HIPPOCAMPUS = 'HUMAN'
DEFAULT_FIRST_SCENE = 'FOREST'
ESCAPE_LEFT = '11111111'
FILLER = '00000000'
REFLEX_ESCAPE = '10011001'
INITIATE_VALUE = 0  # starting value for counters initialized elsewhere
FIRST_SCENE = 'MOTHER'
CONTINUATION_TEXT = 'Please press ENTER to continue....'
MISSION_COUNTER = 0  # incremented per mission by consuming modules -- TODO confirm
MAX_CYCLES_NOW_EXIT = 20  # hard stop on runaway cycles
# sizing of the simulated world / data structures
TOTAL_MAPS =1000
TOTAL_OBJECTS = 11 #segments 0-10
TOTAL_ENVIRONMENTS = 1000
TOTAL_SCENES = 20
TOTAL_STREAMS = 20
STANDARD_DELAY = 2  # delay units used for pacing user-facing output
##END CONSTANTS
|
<filename>terragrunt_action.py<gh_stars>0
#!/usr/bin/env python3
import argparse
import json
import os
import re
import subprocess
import sys
from pathlib import Path
import git
GIT_WORKSPACE = "/github/workspace/"
TERRASCAN_PATH = "/usr/local/bin/terrascan"
def get_command_line_options(args):
    """Build the terrascan CLI option string from the parsed action inputs."""
    flag_specs = [
        (args.config_path, "-c %s"),
        (args.iac_type, "-i %s"),
        (args.iac_version, "--iac-version %s"),
        (args.policy_path, "-p %s"),
        (args.policy_type, "-t %s"),
        (args.skip_rules, "--skip-rules=%s"),
    ]
    options = [fmt % value for value, fmt in flag_specs if value]
    # JSON output is needed when violations will be mapped to file/line annotations
    if args.tag_lines == "true":
        options.append("-o json")
    return " ".join(options)
def get_dir_list(changed_only, src, path_ignore):
    """Collect directories containing IaC files to scan.

    Args:
        changed_only: "true" to scan only directories touched by the current
            GitHub event (diff against the event's base sha), anything else
            to walk *src* for ``*.tf`` files.
        src: root directory searched in the full-scan mode.
        path_ignore: optional regex; matching directories are skipped.

    Returns:
        list[str]: unique directory paths.
    """
    dirs = []
    if changed_only == "true":
        event_path = os.environ.get("GITHUB_EVENT_PATH")
        with open(event_path) as f:
            event_data = json.load(f)
        if os.environ.get("GITHUB_EVENT_NAME") == "pull_request":
            base = event_data["pull_request"]["base"]["sha"]
        elif os.environ.get("GITHUB_EVENT_NAME") == "push":
            base = event_data["before"]
        else:
            base = ""
        repo = git.Repo(os.environ.get("GITHUB_WORKSPACE"))
        for item in repo.index.diff(str(base)):
            # BUG FIX: a_path is a plain string (no .parent attribute);
            # wrap it in Path to take the containing directory.
            parent = str(Path(item.a_path).parent)
            if parent not in dirs:
                if path_ignore and re.search(path_ignore, parent):
                    continue
                dirs.append(parent)
    else:
        for file in Path(src).rglob("*.tf"):
            # BUG FIX: compare as str -- the original tested `file.parent`
            # (a Path) against a list of str, so duplicates were never
            # filtered and every .tf file re-added its directory.
            parent = str(file.parent)
            if parent not in dirs:
                if path_ignore and re.search(path_ignore, parent):
                    continue
                dirs.append(parent)
    return dirs
def parse_message(message):
    """Print a GitHub workflow-command annotation for each terrascan violation.

    Args:
        message (dict): parsed terrascan JSON output; reads
            results.scan_summary["file/folder"] and results.violations.

    HIGH-severity violations become ``error`` annotations, everything else
    ``warning``. File paths are made workspace-relative for the annotations.
    """
    # ROBUSTNESS FIX: default to "" so this also runs outside a GitHub
    # runner (the original did None + "/" -> TypeError when the variable
    # was unset).
    base_path = os.environ.get("GITHUB_WORKSPACE", "") + "/"
    root_dir = message["results"]["scan_summary"]["file/folder"]
    if message["results"]["violations"]:
        for violation in message["results"]["violations"]:
            if violation["severity"].upper() == "HIGH":
                level = "error"
            else:
                level = "warning"
            filename = os.path.join(
                os.path.dirname(root_dir),
                violation["file"],
            ).replace(base_path, "")
            line_number = violation["line"]
            error = "{} ({}) - {} ({}) - {} ({})".format(
                violation["rule_name"],
                violation["rule_id"],
                violation["resource_type"],
                violation["resource_name"],
                violation["description"],
                violation["category"],
            )
            print("::%s file=%s,line=%s::%s" % (level, filename, line_number, error))
def run_terragrunt(args):
    """Scan each candidate directory with terrascan and exit with the worst code.

    Exits 1 when the terrascan binary is missing, 2 when an invocation
    writes only to stderr, otherwise with the highest terrascan return code
    seen across directories (forced to 0 when --only_warn is set).
    """
    if not os.path.isfile(TERRASCAN_PATH):
        print("::debug::terrascan is required to perform this action")
        exit(1)
    options = get_command_line_options(args)
    dir_list = get_dir_list(args.changed_only, args.iac_dir, args.ignore_path)
    exit_code = 0
    print(options)
    for dir in dir_list:
        # SECURITY NOTE(review): shell=True on a string assembled from action
        # inputs -- acceptable only because options/dirs come from the
        # repository's own workflow config, not untrusted input.
        command_string = TERRASCAN_PATH + " scan " + options + " -d " + dir
        if args.debug:
            print(command_string)
        result = subprocess.run(
            [command_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True,
        )
        if result.stdout:
            if args.tag_lines == "true":
                # JSON mode: convert violations into workflow annotations
                parse_message(json.loads(result.stdout.decode("utf8")))
            else:
                print(result.stdout.decode("utf8"))
        elif result.stderr:
            # stderr with no stdout means the scan itself failed
            print(result.stderr.decode("utf8"))
            sys.exit(2)
        # remember the worst return code across all scanned directories
        if result.returncode > exit_code:
            exit_code = result.returncode
    if args.only_warn:
        exit(0)
    else:
        exit(exit_code)
def main():
    """Wire the GitHub action's INPUT_* environment variables into argparse and run the scan."""
    parser = argparse.ArgumentParser(description="Terragrunt GitHub action")
    # every option defaults to its INPUT_<NAME> environment variable
    env_backed = [
        "changed_only", "config_path", "debug", "iac_dir", "iac_type",
        "iac_version", "ignore_path", "only_warn", "policy_path",
        "policy_type", "skip_rules", "tag_lines",
    ]
    for option in env_backed:
        parser.add_argument(
            "--" + option,
            default=os.environ.get("INPUT_" + option.upper()),
        )
    args = parser.parse_args()
    if args.debug:
        print(args)
    run_terragrunt(args)
if __name__ == "__main__":
main()
|
<gh_stars>1-10
import argparse
from os.path import join, isdir, exists
from glob import glob
import logging
from logging import FileHandler, StreamHandler
import yaml
import multiprocessing
from utils.file_io import make_dirs
import sys
logger = logging.getLogger(__name__)
import os
from os.path import join, basename, exists, dirname
import collections
def multi_call(inputs):
    """Expand a config with PARENT_INPUT into one config dict per sub-folder.

    Each immediate subdirectory of PARENT_INPUT is substituted for the
    '$INPUT' placeholder in the config, and OUTPUT_DIR gains the
    subfolder's basename as a suffix directory.
    """
    contents = load_yaml(inputs)
    pin = contents['PARENT_INPUT']
    # strip a single trailing path separator, if any
    pin = pin[:-1] if pin.endswith('/') or pin.endswith('\\') else pin
    input_dirs = [join(pin, i) for i in os.listdir(pin) if os.path.isdir(join(pin, i))]
    contents_list = []
    for subfolder in input_dirs:
        # SECURITY: eval() of the repr'd YAML dict after text substitution --
        # only safe for trusted config files; never point this at untrusted input.
        conts = eval(str(contents).replace('$INPUT', subfolder))
        conts['OUTPUT_DIR'] = join(conts['OUTPUT_DIR'], basename(subfolder))
        contents_list.append(conts)
    return contents_list
def convert(data):
    """Recursively convert unicode strings in nested containers to str.

    NOTE(review): Python 2 only -- relies on `basestring`, `dict.iteritems`
    and the legacy `collections.Mapping`/`Iterable` aliases; under Python 3
    this raises NameError/AttributeError. Confirm before porting.
    """
    if isinstance(data, basestring):
        return str(data)
    elif isinstance(data, collections.Mapping):
        # rebuild mappings with converted (key, value) pairs
        return dict(map(convert, data.iteritems()))
    elif isinstance(data, collections.Iterable):
        # preserve the container type (list, tuple, ...)
        return type(data)(map(convert, data))
    else:
        return data
def extract_path(path):
    """Expand a glob pattern; on no match or a directory match, list contents.

    Args:
        path: glob pattern or directory path.

    Returns:
        list[str]: matched paths (possibly empty).
    """
    f = glob(path)
    # BUG FIX: test emptiness BEFORE indexing -- the original evaluated
    # `isdir(f[0]) or not f`, raising IndexError whenever the glob was empty.
    if not f or isdir(f[0]):
        f = glob(join(path, '*'))
    return f
def parse_lazy_syntax(inputs, outputdir):
    """Resolve a 'lazy' path spec into concrete file paths.

    *inputs* may be a glob string or a list of glob strings/directories;
    unmatched patterns are retried relative to *outputdir*. Raises
    IndexError when nothing matches anywhere (handled by prepare_path_list).

    NOTE(review): in the list branches this returns the result of zip();
    under Python 3 that is a single-use iterator rather than a list --
    confirm callers only iterate it once before porting.
    """
    if isinstance(inputs, str):
        in0 = sorted(glob(inputs))
        if not in0:
            # retry relative to the pipeline output directory
            in0 = sorted(glob(join(outputdir, inputs)))
        # a single directory match expands to its contents
        if isdir(in0[0]):
            in0 = sorted(glob(join(in0[0], '*')))
    elif isinstance(inputs, list):
        # if every entry is already an existing path, use the list as-is
        if all([exists(i) for i in inputs]):
            return inputs
        in0 = zip(*[sorted(glob(i)) for i in inputs])
        if not in0:
            in0 = zip(*[sorted(glob(join(i, '*'))) for i in inputs])
        if not in0:
            in0 = zip(*[sorted(extract_path(join(outputdir, i))) for i in inputs])
    return in0
def prepare_path_list(inputs, outputdir):
    """Resolve *inputs* via parse_lazy_syntax, exiting with status 1 on no match.

    Args:
        inputs: glob string or list accepted by parse_lazy_syntax.
        outputdir: fallback directory for relative patterns.
    """
    try:
        in0 = parse_lazy_syntax(inputs, outputdir)
    except IndexError:
        # nothing matched anywhere: report to the log and the console, abort
        logger.info("Images \"{0}\" not found. Check your path".format(inputs))
        # COMPAT FIX: parenthesized single-argument print is valid under both
        # Python 2 and 3 (the original used a Python-2-only print statement)
        print("Images \"{0}\" not found. Check your path".format(inputs))
        sys.exit(1)
    return in0
def retrieve_in_list(obj, key, empty=None):
    """Collect obj[key] for each dict in *obj*, substituting *empty* when missing.

    Args:
        obj: a dict (treated as a one-element list) or a list of dicts.
        key: key to look up in each dict.
        empty: placeholder for missing keys. Defaults to a fresh empty list;
            the original used the mutable default `empty=[]`, which is the
            classic shared-default pitfall (all calls shared one list object).

    Returns:
        list: one entry per dict -- the value, or the placeholder.
    """
    if empty is None:
        empty = []
    if isinstance(obj, dict):
        obj = [obj, ]
    collected = []
    for ob in obj:
        collected.append(ob.get(key, empty))
    return collected
def parse_operation(operation):
    """Split an operation spec into (functions, params, images, labels, output)."""
    funcs = retrieve_in_list(operation, 'function')
    parameters = retrieve_in_list(operation, 'params', empty={})
    # images/labels come from the first listed step, output from the last
    image_spec = retrieve_in_list(operation, 'images')[0]
    label_spec = retrieve_in_list(operation, 'labels')[0]
    out_spec = retrieve_in_list(operation, 'output')[-1]
    return funcs, parameters, image_spec, label_spec, out_spec
def _retrieve_caller_based_on_function(function):
    """Find the pipeline module whose *_operation module defines *function*.

    Returns that module's `caller` entry point. Raises IndexError when no
    operation module provides the function.
    """
    # deferred project-local imports avoid circular imports at module load
    import preprocess, segment, track, postprocess, subdetect, apply
    import preprocess_operation, segment_operation, track_operation, postprocess_operation, subdetect_operation
    ops_modules = [preprocess_operation, segment_operation, track_operation, postprocess_operation, subdetect_operation, apply]
    caller_modules = [preprocess, segment, track, postprocess, subdetect, apply]
    # pair each caller module with its operation namespace; first match wins
    module = [m for m, top in zip(caller_modules, ops_modules) if hasattr(top, function)][0]
    return getattr(module, "caller")
def run_operation(output_dir, operation):
    """Execute one operation spec: resolve inputs, pick the caller, dispatch.

    NOTE(review): the 'apply' branch passes zip() results; under Python 3
    these are single-use iterators -- confirm the callers tolerate that
    before porting.
    """
    functions, params, images, labels, output = parse_operation(operation)
    inputs = prepare_path_list(images, output_dir)
    logger.info(inputs)
    inputs_labels = prepare_path_list(labels, output_dir)
    # relative outputs nest under output_dir; an empty spec means output_dir itself
    output = join(output_dir, output) if output else output_dir
    caller = _retrieve_caller_based_on_function(functions[0])
    if len(functions) == 1 and functions[0] == 'apply':
        # 'apply' takes channel/object names, defaulting to the raw specs
        ch_names = operation['ch_names'] if 'ch_names' in operation else images
        obj_names = operation['obj_names'] if 'obj_names' in operation else labels
        caller(zip(*inputs), zip(*inputs_labels), output, obj_names, ch_names)
    elif not inputs_labels:
        caller(inputs, output, functions, params=params)
    else:
        caller(inputs, inputs_labels, output, functions, params=params)
def run_operations(output_dir, operations):
    """Execute each configured operation in order."""
    for op in operations:
        run_operation(output_dir, op)
def load_yaml(path):
    """Parse a YAML config file and return its contents.

    SECURITY: yaml.load without an explicit Loader can construct arbitrary
    Python objects from YAML tags; prefer yaml.safe_load unless the config
    files are fully trusted.
    """
    with open(path) as stream:
        contents = yaml.load(stream)
    return contents
def single_call(inputs):
    """Load one YAML config file and run its operations."""
    call_operations(load_yaml(inputs))
def call_operations(contents):
    """Create the output directory, configure logging, and run all operations."""
    out_dir = contents['OUTPUT_DIR']
    make_dirs(out_dir)
    logging.basicConfig(filename=join(out_dir, 'log.txt'), level=logging.DEBUG)
    # PIL is very chatty at DEBUG level; keep only warnings and above
    logging.getLogger("PIL").setLevel(logging.WARNING)
    run_operations(out_dir, contents['operations'])
    logger.info("Caller finished.")
    return
def _parallel(args):
    '''
    Use this function if you want to multiprocess using PARENT_INPUT argument
    (see input_fireworks.yml).
    '''
    # one expanded config per subfolder of PARENT_INPUT
    contents_list = multi_call(args.input[0])
    # normalize unicode keys/values before handing the dicts to worker processes
    contents_list = [convert(i) for i in contents_list]
    pool = multiprocessing.Pool(args.cores, maxtasksperchild=1)
    pool.map(call_operations, contents_list, chunksize=1)
    pool.close()
def parse_args():
    """Parse command line: optional core count plus any number of input files."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-n", "--cores",
        type=int,
        default=1,
        help="number of cores for multiprocessing",
    )
    parser.add_argument("input", nargs="*", help="input argument file path")
    return parser.parse_args()
def main():
    """Entry point: run one config (optionally fanned out via PARENT_INPUT)
    or pool-map several config files across worker processes."""
    args = parse_args()
    if len(args.input) == 1:
        contents = load_yaml(args.input[0])
        if "PARENT_INPUT" in contents:
            _parallel(args)
        else:
            call_operations(contents)
        # single_call(args.input[0])
    if len(args.input) > 1:
        num_cores = args.cores
        # COMPAT FIX: parenthesized single-argument print works under both
        # Python 2 and 3 (the original used a Python-2-only print statement)
        print(str(num_cores) + ' started parallel')
        pool = multiprocessing.Pool(num_cores, maxtasksperchild=1)
        pool.map(single_call, args.input, chunksize=1)
        pool.close()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Cheetah.compat import unicode
import typing
from _base_interface_pyi import BaseInterfacePyi
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '3.2.6.post2'
__CHEETAH_versionTuple__ = (3, 2, 6, 'post', 2)
__CHEETAH_genTime__ = 1649895855.1623263
__CHEETAH_genTimestamp__ = 'Wed Apr 13 20:24:15 2022'
__CHEETAH_src__ = '_x_pyi.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 13 20:24:04 2022'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class x_pyi(BaseInterfacePyi):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(x_pyi, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
# see: https://cheetahtemplate.org/users_guide/inheritanceEtc.html#implements
_v = VFN(VFFSL(SL,"self",True),"init_data",False)() # '$self.init_data()' on line 6, col 1
if _v is not None: write(_filter(_v, rawExpr='$self.init_data()')) # from line 6, col 1.
# $from_imports is a tuple eg: $from_imports = [('.tab_align', 'TabAlign')]
_v = VFN(VFFSL(SL,"self",True),"load_data",False)() # '$self.load_data()' on line 23, col 1
if _v is not None: write(_filter(_v, rawExpr='$self.load_data()')) # from line 23, col 1.
# main Template
safe_name = VFN(VFFSL(SL,"self",True),"get_safe_word",False)(VFFSL(SL,"name",True))
fullname = VFFSL(SL,"namespace",True) + '.' + VFFSL(SL,"safe_name",True)
is_method = 'methods' in VFFSL(SL,"attribs",True)
is_properties = 'properties' in VFFSL(SL,"attribs",True)
is_types = 'types' in VFFSL(SL,"attribs",True)
# set $abc_imports = $self.get_abstract_imports([$is_method],[$is_properties, $is_types])
abc_imports = []
write('''# coding: utf-8
''')
self._handleCheetahInclude("resources/inc_lic.txt", trans=trans, includeFrom="file", raw=True)
write('''# Interface Class
# this is a auto generated file generated by Cheetah
''')
if VFFSL(SL,"libre_office_ver",True): # generated from line 36, col 1
write('''# Libre Office Version: ''')
_v = VFFSL(SL,"libre_office_ver",True) # '$libre_office_ver' on line 37, col 25
if _v is not None: write(_filter(_v, rawExpr='$libre_office_ver')) # from line 37, col 25.
write('''
''')
write('''# Namespace: ''')
_v = VFFSL(SL,"namespace",True) # '$namespace' on line 39, col 14
if _v is not None: write(_filter(_v, rawExpr='$namespace')) # from line 39, col 14.
write('''
''')
if len(VFFSL(SL,"inherits",True)) == 0: # generated from line 40, col 1
inherits = ['ABC']
abc_imports = VFFSL(SL,"abc_imports",True) + ['ABC']
write('''from typing_extensions import Literal
''')
if VFFSL(SL,"requires_typing",True): # generated from line 45, col 1
write('''import typing
''')
for imp in VFFSL(SL,"imports",True): # generated from line 48, col 1
write('''import ''')
_v = VFFSL(SL,"imp",True) # '$imp' on line 49, col 8
if _v is not None: write(_filter(_v, rawExpr='$imp')) # from line 49, col 8.
write('''
''')
if len(VFFSL(SL,"abc_imports",True)) > 0: # generated from line 51, col 1
write('''from abc import ''')
_v = VFN(VFFSL(SL,"self",True),"lst_to_str",False)(VFFSL(SL,"abc_imports",True)) # '$self.lst_to_str($abc_imports)' on line 52, col 17
if _v is not None: write(_filter(_v, rawExpr='$self.lst_to_str($abc_imports)')) # from line 52, col 17.
write('''
''')
for imp in VFFSL(SL,"from_imports",True): # generated from line 54, col 1
_v = VFN(VFFSL(SL,"self",True),"get_from_import",False)(VFFSL(SL,"name",True), VFFSL(SL,"imp",True)) # '$self.get_from_import($name, $imp)' on line 55, col 1
if _v is not None: write(_filter(_v, rawExpr='$self.get_from_import($name, $imp)')) # from line 55, col 1.
write('''
''')
if len(VFFSL(SL,"from_imports_typing",True)) > 0: # generated from line 57, col 1
write('''if typing.TYPE_CHECKING:
''')
for imp in VFFSL(SL,"from_imports_typing",True): # generated from line 59, col 1
write(''' ''')
_v = VFN(VFFSL(SL,"self",True),"get_from_import",False)(VFFSL(SL,"name",True), VFFSL(SL,"imp",True)) # '$self.get_from_import($name, $imp)' on line 60, col 5
if _v is not None: write(_filter(_v, rawExpr='$self.get_from_import($name, $imp)')) # from line 60, col 5.
write('''
''')
write('''
''')
if VFFSL(SL,"allow_db",True): # generated from line 64, col 1
write('''class ''')
_v = VFFSL(SL,"safe_name",True) # '${safe_name}' on line 65, col 7
if _v is not None: write(_filter(_v, rawExpr='${safe_name}')) # from line 65, col 7.
write('''(''')
_v = VFN(VFFSL(SL,"self",True),"get_class_inherits_from_db",False)('ABC') # "$self.get_class_inherits_from_db('ABC')" on line 65, col 20
if _v is not None: write(_filter(_v, rawExpr="$self.get_class_inherits_from_db('ABC')")) # from line 65, col 20.
write('''):
''')
else: # generated from line 66, col 1
write('''class ''')
_v = VFFSL(SL,"safe_name",True) # '${safe_name}' on line 67, col 7
if _v is not None: write(_filter(_v, rawExpr='${safe_name}')) # from line 67, col 7.
write('''(''')
_v = VFN(VFFSL(SL,"self",True),"get_class_inherits",False)(VFFSL(SL,"name",True), VFFSL(SL,"inherits",True)) # '$self.get_class_inherits($name, $inherits)' on line 67, col 20
if _v is not None: write(_filter(_v, rawExpr='$self.get_class_inherits($name, $inherits)')) # from line 67, col 20.
write('''):
''')
write(''' """
''')
for line in VFN(VFFSL(SL,"self",True),"line_gen",False)(VFFSL(SL,"desc",True)): # generated from line 70, col 5
write(''' ''')
_v = VFFSL(SL,"line",True) # '$line' on line 71, col 5
if _v is not None: write(_filter(_v, rawExpr='$line')) # from line 71, col 5.
write('''
''')
if VFFSL(SL,"link",True): # generated from line 73, col 1
write('''
See Also:
`API ''')
_v = VFFSL(SL,"name",True) # '$name' on line 76, col 14
if _v is not None: write(_filter(_v, rawExpr='$name')) # from line 76, col 14.
write(''' <''')
_v = VFFSL(SL,"link",True) # '$link' on line 76, col 21
if _v is not None: write(_filter(_v, rawExpr='$link')) # from line 76, col 21.
write('''>`_
''')
write(''' """
__pyunointerface__: Literal[\'''')
_v = VFFSL(SL,"fullname",True) # '$fullname' on line 79, col 34
if _v is not None: write(_filter(_v, rawExpr='$fullname')) # from line 79, col 34.
write("""']
""")
if VFFSL(SL,"is_method",True): # generated from line 81, col 1
methods = VFFSL(SL,"attribs",True)['methods']
for method in VFFSL(SL,"methods",True): # generated from line 83, col 5
m_desc = VFFSL(SL,"method",True)['desc']
out_args = VFN(VFFSL(SL,"self",True),"get_out_args",False)(VFFSL(SL,"method",True))
raises = VFN(VFFSL(SL,"self",True),"get_raises_list",False)(VFFSL(SL,"method",True))
write(''' ''')
_v = VFN(VFFSL(SL,"self",True),"get_formated_meth",False)(VFFSL(SL,"method",True)) # '$self.get_formated_meth($method)' on line 87, col 5
if _v is not None: write(_filter(_v, rawExpr='$self.get_formated_meth($method)')) # from line 87, col 5.
write('''
"""
''')
for line in VFN(VFFSL(SL,"self",True),"line_gen",False)(VFFSL(SL,"m_desc",True)): # generated from line 89, col 9
write(''' ''')
_v = VFFSL(SL,"line",True) # '$line' on line 90, col 9
if _v is not None: write(_filter(_v, rawExpr='$line')) # from line 90, col 9.
write('''
''')
if len(VFFSL(SL,"out_args",True)) > 0: # generated from line 92, col 5
write('''
''')
for arg in VFFSL(SL,"out_args",True): # generated from line 94, col 9
write(''' * ``''')
_v = VFFSL(SL,"arg",True) # '${arg}' on line 95, col 13
if _v is not None: write(_filter(_v, rawExpr='${arg}')) # from line 95, col 13.
write('''`` is an out direction argument.
''')
if len(VFFSL(SL,"raises",True)) > 0: # generated from line 98, col 5
write('''
Raises:
''')
for itm in VFFSL(SL,"raises",True): # generated from line 101, col 9
write(''' ''')
_v = VFFSL(SL,"itm",True)[0] # '$itm[0]' on line 102, col 13
if _v is not None: write(_filter(_v, rawExpr='$itm[0]')) # from line 102, col 13.
write(''': ``''')
_v = VFFSL(SL,"itm",True)[1] # '$itm[1]' on line 102, col 24
if _v is not None: write(_filter(_v, rawExpr='$itm[1]')) # from line 102, col 24.
write('''``
''')
write(''' """
''')
if VFFSL(SL,"is_types",True): # generated from line 108, col 1
properties = VFFSL(SL,"attribs",True)['types']
for property in VFFSL(SL,"properties",True): # generated from line 110, col 1
p_name = VFN(VFFSL(SL,"self",True),"get_safe_word",False)(VFFSL(SL,"property",True)['name'])
p_return = VFN(VFFSL(SL,"self",True),"get_q_type",False)(VFFSL(SL,"property",True)['returns'])
p_desc = VFFSL(SL,"property",True)['desc']
write(''' @property
def ''')
_v = VFFSL(SL,"p_name",True) # '${p_name}' on line 115, col 9
if _v is not None: write(_filter(_v, rawExpr='${p_name}')) # from line 115, col 9.
write('''(self) -> ''')
_v = VFFSL(SL,"p_return",True) # '$p_return' on line 115, col 28
if _v is not None: write(_filter(_v, rawExpr='$p_return')) # from line 115, col 28.
write(''':
"""
''')
for line in VFN(VFFSL(SL,"self",True),"line_gen",False)(VFFSL(SL,"p_desc",True)): # generated from line 117, col 5
write(''' ''')
_v = VFFSL(SL,"line",True) # '$line' on line 118, col 9
if _v is not None: write(_filter(_v, rawExpr='$line')) # from line 118, col 9.
write('''
''')
if VFN(VFFSL(SL,"self",True),"get_prop_has_errors",False)(VFFSL(SL,"property",True)): # generated from line 120, col 5
write('''
Raises:
''')
for long, short in VFN(VFFSL(SL,"self",True),"get_prop_get_raises",False)(VFFSL(SL,"property",True)): # generated from line 124, col 5
write(''' ''')
_v = VFFSL(SL,"long",True) # '$long' on line 125, col 13
if _v is not None: write(_filter(_v, rawExpr='$long')) # from line 125, col 13.
write(''': get raises ``''')
_v = VFFSL(SL,"short",True) # '$short' on line 125, col 33
if _v is not None: write(_filter(_v, rawExpr='$short')) # from line 125, col 33.
write('''``
''')
for long, short in VFN(VFFSL(SL,"self",True),"get_prop_set_raises",False)(VFFSL(SL,"property",True)): # generated from line 127, col 5
write(''' ''')
_v = VFFSL(SL,"long",True) # '$long' on line 128, col 13
if _v is not None: write(_filter(_v, rawExpr='$long')) # from line 128, col 13.
write(''': set raises ``''')
_v = VFFSL(SL,"short",True) # '$short' on line 128, col 33
if _v is not None: write(_filter(_v, rawExpr='$short')) # from line 128, col 33.
write('''``
''')
write(''' """
''')
if VFFSL(SL,"is_properties",True): # generated from line 135, col 1
properties = VFFSL(SL,"attribs",True)['properties']
for property in VFFSL(SL,"properties",True): # generated from line 137, col 1
p_name = VFN(VFFSL(SL,"self",True),"get_safe_word",False)(VFFSL(SL,"property",True)['name'])
p_return = VFN(VFFSL(SL,"self",True),"get_q_type",False)(VFFSL(SL,"property",True)['returns'])
p_desc = VFFSL(SL,"property",True)['desc']
write(''' @property
def ''')
_v = VFFSL(SL,"p_name",True) # '${p_name}' on line 142, col 9
if _v is not None: write(_filter(_v, rawExpr='${p_name}')) # from line 142, col 9.
write('''(self) -> ''')
_v = VFFSL(SL,"p_return",True) # '$p_return' on line 142, col 28
if _v is not None: write(_filter(_v, rawExpr='$p_return')) # from line 142, col 28.
write(''':
"""
''')
for line in VFN(VFFSL(SL,"self",True),"line_gen",False)(VFFSL(SL,"p_desc",True)): # generated from line 144, col 5
write(''' ''')
_v = VFFSL(SL,"line",True) # '$line' on line 145, col 9
if _v is not None: write(_filter(_v, rawExpr='$line')) # from line 145, col 9.
write('''
''')
if VFN(VFFSL(SL,"self",True),"get_prop_has_errors",False)(VFFSL(SL,"property",True)): # generated from line 147, col 5
write('''
Raises:
''')
for long, short in VFN(VFFSL(SL,"self",True),"get_prop_get_raises",False)(VFFSL(SL,"property",True)): # generated from line 151, col 5
write(''' ''')
_v = VFFSL(SL,"long",True) # '$long' on line 152, col 13
if _v is not None: write(_filter(_v, rawExpr='$long')) # from line 152, col 13.
write(''': get raises ``''')
_v = VFFSL(SL,"short",True) # '$short' on line 152, col 33
if _v is not None: write(_filter(_v, rawExpr='$short')) # from line 152, col 33.
write('''``
''')
for long, short in VFN(VFFSL(SL,"self",True),"get_prop_set_raises",False)(VFFSL(SL,"property",True)): # generated from line 154, col 5
write(''' ''')
_v = VFFSL(SL,"long",True) # '$long' on line 155, col 13
if _v is not None: write(_filter(_v, rawExpr='$long')) # from line 155, col 13.
write(''': set raises ``''')
_v = VFFSL(SL,"short",True) # '$short' on line 155, col 33
if _v is not None: write(_filter(_v, rawExpr='$short')) # from line 155, col 33.
write('''``
''')
write(''' """
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
# Machine-generated template metadata plus default values for the
# template's searchlist placeholders; do not edit by hand.
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
# Defaults for the placeholders referenced by the generated respond() body.
name = ""
namespace = ""
allow_db = True
libre_office_ver = False
desc = []
quote = set()
typings = set()
link = ""
requires_typing = False
inherits = []
extends_map = {}
imports = []
from_imports = []
from_imports_typing = []
attribs = {}
# Name of the generated entry-point method for this template class.
_mainCheetahMethod_for_x_pyi = 'respond'
if not hasattr(x_pyi, '_initCheetahAttributes'):
templateAPIClass = getattr(x_pyi,
'_CHEETAH_templateClass',
Template)
templateAPIClass._addCheetahPlumbingCodeToClass(x_pyi)
# CHEETAH was developed by <NAME> and <NAME>
# with code, advice and input from many other volunteers.
# For more information visit https://cheetahtemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=x_pyi()).run()
|
#!/usr/bin/python
import os.path
import posixpath
import pytest
from aspen.testing.harness import Harness
# Path of the reStructuredText table that drives the tests below.
tablefile = os.path.join(os.path.dirname(__file__), 'dispatch_table_data.rst')
def find_cols(defline, header_char='='):
    """
    Return a sorted list of (start, end) indexes into defline that
    are the beginning and ending indexes of column definitions
    based on a reStructuredText table header line.

    Note that this is a braindead simple version that only understands
    header_chars and spaces (no other whitespace).

    Returns a list rather than a raw ``zip`` object: callers index into
    the result and iterate it several times, which under Python 3 would
    silently fail against a one-shot zip iterator.
    """
    i = 0
    colstarts = []
    colends = []
    while i < len(defline):
        if len(colstarts) <= len(colends):
            # Currently looking for the start of the next column run.
            nextstart = defline.find(header_char, i)
            if nextstart < 0:
                break
            colstarts.append(nextstart)
            i = nextstart
        else:
            # Currently looking for the end of the open column run; if no
            # space follows, the column runs to the end of the line.
            nextend = defline.find(' ', i)
            if nextend < 0:
                colends.append(len(defline))
                break
            colends.append(nextend)
            i = nextend
    return list(zip(colstarts, colends))
def fields_from(dataline, cols):
    """
    Given a data line and a set of column definitions,
    strip the data and return it as a list.
    """
    return [dataline[start:fin].strip() for start, fin in cols]
def get_table_entries():
    """
    Parse the dispatch table file into a list of test cases.

    Returns a list of (files, request_uri, expected) tuples, one per
    (data row, request column) pair in the table.
    """
    # 'with' so the file handle is closed (the old open(...).readlines()
    # leaked it until garbage collection).
    with open(tablefile, 'r') as f:
        table = f.readlines()
    tabledefline = table[0].strip()
    # list() so cols can be re-iterated on every row under Python 3.
    cols = list(find_cols(tabledefline))
    headers = fields_from(table[1], cols)
    # Columns left of the '/' header name input files; the rest are requests.
    inputfiles = headers[:headers.index('/')]
    requests = headers[headers.index('/'):]
    # We 'know' that table[0] == table[2], both header deflines, so skip down
    results = []
    for line in table[3:]:
        if line.strip() == tabledefline:
            break  # found ending header, ignore the rest
        if line.strip().startswith('#'):
            continue  # skip comment lines
        fields = fields_from(line, cols)
        # An 'X' in an input-file column marks that file as present.
        files = [x for i, x in enumerate(inputfiles) if fields[i] == 'X']
        expected = fields[len(inputfiles):]
        results += [(files, r, expected[i]) for i, r in enumerate(requests)]
    return results
def format_result(request, dispatch_result=None, **ignored):
    """
    Turn a raw request result into a string compatible with the
    table-driven test.
    """
    wildcards = request.line.uri.path
    pairs = ["%s='%s'" % (name, wildcards[name]) for name in sorted(wildcards)]
    wildtext = ",".join(pairs)
    if dispatch_result:
        text = dispatch_result.match
    else:
        text = ''
    if wildtext:
        text += " (%s)" % wildtext
    return text
# Minimal simplate body used as the contents of every test file we create.
GENERIC_SPT = """
[-----]
[-----] text/plain
Greetings, Program!
"""
@pytest.mark.parametrize("files,request_uri,expected", get_table_entries())
def test_all_table_entries(harness, files, request_uri, expected):
    """
    Drive one (files, request, expected) row/column pair from the dispatch
    table: create the files, make the request, and compare the formatted
    result against the table's expected cell.
    """
    # set up the specified files; names ending in '/' are bare directories
    realfiles = tuple([ f if f.endswith('/') else (f, GENERIC_SPT) for f in files ])
    harness.fs.www.mk(*realfiles)
    # make the request and get the response code and the request object (sadly we can't get both with one request)
    response = harness.simple(uripath=request_uri, filepath=None, want='response', raise_immediately=False)
    # str() instead of unicode(): unicode is undefined under Python 3, and
    # str() yields the same text for a response code either way.
    result = str(response.code)
    if result == '200':
        state = harness.simple( uripath=request_uri
                              , filepath=None
                              , want='state'
                              , raise_immediately=False
                               )
        path = format_result(**state)
        if os.sep != posixpath.sep:
            # normalize Windows separators so paths match the table's notation
            path = path.replace(os.sep, posixpath.sep)
        # strip the sandbox root so the table stays machine-independent
        path = path[len(harness.fs.www.root)+1:]
        if path:
            result += " " + path
    elif result == '302':
        result += ' ' + response.headers['Location']
    # a trailing '*' in the table marks the expectation as approximate
    if expected.endswith("*"):
        expected = expected[:-1]
    assert result == expected, "Requesting %r, got %r instead of %r" % (request_uri, result, expected)
if __name__ == '__main__':
    # output the table with answers the current dispatcher gives
    # currently this has to be run manually with:
    # ./env/bin/python tests/dispatch_table_test | grep -v ^pid
    with open(tablefile, 'r') as f:
        table = f.readlines()
    tabledefline = table[0].strip()
    # list() so cols can be indexed and re-iterated under Python 3.
    cols = list(find_cols(tabledefline))
    headers = fields_from(table[1], cols)
    answercol = headers.index('/')
    inputfiles = headers[:answercol]
    requests = headers[answercol:]
    for line in table[:3]:
        # lines keep their trailing newline; end='' avoids double-spacing
        print(line, end='')
    # We 'know' that table[0] == table[2], both header deflines, so skip down
    for line in table[3:]:
        if line.strip() == tabledefline:
            break  # found ending header, ignore the rest
        if line.strip().startswith('#'):
            continue  # skip comment lines
        fields = fields_from(line, cols)
        files = [ x for i, x in enumerate(inputfiles) if fields[i] == 'X' ]
        resultcolstart = cols[answercol][0]
        resultline = line[:resultcolstart]  # input files
        harness = Harness()
        realfiles = tuple([ f if f.endswith('/') else (f, GENERIC_SPT) for f in files ])
        harness.fs.www.mk(*realfiles)
        for i, request_uri in enumerate(requests):
            # str() instead of unicode(): unicode is undefined on Python 3
            result = str(harness.simple(uripath=request_uri, filepath=None, want='response.code', raise_immediately=False))
            if result not in [ '404' ]:
                state = harness.simple( uripath=request_uri
                                      , filepath=None
                                      , want='state'
                                      , raise_immediately=False
                                       )
                path = format_result(**state)
                path = path[len(harness.fs.www.root)+1:]
                if path:
                    result += " " + path
            # pad the answer to this column's width, plus the inter-column gap
            col = answercol + i
            resultline += result + (' ' * (cols[col][1] - cols[col][0] - len(result)))
            if col < len(cols) - 1:
                resultline += ' ' * (cols[col+1][0] - cols[col][1])
        print(resultline)
|
"""
Tests for Markov Autoregression models
Author: <NAME>
License: BSD-3
"""
from __future__ import division, absolute_import, print_function
from statsmodels.compat.testing import skip
import warnings
import os
import numpy as np
import pandas as pd
from statsmodels.tools import add_constant
from statsmodels.tsa.regime_switching import markov_autoregression
from numpy.testing import assert_equal, assert_allclose, assert_raises
# Directory of this test module; used below to locate the results CSV files.
current_path = os.path.dirname(os.path.abspath(__file__))
# Quarterly real GNP growth series used by all the Hamilton-style tests
# below (the E-views notes in TestHamiltonAR4 give the sample as
# 1951q1-1984q4).
rgnp = [2.59316421, 2.20217133, 0.45827562, 0.9687438,
        -0.24130757, 0.89647478, 2.05393219, 1.73353648,
        0.93871289, -0.46477833, -0.80983406, -1.39763689,
        -0.39886093, 1.1918416, 1.45620048, 2.11808228,
        1.08957863, 1.32390273, 0.87296367, -0.19773273,
        0.45420215, 0.07221876, 1.1030364, 0.82097489,
        -0.05795795, 0.58447772, -1.56192672, -2.05041027,
        0.53637183, 2.33676839, 2.34014559, 1.2339263,
        1.8869648, -0.45920792, 0.84940469, 1.70139849,
        -0.28756312, 0.09594627, -0.86080289, 1.03447127,
        1.23685944, 1.42004502, 2.22410631, 1.30210173,
        1.03517699, 0.9253425, -0.16559951, 1.3444382,
        1.37500131, 1.73222184, 0.71605635, 2.21032143,
        0.85333031, 1.00238776, 0.42725441, 2.14368343,
        1.43789184, 1.57959926, 2.27469826, 1.95962656,
        0.25992399, 1.01946914, 0.49016398, 0.5636338,
        0.5959546, 1.43082857, 0.56230122, 1.15388393,
        1.68722844, 0.77438205, -0.09647045, 1.39600146,
        0.13646798, 0.55223715, -0.39944872, -0.61671102,
        -0.08722561, 1.2101835, -0.90729755, 2.64916158,
        -0.0080694, 0.51111895, -0.00401437, 2.16821432,
        1.92586732, 1.03504717, 1.85897219, 2.32004929,
        0.25570789, -0.09855274, 0.89073682, -0.55896485,
        0.28350255, -1.31155407, -0.88278776, -1.97454941,
        1.01275265, 1.68264723, 1.38271284, 1.86073637,
        0.4447377, 0.41449001, 0.99202275, 1.36283576,
        1.59970522, 1.98845816, -0.25684232, 0.87786949,
        3.1095655, 0.85324478, 1.23337317, 0.00314302,
        -0.09433369, 0.89883322, -0.19036628, 0.99772376,
        -2.39120054, 0.06649673, 1.26136017, 1.91637838,
        -0.3348029, 0.44207108, -1.40664911, -1.52129889,
        0.29919869, -0.80197448, 0.15204792, 0.98585027,
        2.13034606, 1.34397924, 1.61550522, 2.70930099,
        1.24461412, 0.50835466, 0.14802167]
# 0/1 indicator series aligned with rgnp; presumably NBER recession dates
# (not referenced by the tests visible in this file) -- TODO confirm.
rec = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
def test_predict():
    """Check MarkovAutoregression._resid against residuals computed by
    hand for several configurations: AR(1) with/without mean, AR(2) with
    three regimes, and AR(1) with a non-switching exog regressor."""
    # AR(1) without mean, k_regimes=2
    endog = np.ones(10)
    mod = markov_autoregression.MarkovAutoregression(
        endog, k_regimes=2, order=1, trend='nc')
    # the AR(1) lag consumes one observation
    assert_equal(mod.nobs, 9)
    assert_equal(mod.endog, np.ones(9))
    params = np.r_[0.5, 0.5, 1., 0.1, 0.5]
    mod_resid = mod._resid(params)
    resids = np.zeros((2, 2, mod.nobs))
    # Resids when: S_{t} = 0
    resids[0, :, :] = np.ones(9) - 0.1 * np.ones(9)
    assert_allclose(mod_resid[0, :, :], resids[0, :, :])
    # Resids when: S_{t} = 1
    resids[1, :, :] = np.ones(9) - 0.5 * np.ones(9)
    assert_allclose(mod_resid[1, :, :], resids[1, :, :])
    # AR(1) with mean, k_regimes=2
    endog = np.arange(10)
    mod = markov_autoregression.MarkovAutoregression(
        endog, k_regimes=2, order=1)
    assert_equal(mod.nobs, 9)
    assert_equal(mod.endog, np.arange(1, 10))
    params = np.r_[0.5, 0.5, 2., 3., 1., 0.1, 0.5]
    mod_resid = mod._resid(params)
    resids = np.zeros((2, 2, mod.nobs))
    # Resids when: S_t = 0, S_{t-1} = 0
    resids[0, 0, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 2.)
    assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])
    # Resids when: S_t = 0, S_{t-1} = 1
    resids[0, 1, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 3.)
    assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])
    # Resids when: S_t = 1, S_{t-1} = 0
    resids[1, 0, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 2.)
    assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])
    # Resids when: S_t = 1, S_{t-1} = 1
    resids[1, 1, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 3.)
    assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])
    # AR(2) with mean, k_regimes=3
    endog = np.arange(10)
    mod = markov_autoregression.MarkovAutoregression(
        endog, k_regimes=3, order=2)
    # the AR(2) lags consume two observations
    assert_equal(mod.nobs, 8)
    assert_equal(mod.endog, np.arange(2, 10))
    params = np.r_[[0.3] * 6, 2., 3., 4, 1., 0.1, 0.5, 0.8, -0.05, -0.25, -0.4]
    mod_resid = mod._resid(params)
    resids = np.zeros((3, 3, 3, mod.nobs))
    # Resids when: S_t = 0, S_{t-1} = 0, S_{t-2} = 0
    resids[0, 0, 0, :] = (
        (np.arange(2, 10) - 2.) -
        0.1 * (np.arange(1, 9) - 2.) -
        (-0.05) * (np.arange(8) - 2.))
    assert_allclose(mod_resid[0, 0, 0, :], resids[0, 0, 0, :])
    # Resids when: S_t = 1, S_{t-1} = 0, S_{t-2} = 0
    resids[1, 0, 0, :] = (
        (np.arange(2, 10) - 3.) -
        0.5 * (np.arange(1, 9) - 2.) -
        (-0.25) * (np.arange(8) - 2.))
    assert_allclose(mod_resid[1, 0, 0, :], resids[1, 0, 0, :])
    # Resids when: S_t = 0, S_{t-1} = 2, S_{t-2} = 1
    resids[0, 2, 1, :] = (
        (np.arange(2, 10) - 2.) -
        0.1 * (np.arange(1, 9) - 4.) -
        (-0.05) * (np.arange(8) - 3.))
    assert_allclose(mod_resid[0, 2, 1, :], resids[0, 2, 1, :])
    # AR(1) with mean + non-switching exog
    endog = np.arange(10)
    exog = np.r_[0.4, 5, 0.2, 1.2, -0.3, 2.5, 0.2, -0.7, 2., -1.1]
    mod = markov_autoregression.MarkovAutoregression(
        endog, k_regimes=2, order=1, exog=exog)
    assert_equal(mod.nobs, 9)
    assert_equal(mod.endog, np.arange(1, 10))
    params = np.r_[0.5, 0.5, 2., 3., 1.5, 1., 0.1, 0.5]
    mod_resid = mod._resid(params)
    resids = np.zeros((2, 2, mod.nobs))
    # Resids when: S_t = 0, S_{t-1} = 0
    resids[0, 0, :] = (
        (np.arange(1, 10) - 2. - 1.5 * exog[1:]) -
        0.1 * (np.arange(9) - 2. - 1.5 * exog[:-1]))
    assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])
    # Resids when: S_t = 0, S_{t-1} = 1
    resids[0, 1, :] = (
        (np.arange(1, 10) - 2. - 1.5 * exog[1:]) -
        0.1 * (np.arange(9) - 3. - 1.5 * exog[:-1]))
    assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])
    # Resids when: S_t = 1, S_{t-1} = 0
    resids[1, 0, :] = (
        (np.arange(1, 10) - 3. - 1.5 * exog[1:]) -
        0.5 * (np.arange(9) - 2. - 1.5 * exog[:-1]))
    assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])
    # Resids when: S_t = 1, S_{t-1} = 1
    resids[1, 1, :] = (
        (np.arange(1, 10) - 3. - 1.5 * exog[1:]) -
        0.5 * (np.arange(9) - 3. - 1.5 * exog[:-1]))
    assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])
def test_conditional_likelihoods():
    """Check MarkovAutoregression._conditional_likelihoods against Gaussian
    densities computed directly from the _resid output, with both a single
    shared variance and regime-switching variances."""
    # AR(1) without mean, k_regimes=2, non-switching variance
    endog = np.ones(10)
    mod = markov_autoregression.MarkovAutoregression(
        endog, k_regimes=2, order=1)
    assert_equal(mod.nobs, 9)
    assert_equal(mod.endog, np.ones(9))
    params = np.r_[0.5, 0.5, 2., 3., 2., 0.1, 0.5]
    resid = mod._resid(params)
    # Gaussian density with the shared variance 2.
    conditional_likelihoods = (
        np.exp(-0.5 * resid**2 / 2) / np.sqrt(2 * np.pi * 2))
    assert_equal(mod._conditional_likelihoods(params), conditional_likelihoods)
    # AR(1) without mean, k_regimes=3, switching variance
    endog = np.ones(10)
    mod = markov_autoregression.MarkovAutoregression(
        endog, k_regimes=3, order=1, switching_variance=True)
    assert_equal(mod.nobs, 9)
    assert_equal(mod.endog, np.ones(9))
    params = np.r_[[0.3]*6, 2., 3., 4., 1.5, 3., 4.5, 0.1, 0.5, 0.8]
    mod_conditional_likelihoods = mod._conditional_likelihoods(params)
    # Start from the residuals and overwrite in place with the densities,
    # one regime (and its variance 1.5 / 3. / 4.5) at a time.
    conditional_likelihoods = mod._resid(params)
    # S_t = 0
    conditional_likelihoods[0, :, :] = (
        np.exp(-0.5 * conditional_likelihoods[0, :, :]**2 / 1.5) /
        np.sqrt(2 * np.pi * 1.5))
    assert_allclose(mod_conditional_likelihoods[0, :, :],
                    conditional_likelihoods[0, :, :])
    # S_t = 1
    conditional_likelihoods[1, :, :] = (
        np.exp(-0.5 * conditional_likelihoods[1, :, :]**2 / 3.) /
        np.sqrt(2 * np.pi * 3.))
    assert_allclose(mod_conditional_likelihoods[1, :, :],
                    conditional_likelihoods[1, :, :])
    # S_t = 2
    conditional_likelihoods[2, :, :] = (
        np.exp(-0.5 * conditional_likelihoods[2, :, :]**2 / 4.5) /
        np.sqrt(2 * np.pi * 4.5))
    assert_allclose(mod_conditional_likelihoods[2, :, :],
                    conditional_likelihoods[2, :, :])
class MarkovAutoregression(object):
    """Shared harness for the regression-test classes below.

    ``setup_class`` builds a MarkovAutoregression model once and smooths
    it at the reference parameter vector; subclasses supply ``true`` (a
    dict with 'params', 'llf', 'llf_fit', 'llf_fit_em' and optionally
    more) plus the data and model keyword arguments.
    """
    @classmethod
    def setup_class(cls, true, endog, atol=1e-5, rtol=1e-7, **kwargs):
        cls.model = markov_autoregression.MarkovAutoregression(endog, **kwargs)
        cls.true = true
        # smoothed results at the reference params, reused by all tests
        cls.result = cls.model.smooth(cls.true['params'])
        cls.atol = atol
        cls.rtol = rtol
    def test_llf(self):
        # log-likelihood at the reference parameters
        assert_allclose(self.result.llf, self.true['llf'], atol=self.atol,
                        rtol=self.rtol)
    def test_fit(self, **kwargs):
        # Test fitting against Stata
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res = self.model.fit(disp=False, **kwargs)
        assert_allclose(res.llf, self.true['llf_fit'], atol=self.atol,
                        rtol=self.rtol)
    def test_fit_em(self, **kwargs):
        # Test EM fitting (smoke test)
        res_em = self.model._fit_em(**kwargs)
        assert_allclose(res_em.llf, self.true['llf_fit_em'], atol=self.atol,
                        rtol=self.rtol)
# Regression fixtures for TestHamiltonAR2Short below: expected filtered,
# predicted and smoothed joint regime probabilities over the 8 usable
# observations of rgnp[-10:].
hamilton_ar2_short_filtered_joint_probabilities = np.array([[[[
    4.99506987e-02, 6.44048275e-04, 6.22227140e-05,
    4.45756755e-06, 5.26645567e-07, 7.99846146e-07,
    1.19425705e-05, 6.87762063e-03],
    [ 1.95930395e-02, 3.25884335e-04, 1.12955091e-04,
    3.38537103e-04, 9.81927968e-06, 2.71696750e-05,
    5.83828290e-03, 7.64261509e-02]],
    [[ 1.97113193e-03, 9.50372207e-05, 1.98390978e-04,
    1.88188953e-06, 4.83449400e-07, 1.14872860e-05,
    4.02918239e-06, 4.35015431e-04],
    [ 2.24870443e-02, 1.27331172e-03, 9.62155856e-03,
    4.04178695e-03, 2.75516282e-04, 1.18179572e-02,
    5.99778157e-02, 1.48149567e-01]]],
    [[[ 6.70912859e-02, 1.84223872e-02, 2.55621792e-04,
    4.48500688e-05, 7.80481515e-05, 2.73734559e-06,
    7.59835896e-06, 1.42930726e-03],
    [ 2.10053328e-02, 7.44036383e-03, 3.70388879e-04,
    2.71878370e-03, 1.16152088e-03, 7.42182691e-05,
    2.96490192e-03, 1.26774695e-02]],
    [[ 8.09335679e-02, 8.31016518e-02, 2.49149080e-02,
    5.78825626e-04, 2.19019941e-03, 1.20179130e-03,
    7.83659430e-05, 2.76363377e-03],
    [ 7.36967899e-01, 8.88697316e-01, 9.64463954e-01,
    9.92270877e-01, 9.96283886e-01, 9.86863839e-01,
    9.31117063e-01, 7.51241236e-01]]]])
# Expected predicted joint probabilities; one extra leading dimension
# relative to the filtered fixture (see test_filter_output, which sums
# over axis -2 when the model result has fewer dimensions).
hamilton_ar2_short_predicted_joint_probabilities = np.array([[[[[
    1.20809334e-01, 3.76964436e-02, 4.86045844e-04,
    4.69578023e-05, 3.36400588e-06, 3.97445190e-07,
    6.03622290e-07, 9.01273552e-06],
    [ 3.92723623e-02, 1.47863379e-02, 2.45936108e-04,
    8.52441571e-05, 2.55484811e-04, 7.41034525e-06,
    2.05042201e-05, 4.40599447e-03]],
    [[ 4.99131230e-03, 1.48756005e-03, 7.17220245e-05,
    1.49720314e-04, 1.42021122e-06, 3.64846209e-07,
    8.66914462e-06, 3.04071516e-06],
    [ 4.70476003e-02, 1.69703652e-02, 9.60933974e-04,
    7.26113047e-03, 3.05022748e-03, 2.07924699e-04,
    8.91869322e-03, 4.52636381e-02]]],
    [[[ 4.99131230e-03, 6.43506069e-03, 1.76698327e-03,
    2.45179642e-05, 4.30179435e-06, 7.48598845e-06,
    2.62552503e-07, 7.28796600e-07],
    [ 1.62256192e-03, 2.01472650e-03, 7.13642497e-04,
    3.55258493e-05, 2.60772139e-04, 1.11407276e-04,
    7.11864528e-06, 2.84378568e-04]],
    [[ 5.97950448e-03, 7.76274317e-03, 7.97069493e-03,
    2.38971340e-03, 5.55180599e-05, 2.10072977e-04,
    1.15269812e-04, 7.51646942e-06],
    [ 5.63621989e-02, 7.06862760e-02, 8.52394030e-02,
    9.25065601e-02, 9.51736612e-02, 9.55585689e-02,
    9.46550451e-02, 8.93080931e-02]]]],
    [[[[ 3.92723623e-02, 1.22542551e-02, 1.58002431e-04,
    1.52649118e-05, 1.09356167e-06, 1.29200377e-07,
    1.96223855e-07, 2.92983500e-06],
    [ 1.27665503e-02, 4.80670161e-03, 7.99482261e-05,
    2.77109335e-05, 8.30522919e-05, 2.40893443e-06,
    6.66545485e-06, 1.43228843e-03]],
    [[ 1.62256192e-03, 4.83571884e-04, 2.33151963e-05,
    4.86706634e-05, 4.61678312e-07, 1.18603191e-07,
    2.81814142e-06, 9.88467229e-07],
    [ 1.52941031e-02, 5.51667911e-03, 3.12377744e-04,
    2.36042810e-03, 9.91559466e-04, 6.75915830e-05,
    2.89926399e-03, 1.47141776e-02]]],
    [[[ 4.70476003e-02, 6.06562252e-02, 1.66554040e-02,
    2.31103828e-04, 4.05482745e-05, 7.05621631e-05,
    2.47479309e-06, 6.86956236e-06],
    [ 1.52941031e-02, 1.89906063e-02, 6.72672133e-03,
    3.34863029e-04, 2.45801156e-03, 1.05011361e-03,
    6.70996238e-05, 2.68052335e-03]],
    [[ 5.63621989e-02, 7.31708248e-02, 7.51309569e-02,
    2.25251946e-02, 5.23307566e-04, 1.98012644e-03,
    1.08652148e-03, 7.08494735e-05],
    [ 5.31264334e-01, 6.66281623e-01, 8.03457913e-01,
    8.71957394e-01, 8.97097216e-01, 9.00725317e-01,
    8.92208794e-01, 8.41808970e-01]]]]])
# Expected smoothed joint probabilities (same shape as the filtered fixture).
hamilton_ar2_short_smoothed_joint_probabilities = np.array([[[[
    1.29898189e-02, 1.66298475e-04, 1.29822987e-05,
    9.95268382e-07, 1.84473346e-07, 7.18761267e-07,
    1.69576494e-05, 6.87762063e-03],
    [ 5.09522472e-03, 8.41459714e-05, 2.35672254e-05,
    7.55872505e-05, 3.43949612e-06, 2.44153330e-05,
    8.28997024e-03, 7.64261509e-02]],
    [[ 5.90021731e-04, 2.55342733e-05, 4.50698224e-05,
    5.30734135e-07, 1.80741761e-07, 1.11483792e-05,
    5.98539007e-06, 4.35015431e-04],
    [ 6.73107901e-03, 3.42109009e-04, 2.18579464e-03,
    1.13987259e-03, 1.03004157e-04, 1.14692946e-02,
    8.90976350e-02, 1.48149567e-01]]],
    [[[ 6.34648123e-02, 1.79187451e-02, 2.37462147e-04,
    3.55542558e-05, 7.63980455e-05, 2.90520820e-06,
    8.17644492e-06, 1.42930726e-03],
    [ 1.98699352e-02, 7.23695477e-03, 3.44076057e-04,
    2.15527721e-03, 1.13696383e-03, 7.87695658e-05,
    3.19047276e-03, 1.26774695e-02]],
    [[ 8.81925054e-02, 8.33092133e-02, 2.51106301e-02,
    5.81007470e-04, 2.19065072e-03, 1.20221350e-03,
    7.56893839e-05, 2.76363377e-03],
    [ 8.03066603e-01, 8.90916999e-01, 9.72040418e-01,
    9.96011175e-01, 9.96489179e-01, 9.87210535e-01,
    8.99315113e-01, 7.51241236e-01]]]])
class TestHamiltonAR2Short(MarkovAutoregression):
    """Regression tests on the last 10 rgnp observations: AR(2), two
    regimes, non-switching AR coefficients; reference values are the
    fixture arrays defined above."""
    # This is just a set of regression tests
    @classmethod
    def setup_class(cls):
        true = {
            'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,
                            np.exp(-0.262658)**2, 0.013486, -0.057521],
            'llf': -10.14066,
            'llf_fit': -4.0523073,
            'llf_fit_em': -8.885836
        }
        super(TestHamiltonAR2Short, cls).setup_class(
            true, rgnp[-10:], k_regimes=2, order=2, switching_ar=False)
    def test_fit_em(self):
        # EM fitting emits warnings on this tiny sample; silence them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            super(TestHamiltonAR2Short, self).test_fit_em()
    def test_filter_output(self, **kwargs):
        res = self.result
        # Filtered
        assert_allclose(res.filtered_joint_probabilities,
                        hamilton_ar2_short_filtered_joint_probabilities)
        # Predicted
        desired = hamilton_ar2_short_predicted_joint_probabilities
        if desired.ndim > res.predicted_joint_probabilities.ndim:
            # collapse the fixture's extra lag dimension to match the model
            desired = desired.sum(axis=-2)
        assert_allclose(res.predicted_joint_probabilities, desired)
    def test_smoother_output(self, **kwargs):
        res = self.result
        # Filtered
        assert_allclose(res.filtered_joint_probabilities,
                        hamilton_ar2_short_filtered_joint_probabilities)
        # Predicted
        desired = hamilton_ar2_short_predicted_joint_probabilities
        if desired.ndim > res.predicted_joint_probabilities.ndim:
            desired = desired.sum(axis=-2)
        assert_allclose(res.predicted_joint_probabilities, desired)
        # Smoothed, entry-by-entry
        assert_allclose(
            res.smoothed_joint_probabilities[..., -1],
            hamilton_ar2_short_smoothed_joint_probabilities[..., -1])
        assert_allclose(
            res.smoothed_joint_probabilities[..., -2],
            hamilton_ar2_short_smoothed_joint_probabilities[..., -2])
        assert_allclose(
            res.smoothed_joint_probabilities[..., -3],
            hamilton_ar2_short_smoothed_joint_probabilities[..., -3])
        assert_allclose(
            res.smoothed_joint_probabilities[..., :-3],
            hamilton_ar2_short_smoothed_joint_probabilities[..., :-3])
# Regression fixtures for TestHamiltonAR4: expected filtered and smoothed
# marginal probabilities of regime 1 over the full rgnp sample.
hamilton_ar4_filtered = [
    0.776712, 0.949192, 0.996320, 0.990258, 0.940111, 0.537442,
    0.140001, 0.008942, 0.048480, 0.614097, 0.910889, 0.995463,
    0.979465, 0.992324, 0.984561, 0.751038, 0.776268, 0.522048,
    0.814956, 0.821786, 0.472729, 0.673567, 0.029031, 0.001556,
    0.433276, 0.985463, 0.995025, 0.966067, 0.998445, 0.801467,
    0.960997, 0.996431, 0.461365, 0.199357, 0.027398, 0.703626,
    0.946388, 0.985321, 0.998244, 0.989567, 0.984510, 0.986811,
    0.793788, 0.973675, 0.984848, 0.990418, 0.918427, 0.998769,
    0.977647, 0.978742, 0.927635, 0.998691, 0.988934, 0.991654,
    0.999288, 0.999073, 0.918636, 0.987710, 0.966876, 0.910015,
    0.826150, 0.969451, 0.844049, 0.941525, 0.993363, 0.949978,
    0.615206, 0.970915, 0.787585, 0.707818, 0.200476, 0.050835,
    0.140723, 0.809850, 0.086422, 0.990344, 0.785963, 0.817425,
    0.659152, 0.996578, 0.992860, 0.948501, 0.996883, 0.999712,
    0.906694, 0.725013, 0.963690, 0.386960, 0.241302, 0.009078,
    0.015789, 0.000896, 0.541530, 0.928686, 0.953704, 0.992741,
    0.935877, 0.918958, 0.977316, 0.987941, 0.987300, 0.996769,
    0.645469, 0.921285, 0.999917, 0.949335, 0.968914, 0.886025,
    0.777141, 0.904381, 0.368277, 0.607429, 0.002491, 0.227610,
    0.871284, 0.987717, 0.288705, 0.512124, 0.030329, 0.005177,
    0.256183, 0.020955, 0.051620, 0.549009, 0.991715, 0.987892,
    0.995377, 0.999833, 0.993756, 0.956164, 0.927714]
hamilton_ar4_smoothed = [
    0.968096, 0.991071, 0.998559, 0.958534, 0.540652, 0.072784,
    0.010999, 0.006228, 0.172144, 0.898574, 0.989054, 0.998293,
    0.986434, 0.993248, 0.976868, 0.858521, 0.847452, 0.675670,
    0.596294, 0.165407, 0.035270, 0.127967, 0.007414, 0.004944,
    0.815829, 0.998128, 0.998091, 0.993227, 0.999283, 0.921100,
    0.977171, 0.971757, 0.124680, 0.063710, 0.114570, 0.954701,
    0.994852, 0.997302, 0.999345, 0.995817, 0.996218, 0.994580,
    0.933990, 0.996054, 0.998151, 0.996976, 0.971489, 0.999786,
    0.997362, 0.996755, 0.993053, 0.999947, 0.998469, 0.997987,
    0.999830, 0.999360, 0.953176, 0.992673, 0.975235, 0.938121,
    0.946784, 0.986897, 0.905792, 0.969755, 0.995379, 0.914480,
    0.772814, 0.931385, 0.541742, 0.394596, 0.063428, 0.027829,
    0.124527, 0.286105, 0.069362, 0.995950, 0.961153, 0.962449,
    0.945022, 0.999855, 0.998943, 0.980041, 0.999028, 0.999838,
    0.863305, 0.607421, 0.575983, 0.013300, 0.007562, 0.000635,
    0.001806, 0.002196, 0.803550, 0.972056, 0.984503, 0.998059,
    0.985211, 0.988486, 0.994452, 0.994498, 0.998873, 0.999192,
    0.870482, 0.976282, 0.999961, 0.984283, 0.973045, 0.786176,
    0.403673, 0.275418, 0.115199, 0.257560, 0.004735, 0.493936,
    0.907360, 0.873199, 0.052959, 0.076008, 0.001653, 0.000847,
    0.062027, 0.021257, 0.219547, 0.955654, 0.999851, 0.997685,
    0.998324, 0.999939, 0.996858, 0.969209, 0.927714]
class TestHamiltonAR4(MarkovAutoregression):
    """Hamilton's AR(4) model on the full rgnp sample, two regimes,
    non-switching AR coefficients; reference values from E-views (see the
    setup notes below)."""
    @classmethod
    def setup_class(cls):
        # Results from E-views:
        # Dependent variable followed by a list of switching regressors:
        # rgnp c
        # List of non-switching regressors:
        # ar(1) ar(2) ar(3) ar(4)
        # Do not check "Regime specific error variances"
        # Switching type: Markov
        # Number of Regimes: 2
        # Probability regressors:
        # c
        # Method SWITCHREG
        # Sample 1951q1 1984q4
        true = {
            'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,
                            np.exp(-0.262658)**2, 0.013486, -0.057521,
                            -0.246983, -0.212923],
            'llf': -181.26339,
            'llf_fit': -181.26339,
            'llf_fit_em': -183.85444,
            'bse_oim': np.r_[.0965189, .0377362, .2645396, .0745187, np.nan,
                             .1199942, .137663, .1069103, .1105311, ]
        }
        super(TestHamiltonAR4, cls).setup_class(
            true, rgnp, k_regimes=2, order=4, switching_ar=False)
    def test_filtered_regimes(self):
        res = self.result
        assert_equal(len(res.filtered_marginal_probabilities[:, 1]),
                     self.model.nobs)
        assert_allclose(res.filtered_marginal_probabilities[:, 1],
                        hamilton_ar4_filtered, atol=1e-5)
    def test_smoothed_regimes(self):
        res = self.result
        assert_equal(len(res.smoothed_marginal_probabilities[:, 1]),
                     self.model.nobs)
        assert_allclose(res.smoothed_marginal_probabilities[:, 1],
                        hamilton_ar4_smoothed, atol=1e-5)
    def test_bse(self):
        # Can't compare middle element of bse because we estimate sigma^2
        # rather than sigma
        bse = self.result.cov_params_approx.diagonal()**0.5
        assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-6)
        assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-6)
class TestHamiltonAR2Switch(MarkovAutoregression):
    """AR(2) with switching AR coefficients on the full rgnp sample;
    smoothed probabilities and in-sample predictions compared against
    Stata output stored in results/results_predict_rgnp.csv."""
    # Results from Stata, see http://www.stata.com/manuals14/tsmswitch.pdf
    @classmethod
    def setup_class(cls):
        path = os.path.join(current_path, 'results',
                            'results_predict_rgnp.csv')
        results = pd.read_csv(path)
        true = {
            'params': np.r_[.3812383, .3564492, -.0055216, 1.195482,
                            .6677098**2, .3710719, .4621503, .7002937,
                            -.3206652],
            'llf': -179.32354,
            'llf_fit': -179.38684,
            'llf_fit_em': -184.99606,
            'bse_oim': np.r_[.1424841, .0994742, .2057086, .1225987, np.nan,
                             .1754383, .1652473, .187409, .1295937],
            # iloc[3:]: the first rows are lost to the AR(2) lags
            'smoothed0': results.iloc[3:]['switchar2_sm1'],
            'smoothed1': results.iloc[3:]['switchar2_sm2'],
            'predict0': results.iloc[3:]['switchar2_yhat1'],
            'predict1': results.iloc[3:]['switchar2_yhat2'],
            'predict_predicted': results.iloc[3:]['switchar2_pyhat'],
            'predict_filtered': results.iloc[3:]['switchar2_fyhat'],
            'predict_smoothed': results.iloc[3:]['switchar2_syhat'],
        }
        super(TestHamiltonAR2Switch, cls).setup_class(
            true, rgnp, k_regimes=2, order=2)
    def test_smoothed_marginal_probabilities(self):
        assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
                        self.true['smoothed0'], atol=1e-6)
        assert_allclose(self.result.smoothed_marginal_probabilities[:, 1],
                        self.true['smoothed1'], atol=1e-6)
    def test_predict(self):
        # Smoothed
        actual = self.model.predict(
            self.true['params'], probabilities='smoothed')
        assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
        # probabilities=None should default to the smoothed probabilities
        actual = self.model.predict(
            self.true['params'], probabilities=None)
        assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
        # same checks through the results object
        actual = self.result.predict(probabilities='smoothed')
        assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
        actual = self.result.predict(probabilities=None)
        assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
    def test_bse(self):
        # Can't compare middle element of bse because we estimate sigma^2
        # rather than sigma
        bse = self.result.cov_params_approx.diagonal()**0.5
        assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-7)
        assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-7)
# Regression fixtures for TestHamiltonAR1Switch: expected filtered and
# smoothed marginal probabilities of regime 0 over the full rgnp sample.
hamilton_ar1_switch_filtered = [
    0.840288, 0.730337, 0.900234, 0.596492, 0.921618, 0.983828,
    0.959039, 0.898366, 0.477335, 0.251089, 0.049367, 0.386782,
    0.942868, 0.965632, 0.982857, 0.897603, 0.946986, 0.916413,
    0.640912, 0.849296, 0.778371, 0.954420, 0.929906, 0.723930,
    0.891196, 0.061163, 0.004806, 0.977369, 0.997871, 0.977950,
    0.896580, 0.963246, 0.430539, 0.906586, 0.974589, 0.514506,
    0.683457, 0.276571, 0.956475, 0.966993, 0.971618, 0.987019,
    0.916670, 0.921652, 0.930265, 0.655554, 0.965858, 0.964981,
    0.976790, 0.868267, 0.983240, 0.852052, 0.919150, 0.854467,
    0.987868, 0.935840, 0.958138, 0.979535, 0.956541, 0.716322,
    0.919035, 0.866437, 0.899609, 0.914667, 0.976448, 0.867252,
    0.953075, 0.977850, 0.884242, 0.688299, 0.968461, 0.737517,
    0.870674, 0.559413, 0.380339, 0.582813, 0.941311, 0.240020,
    0.999349, 0.619258, 0.828343, 0.729726, 0.991009, 0.966291,
    0.899148, 0.970798, 0.977684, 0.695877, 0.637555, 0.915824,
    0.434600, 0.771277, 0.113756, 0.144002, 0.008466, 0.994860,
    0.993173, 0.961722, 0.978555, 0.789225, 0.836283, 0.940383,
    0.968368, 0.974473, 0.980248, 0.518125, 0.904086, 0.993023,
    0.802936, 0.920906, 0.685445, 0.666524, 0.923285, 0.643861,
    0.938184, 0.008862, 0.945406, 0.990061, 0.991500, 0.486669,
    0.805039, 0.089036, 0.025067, 0.863309, 0.352784, 0.733295,
    0.928710, 0.984257, 0.926597, 0.959887, 0.984051, 0.872682,
    0.824375, 0.780157]
hamilton_ar1_switch_smoothed = [
    0.900074, 0.758232, 0.914068, 0.637248, 0.901951, 0.979905,
    0.958935, 0.888641, 0.261602, 0.148761, 0.056919, 0.424396,
    0.932184, 0.954962, 0.983958, 0.895595, 0.949519, 0.923473,
    0.678898, 0.848793, 0.807294, 0.958868, 0.942936, 0.809137,
    0.960892, 0.032947, 0.007127, 0.967967, 0.996551, 0.979278,
    0.896181, 0.987462, 0.498965, 0.908803, 0.986893, 0.488720,
    0.640492, 0.325552, 0.951996, 0.959703, 0.960914, 0.986989,
    0.916779, 0.924570, 0.935348, 0.677118, 0.960749, 0.958966,
    0.976974, 0.838045, 0.986562, 0.847774, 0.908866, 0.821110,
    0.984965, 0.915302, 0.938196, 0.976518, 0.973780, 0.744159,
    0.922006, 0.873292, 0.904035, 0.917547, 0.978559, 0.870915,
    0.948420, 0.979747, 0.884791, 0.711085, 0.973235, 0.726311,
    0.828305, 0.446642, 0.411135, 0.639357, 0.973151, 0.141707,
    0.999805, 0.618207, 0.783239, 0.672193, 0.987618, 0.964655,
    0.877390, 0.962437, 0.989002, 0.692689, 0.699370, 0.937934,
    0.522535, 0.824567, 0.058746, 0.146549, 0.009864, 0.994072,
    0.992084, 0.956945, 0.984297, 0.795926, 0.845698, 0.935364,
    0.963285, 0.972767, 0.992168, 0.528278, 0.826349, 0.996574,
    0.811431, 0.930873, 0.680756, 0.721072, 0.937977, 0.731879,
    0.996745, 0.016121, 0.951187, 0.989820, 0.996968, 0.592477,
    0.889144, 0.036015, 0.040084, 0.858128, 0.418984, 0.746265,
    0.907990, 0.980984, 0.900449, 0.934741, 0.986807, 0.872818,
    0.812080, 0.780157]
class TestHamiltonAR1Switch(MarkovAutoregression):
    """AR(1) with switching constant and AR coefficient on the full rgnp
    sample; reference values from E-views (see the setup notes below)."""
    @classmethod
    def setup_class(cls):
        # Results from E-views:
        # Dependent variable followed by a list of switching regressors:
        # rgnp c ar(1)
        # List of non-switching regressors: <blank>
        # Do not check "Regime specific error variances"
        # Switching type: Markov
        # Number of Regimes: 2
        # Probability regressors:
        # c
        # Method SWITCHREG
        # Sample 1951q1 1984q4
        true = {
            'params': np.r_[0.85472458, 0.53662099, 1.041419, -0.479157,
                            np.exp(-0.231404)**2, 0.243128, 0.713029],
            'llf': -186.7575,
            'llf_fit': -186.7575,
            'llf_fit_em': -189.25446
        }
        super(TestHamiltonAR1Switch, cls).setup_class(
            true, rgnp, k_regimes=2, order=1)
    def test_filtered_regimes(self):
        assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
                        hamilton_ar1_switch_filtered, atol=1e-5)
    def test_smoothed_regimes(self):
        assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
                        hamilton_ar1_switch_smoothed, atol=1e-5)
    def test_expected_durations(self):
        # expected number of periods spent in each regime per visit
        expected_durations = [6.883477, 1.863513]
        assert_allclose(self.result.expected_durations, expected_durations,
                        atol=1e-5)
# Reference values: filtered regime-0 marginal probabilities for the TVTP
# (time-varying transition probability) variant of the Hamilton AR(1) model.
hamilton_ar1_switch_tvtp_filtered = [
    0.999996, 0.999211, 0.999849, 0.996007, 0.999825, 0.999991,
    0.999981, 0.999819, 0.041745, 0.001116, 1.74e-05, 0.000155,
    0.999976, 0.999958, 0.999993, 0.999878, 0.999940, 0.999791,
    0.996553, 0.999486, 0.998485, 0.999894, 0.999765, 0.997657,
    0.999619, 0.002853, 1.09e-05, 0.999884, 0.999996, 0.999997,
    0.999919, 0.999987, 0.989762, 0.999807, 0.999978, 0.050734,
    0.010660, 0.000217, 0.006174, 0.999977, 0.999954, 0.999995,
    0.999934, 0.999867, 0.999824, 0.996783, 0.999941, 0.999948,
    0.999981, 0.999658, 0.999994, 0.999753, 0.999859, 0.999330,
    0.999993, 0.999956, 0.999970, 0.999996, 0.999991, 0.998674,
    0.999869, 0.999432, 0.999570, 0.999600, 0.999954, 0.999499,
    0.999906, 0.999978, 0.999712, 0.997441, 0.999948, 0.998379,
    0.999578, 0.994745, 0.045936, 0.006816, 0.027384, 0.000278,
    1.000000, 0.996382, 0.999541, 0.998130, 0.999992, 0.999990,
    0.999860, 0.999986, 0.999997, 0.998520, 0.997777, 0.999821,
    0.033353, 0.011629, 6.95e-05, 4.52e-05, 2.04e-06, 0.999963,
    0.999977, 0.999949, 0.999986, 0.999240, 0.999373, 0.999858,
    0.999946, 0.999972, 0.999991, 0.994039, 0.999817, 0.999999,
    0.999715, 0.999924, 0.997763, 0.997944, 0.999825, 0.996592,
    0.695147, 0.000161, 0.999665, 0.999928, 0.999988, 0.992742,
    0.374214, 0.001569, 2.16e-05, 0.000941, 4.32e-05, 0.000556,
    0.999955, 0.999993, 0.999942, 0.999973, 0.999999, 0.999919,
    0.999438, 0.998738]
# Reference values: smoothed regime-0 marginal probabilities for the TVTP
# variant of the Hamilton AR(1) model (see TestHamiltonAR1SwitchTVTP).
hamilton_ar1_switch_tvtp_smoothed = [
    0.999997, 0.999246, 0.999918, 0.996118, 0.999740, 0.999990,
    0.999984, 0.999783, 0.035454, 0.000958, 1.53e-05, 0.000139,
    0.999973, 0.999939, 0.999994, 0.999870, 0.999948, 0.999884,
    0.997243, 0.999668, 0.998424, 0.999909, 0.999860, 0.998037,
    0.999559, 0.002533, 1.16e-05, 0.999801, 0.999993, 0.999997,
    0.999891, 0.999994, 0.990096, 0.999753, 0.999974, 0.048495,
    0.009289, 0.000542, 0.005991, 0.999974, 0.999929, 0.999995,
    0.999939, 0.999880, 0.999901, 0.996221, 0.999937, 0.999935,
    0.999985, 0.999450, 0.999995, 0.999768, 0.999897, 0.998930,
    0.999992, 0.999949, 0.999954, 0.999995, 0.999994, 0.998687,
    0.999902, 0.999547, 0.999653, 0.999538, 0.999966, 0.999485,
    0.999883, 0.999982, 0.999831, 0.996940, 0.999968, 0.998678,
    0.999780, 0.993895, 0.055372, 0.020421, 0.022913, 0.000127,
    1.000000, 0.997072, 0.999715, 0.996893, 0.999990, 0.999991,
    0.999811, 0.999978, 0.999998, 0.999100, 0.997866, 0.999787,
    0.034912, 0.009932, 5.91e-05, 3.99e-05, 1.77e-06, 0.999954,
    0.999976, 0.999932, 0.999991, 0.999429, 0.999393, 0.999845,
    0.999936, 0.999961, 0.999995, 0.994246, 0.999570, 1.000000,
    0.999702, 0.999955, 0.998611, 0.998019, 0.999902, 0.998486,
    0.673991, 0.000205, 0.999627, 0.999902, 0.999994, 0.993707,
    0.338707, 0.001359, 2.36e-05, 0.000792, 4.47e-05, 0.000565,
    0.999932, 0.999993, 0.999931, 0.999950, 0.999999, 0.999940,
    0.999626, 0.998738]
# Reference values: per-period expected regime durations for the TVTP model.
# Each row is [duration_regime0, duration_regime1]; only two distinct rows
# occur because the transition probabilities depend on a binary indicator.
expected_durations = [
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
    [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [1.223309, 1864.084], [1.223309, 1864.084],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
    [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
    [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
    [1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
    [1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
    [710.7573, 1.000391], [710.7573, 1.000391]]
class TestHamiltonAR1SwitchTVTP(MarkovAutoregression):
    """Hamilton AR(1) switching model with time-varying transition probabilities.

    E-views configuration used to generate the reference results:
      - Dependent variable followed by switching regressors: rgnp c ar(1)
      - Non-switching regressors: <blank>
      - "Regime specific error variances" NOT checked
      - Switching type: Markov, Number of Regimes: 2
      - Probability regressors: c recession
      - Method SWITCHREG, Sample 1951q1 1984q4
    """

    @classmethod
    def setup_class(cls):
        true = {
            'params': np.r_[6.564923, 7.846371, -8.064123, -15.37636,
                            1.027190, -0.719760,
                            np.exp(-0.217003)**2, 0.161489, 0.022536],
            'llf': -163.914049,
            'llf_fit': -161.786477,
            'llf_fit_em': -163.914049
        }
        # Transition probabilities depend on a constant and the recession flag.
        exog_tvtp = np.c_[np.ones(len(rgnp)), rec]
        super(TestHamiltonAR1SwitchTVTP, cls).setup_class(
            true, rgnp, k_regimes=2, order=1, exog_tvtp=exog_tvtp)

    @skip
    def test_fit_em(self):
        pass

    def test_filtered_regimes(self):
        actual = self.result.filtered_marginal_probabilities[:, 0]
        assert_allclose(actual, hamilton_ar1_switch_tvtp_filtered, atol=1e-5)

    def test_smoothed_regimes(self):
        actual = self.result.smoothed_marginal_probabilities[:, 0]
        assert_allclose(actual, hamilton_ar1_switch_tvtp_smoothed, atol=1e-5)

    def test_expected_durations(self):
        assert_allclose(self.result.expected_durations, expected_durations,
                        rtol=1e-5, atol=1e-7)
class TestFilardo(MarkovAutoregression):
    """Filardo TVTP Markov autoregression, checked against stored CSV results."""

    @classmethod
    def setup_class(cls):
        path = os.path.join(current_path, 'results', 'mar_filardo.csv')
        cls.mar_filardo = pd.read_csv(path)
        true = {
            'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
                            0.517298, -0.865888,
                            np.exp(-0.362469)**2,
                            0.189474, 0.079344, 0.110944, 0.122251],
            'llf': -586.5718,
            'llf_fit': -586.5718,
            'llf_fit_em': -586.5718
        }
        # Endog starts one period late; the TVTP regressor is lagged one period.
        endog = cls.mar_filardo['dlip'].iloc[1:].values
        exog_tvtp = add_constant(
            cls.mar_filardo['dmdlleading'].iloc[:-1].values)
        super(TestFilardo, cls).setup_class(
            true, endog, k_regimes=2, order=4, switching_ar=False,
            exog_tvtp=exog_tvtp)

    @skip
    def test_fit(self, **kwargs):
        pass

    @skip
    def test_fit_em(self):
        pass

    def test_filtered_regimes(self):
        actual = self.result.filtered_marginal_probabilities[:, 0]
        assert_allclose(actual, self.mar_filardo['filtered_0'].iloc[5:],
                        atol=1e-5)

    def test_smoothed_regimes(self):
        actual = self.result.smoothed_marginal_probabilities[:, 0]
        assert_allclose(actual, self.mar_filardo['smoothed_0'].iloc[5:],
                        atol=1e-5)

    def test_expected_durations(self):
        expected = self.mar_filardo[['duration0', 'duration1']].iloc[5:]
        assert_allclose(self.result.expected_durations, expected,
                        rtol=1e-5, atol=1e-7)
class TestFilardoPandas(MarkovAutoregression):
    """Filardo TVTP model fed pandas objects (date-indexed) instead of arrays."""

    @classmethod
    def setup_class(cls):
        path = os.path.join(current_path, 'results', 'mar_filardo.csv')
        cls.mar_filardo = pd.read_csv(path)
        cls.mar_filardo.index = pd.date_range('1948-02-01', '1991-04-01',
                                              freq='MS')
        true = {
            'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
                            0.517298, -0.865888,
                            np.exp(-0.362469)**2,
                            0.189474, 0.079344, 0.110944, 0.122251],
            'llf': -586.5718,
            'llf_fit': -586.5718,
            'llf_fit_em': -586.5718
        }
        # Same data as TestFilardo, but kept as pandas Series.
        endog = cls.mar_filardo['dlip'].iloc[1:]
        exog_tvtp = add_constant(
            cls.mar_filardo['dmdlleading'].iloc[:-1])
        super(TestFilardoPandas, cls).setup_class(
            true, endog, k_regimes=2, order=4, switching_ar=False,
            exog_tvtp=exog_tvtp)

    @skip
    def test_fit(self, **kwargs):
        pass

    @skip
    def test_fit_em(self):
        pass

    def test_filtered_regimes(self):
        # With pandas results, regime 0 is selected by column label.
        actual = self.result.filtered_marginal_probabilities[0]
        assert_allclose(actual, self.mar_filardo['filtered_0'].iloc[5:],
                        atol=1e-5)

    def test_smoothed_regimes(self):
        actual = self.result.smoothed_marginal_probabilities[0]
        assert_allclose(actual, self.mar_filardo['smoothed_0'].iloc[5:],
                        atol=1e-5)

    def test_expected_durations(self):
        expected = self.mar_filardo[['duration0', 'duration1']].iloc[5:]
        assert_allclose(self.result.expected_durations, expected,
                        rtol=1e-5, atol=1e-7)
|
import os
import rnnSMAP
# from rnnSMAP import runTrainLSTM
import numpy as np
import imp
# Re-import the package so that source edits are picked up in interactive runs.
imp.reload(rnnSMAP)
rnnSMAP.reload()
import matplotlib
#################################################
# Effect of added observation noise on sigmaX (and sigmaMC).
# Select which pipeline stages run: 'train', 'test', 'plotBox'.
doOpt = []
# doOpt.append('train')
doOpt.append('test')
doOpt.append('plotBox')
noiseOpt = 'SMAP'
# Noise-level suffixes used in target/output names; None = no added noise.
noiseNameLst = [None, '5e2', '1e1', '2e1', '3e1', '4e1', '5e1']
# Human-readable labels matching noiseNameLst, used on plot axes.
noiseNameLstPlot = ['0', '0.05', '0.1', '0.2', '0.3', '0.4', '0.5']
strSigmaLst = ['sigmaX', 'sigmaMC']
strErrLst = ['RMSE', 'ubRMSE']
saveFolder = os.path.join(
    rnnSMAP.kPath['dirResult'], 'Sigma', 'int_noise_red')
rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
rootDB = rnnSMAP.kPath['DB_L3_NA']
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 10})
#################################################
if 'train' in doOpt:
    # NOTE(review): runTrainLSTM is used below but its import is commented out
    # at the top of the file -- enabling 'train' as-is would raise NameError;
    # confirm the intended import before re-enabling this stage.
    opt = rnnSMAP.classLSTM.optLSTM(
        rootDB=rootDB, rootOut=rootOut,
        syr=2015, eyr=2015,
        var='varLst_Forcing', varC='varConstLst_Noah',
        dr=0.5, modelOpt='relu', model='cudnn',
        loss='sigma'
    )
    trainName = 'CONUSv4f1'
    opt['train'] = trainName
    # Round-robin GPU assignment across jobs.
    cudaIdLst = np.tile([0, 1, 2], 10)
    # Starts at k=5: presumably only the last noise levels still need training
    # (and noiseNameLst[0] is None, which cannot be concatenated) -- TODO confirm.
    for k in range(5, len(noiseNameLst)):
        opt['target'] = 'SMAP_AM_rn'+noiseNameLst[k]
        opt['var'] = 'varLst_Forcing'
        opt['out'] = opt['train']+'_y15_Forcing_rn'+noiseNameLst[k]
        runTrainLSTM.runCmdLine(
            opt=opt, cudaID=cudaIdLst[k], screenName=opt['out'])
#################################################
if 'test' in doOpt:
    # Load predictions for each noise level and compute error/sigma statistics.
    dsLst = list()
    statErrLst = list()
    statSigmaLst = list()
    for k in range(0, len(noiseNameLst)):
        testName = 'CONUSv4f1'
        # targetName = 'SMAP_AM'
        if noiseNameLst[k] is not None:
            targetName = 'SMAP_AM_rn'+noiseNameLst[k]
            out = 'CONUSv4f1_y15_Forcing_rn'+noiseNameLst[k]
        else:
            # Baseline: no added noise.
            targetName = 'SMAP_AM'
            out = 'CONUSv4f1_y15_Forcing'
        ds = rnnSMAP.classDB.DatasetPost(
            rootDB=rootDB, subsetName=testName, yrLst=[2016,2017])
        ds.readData(var=targetName, field='SMAP')
        # drMC=100: presumably 100 Monte Carlo dropout realizations -- confirm.
        ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
        dsLst.append(ds)
        statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
        statSigma = ds.statCalSigma(field='LSTM')
        statErrLst.append(statErr)
        statSigmaLst.append(statSigma)
#################################################
if 'plotBox' in doOpt:
    # Box plots of sigma and error statistics across noise levels, each
    # normalized by the zero-noise baseline.
    dataTp = (statSigmaLst, statErrLst)
    attrTp = (strSigmaLst, strErrLst)
    # NOTE(review): titleTp/saveFileTp are defined but never used below.
    titleTp = ('Sigma', 'Error')
    saveFileTp = ('boxSigma', 'boxErr')
    for iP in range(0, len(dataTp)):
        dataLst = dataTp[iP]
        attrLst = attrTp[iP]
        for strS in attrLst:
            plotLst = list()
            # Baseline statistic (noise level 0) used for normalization.
            statRef = getattr(dataLst[0], strS)
            for data in dataLst:
                stat = getattr(data, strS)
                plotLst.append(stat/statRef)
            fig = rnnSMAP.funPost.plotBox(
                plotLst, labelC=noiseNameLstPlot, labelS=None,
                title='Temporal Test ' + strS)
            saveFile = os.path.join(saveFolder, 'box_'+strS)
            fig.savefig(saveFile, dpi=300)
|
# external/vcm/vcm/derived_mapping.py
import numpy as np
from typing import Mapping, Hashable, Callable, Iterable, MutableMapping
import xarray as xr
import vcm
class DerivedMapping(Mapping):
    """A uniform mapping-like interface for both existing and derived variables.

    Looking up a registered name computes the derived variable from the
    wrapped mapper (the FV3GFS state or a saved dataset); any other key is
    read straight from the mapper.
    """

    # Registry: derived-variable name -> function of (self) that computes it.
    VARIABLES: MutableMapping[Hashable, Callable[..., xr.DataArray]] = {}
    # Declared (possibly conditional) inputs needed to derive each variable.
    REQUIRED_INPUTS: MutableMapping[Hashable, Iterable[Hashable]] = {}

    def __init__(self, mapper: Mapping[Hashable, xr.DataArray]):
        self._mapper = mapper

    @classmethod
    def register(cls, name: Hashable, required_inputs: Iterable[Hashable] = None):
        """Register a function as a derived variable.

        Args:
            name: the name the derived variable will be available under.
            required_inputs: optional list of the potential inputs needed to
                derive the variable. Even conditional requirements should be
                listed (e.g. dQu only needs dQxwind/dQywind when dQu itself is
                absent), because this registry is consulted when an output is
                explicitly requested as a derived variable and is therefore
                assumed to need deriving.
        """
        def decorator(func):
            cls.VARIABLES[name] = func
            if required_inputs:
                cls.REQUIRED_INPUTS[name] = required_inputs
            return func

        return decorator

    def __getitem__(self, key: Hashable) -> xr.DataArray:
        derive = self.VARIABLES.get(key)
        if derive is not None:
            return derive(self)
        return self._mapper[key]

    def keys(self):
        # Union of raw mapper keys and registered derived names.
        return set(self._mapper) | set(self.VARIABLES)

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.keys())

    def _data_arrays(self, keys: Iterable[Hashable]):
        return {key: self[key] for key in keys}

    def dataset(self, keys: Iterable[Hashable]) -> xr.Dataset:
        """Materialize *keys* (raw or derived) as an xarray Dataset."""
        return xr.Dataset(self._data_arrays(keys))

    @classmethod
    def find_all_required_inputs(
        cls, derived_variables: Iterable[Hashable]
    ) -> Iterable[Hashable]:
        """Return the recursively expanded required inputs for the given names.

        Recursion is needed because some required inputs (e.g. the pQ's) have
        required inputs of their own.
        """
        def _expand(names, acc):
            with_deps = [name for name in names if name in cls.REQUIRED_INPUTS]
            if not with_deps:
                return
            found = []
            for name in with_deps:
                found.extend(cls.REQUIRED_INPUTS[name])
            acc.extend(found)
            _expand(found, acc)

        deps: Iterable[Hashable] = []
        _expand(derived_variables, deps)
        return deps
@DerivedMapping.register("cos_zenith_angle", required_inputs=["time", "lon", "lat"])
def cos_zenith_angle(self):
    """Cosine of the solar zenith angle computed from time/lon/lat."""
    time, lon, lat = self["time"], self["lon"], self["lat"]
    return vcm.cos_zenith_angle(time, lon, lat)
@DerivedMapping.register("evaporation", required_inputs=["latent_heat_flux"])
def evaporation(self):
    """Evaporation rate converted from the latent heat flux."""
    return vcm.latent_heat_flux_to_evaporation(self["latent_heat_flux"])
def _rotate(self: DerivedMapping, x, y):
    """Rotate native x/y winds to eastward/northward components at cell centers."""
    coeff_names = [
        "eastward_wind_u_coeff",
        "eastward_wind_v_coeff",
        "northward_wind_u_coeff",
        "northward_wind_v_coeff",
    ]
    rotation = self.dataset(coeff_names)
    return vcm.cubedsphere.center_and_rotate_xy_winds(rotation, self[x], self[y])
@DerivedMapping.register("dQu", required_inputs=["dQxwind", "dQywind"])
def dQu(self):
    """Eastward wind tendency; rotated from native dQx/dQy winds when absent."""
    try:
        return self._mapper["dQu"]
    except KeyError:
        return _rotate(self, "dQxwind", "dQywind")[0]


@DerivedMapping.register("dQv", required_inputs=["dQxwind", "dQywind"])
def dQv(self):
    """Northward wind tendency; rotated from native dQx/dQy winds when absent."""
    try:
        return self._mapper["dQv"]
    except KeyError:
        return _rotate(self, "dQxwind", "dQywind")[1]
@DerivedMapping.register("eastward_wind")
def eastward_wind(self):
    """Eastward wind; rotated from native x/y winds when not already present."""
    try:
        return self._mapper["eastward_wind"]
    except KeyError:
        return _rotate(self, "x_wind", "y_wind")[0]


@DerivedMapping.register("northward_wind")
def northward_wind(self):
    """Northward wind; rotated from native x/y winds when not already present."""
    try:
        return self._mapper["northward_wind"]
    except KeyError:
        return _rotate(self, "x_wind", "y_wind")[1]
@DerivedMapping.register(
    "dQu_parallel_to_eastward_wind", required_inputs=["eastward_wind", "dQu"]
)
def dQu_parallel_to_eastward_wind_direction(self):
    """Magnitude of dQu, signed by whether it points along the eastward wind."""
    direction = np.sign(self["eastward_wind"] / self["dQu"])
    return direction * abs(self["dQu"])


@DerivedMapping.register(
    "dQv_parallel_to_northward_wind", required_inputs=["northward_wind", "dQv"]
)
def dQv_parallel_to_northward_wind_direction(self):
    """Magnitude of dQv, signed by whether it points along the northward wind."""
    direction = np.sign(self["northward_wind"] / self["dQv"])
    return direction * abs(self["dQv"])
@DerivedMapping.register(
    "horizontal_wind_tendency_parallel_to_horizontal_wind",
    required_inputs=["eastward_wind", "dQu", "northward_wind", "dQv"],
)
def horizontal_wind_tendency_parallel_to_horizontal_wind(self):
    """Projection of the (dQu, dQv) tendency onto the horizontal wind vector."""
    u = self["eastward_wind"]
    v = self["northward_wind"]
    dot_product = u * self["dQu"] + v * self["dQv"]
    return dot_product / np.linalg.norm((u, v))
@DerivedMapping.register(
    "net_shortwave_sfc_flux_derived",
    required_inputs=["surface_diffused_shortwave_albedo"],
)
def net_shortwave_sfc_flux_derived(self):
    """Net surface shortwave flux (positive downward) from albedo and downwelling flux."""
    albedo = self["surface_diffused_shortwave_albedo"]
    downward_sfc_shortwave_flux = self[
        "override_for_time_adjusted_total_sky_downward_shortwave_flux_at_surface"
    ]
    return (1 - albedo) * downward_sfc_shortwave_flux
@DerivedMapping.register(
    "is_land", required_inputs=["land_sea_mask"],
)
def is_land(self):
    """One-hot: 1.0 where land_sea_mask == 1 (land), else 0.0."""
    mask = self["land_sea_mask"]
    return xr.where(vcm.xarray_utils.isclose(mask, 1), 1.0, 0.0)


@DerivedMapping.register(
    "is_sea", required_inputs=["land_sea_mask"],
)
def is_sea(self):
    """One-hot: 1.0 where land_sea_mask == 0 (open sea), else 0.0."""
    mask = self["land_sea_mask"]
    return xr.where(vcm.xarray_utils.isclose(mask, 0), 1.0, 0.0)


@DerivedMapping.register(
    "is_sea_ice", required_inputs=["land_sea_mask"],
)
def is_sea_ice(self):
    """One-hot: 1.0 where land_sea_mask == 2 (sea ice), else 0.0."""
    mask = self["land_sea_mask"]
    return xr.where(vcm.xarray_utils.isclose(mask, 2), 1.0, 0.0)
@DerivedMapping.register("Q1", required_inputs=["pQ1"])
def Q1(self):
    """Total heating: raw Q1 if present, else dQ1 + pQ1 (or pQ1 alone)."""
    try:
        return self._mapper["Q1"]
    except KeyError:
        if "dQ1" in self.keys():
            return self["dQ1"] + self["pQ1"]
        return self["pQ1"]


@DerivedMapping.register("Q2", required_inputs=["pQ2"])
def Q2(self):
    """Total moistening: raw Q2 if present, else dQ2 + pQ2 (or pQ2 alone)."""
    try:
        return self._mapper["Q2"]
    except KeyError:
        if "dQ2" in self.keys():
            return self["dQ2"] + self["pQ2"]
        return self["pQ2"]
@DerivedMapping.register(
    "pQ1", required_inputs=["pressure_thickness_of_atmospheric_layer"]
)
def pQ1(self):
    """Physics heating; defaults to zeros shaped like the pressure thickness."""
    try:
        return self._mapper["pQ1"]
    except KeyError:
        # Not provided: assume no physics tendency.
        return xr.zeros_like(self["pressure_thickness_of_atmospheric_layer"])


@DerivedMapping.register(
    "pQ2", required_inputs=["pressure_thickness_of_atmospheric_layer"]
)
def pQ2(self):
    """Physics moistening; defaults to zeros shaped like the pressure thickness."""
    try:
        return self._mapper["pQ2"]
    except KeyError:
        # Not provided: assume no physics tendency.
        return xr.zeros_like(self["pressure_thickness_of_atmospheric_layer"])
@DerivedMapping.register("internal_energy", required_inputs=["air_temperature"])
def internal_energy(self):
    """Internal energy computed from the air temperature field."""
    temperature = self._mapper["air_temperature"]
    return vcm.internal_energy(temperature)
@DerivedMapping.register(
    "column_integrated_dQ1",
    required_inputs=["dQ1", "pressure_thickness_of_atmospheric_layer"],
)
def column_integrated_dQ1(self):
    """Column-integrated ML heating (isochoric transition)."""
    delp = self._mapper["pressure_thickness_of_atmospheric_layer"]
    return vcm.column_integrated_heating_from_isochoric_transition(
        self._mapper["dQ1"], delp
    )


@DerivedMapping.register(
    "column_integrated_dQ2",
    required_inputs=["dQ2", "pressure_thickness_of_atmospheric_layer"],
)
def column_integrated_dQ2(self):
    """Column-integrated ML moistening [mm/day]."""
    delp = self._mapper["pressure_thickness_of_atmospheric_layer"]
    da = -vcm.minus_column_integrated_moistening(self._mapper["dQ2"], delp)
    return da.assign_attrs(
        {"long_name": "column integrated moistening", "units": "mm/day"}
    )


@DerivedMapping.register(
    "column_integrated_Q1",
    required_inputs=["Q1", "pressure_thickness_of_atmospheric_layer"],
)
def column_integrated_Q1(self):
    """Column-integrated total heating (isochoric transition)."""
    delp = self._mapper["pressure_thickness_of_atmospheric_layer"]
    return vcm.column_integrated_heating_from_isochoric_transition(
        self._mapper["Q1"], delp
    )


@DerivedMapping.register(
    "column_integrated_Q2",
    required_inputs=["Q2", "pressure_thickness_of_atmospheric_layer"],
)
def column_integrated_Q2(self):
    """Column-integrated total moistening [mm/day]."""
    delp = self._mapper["pressure_thickness_of_atmospheric_layer"]
    da = -vcm.minus_column_integrated_moistening(self._mapper["Q2"], delp)
    return da.assign_attrs(
        {"long_name": "column integrated moistening", "units": "mm/day"}
    )
@DerivedMapping.register(
    "water_vapor_path",
    required_inputs=["specific_humidity", "pressure_thickness_of_atmospheric_layer"],
)
def water_vapor_path(self):
    """Column water vapor [mm]; mass-integrated from humidity when not stored."""
    try:
        return self._mapper["water_vapor_path"]
    except KeyError:
        integrated = vcm.mass_integrate(
            self._mapper["specific_humidity"],
            self._mapper["pressure_thickness_of_atmospheric_layer"],
            dim="z",
        )
        return integrated.assign_attrs(
            {"long_name": "column integrated water vapor", "units": "mm"}
        )
|
## Advent of Code 2018: Day 12
## https://adventofcode.com/2018/day/12
## <NAME>
## Answers: [Part 1]: 3059, [Part 2]: 3650000001776
import re, time, math
def maskHash(pots):
    """Return the integer formed by reading *pots* as a 5-bit binary number.

    '#' (a live plant) counts as a 1 bit and any other character as 0; the
    first character is the most significant of five bits, so a 5-pot window
    maps to 0..31.

    Fix: the accumulator previously shadowed the builtin ``hash``.
    """
    value = 0
    exp = 4  # bit weight of the leftmost pot
    for char in pots:
        if char == '#':
            value += 2**exp
        exp -= 1
    return value
def advanceGeneration(state, rules):
    """Apply one generation of the rules; the two outermost pots stay empty.

    Each interior pot's next value is looked up by hashing its 5-pot
    neighborhood; the returned string has the same length as *state*.
    """
    cells = ['..']
    for idx in range(2, len(state) - 2):
        window = state[idx - 2:idx + 3]  # 5-pot neighborhood around idx
        cells.append('#' if rules[maskHash(window)] == 1 else '.')
    cells.append('..')
    return ''.join(cells)
def shiftState(state):
    """Shift the pot string left to keep the living cluster centered.

    Returns (state, shift): the possibly shifted string (right-padded with
    '.') and the shift amount, which must be added back when computing pot
    numbers.
    """
    lo = state.find('#')
    hi = state.rfind('#')
    center = int((hi + lo) / 2)
    midpoint = int(len(state) / 2)
    if center <= midpoint:
        # Cluster is not right-heavy; nothing to do.
        return state, 0
    amount = center - midpoint
    return state[amount:] + '.' * amount, amount
if __name__ == "__main__":
    # Parse the puzzle input: the initial state line, a blank line, then one
    # rule per line ("LLCRR => X").
    rulesStr = []
    with open('day12_input.txt') as f:
        pattern = re.compile(r"initial state: (.*)")
        initialStateStr = f.readline()
        matches = pattern.match(initialStateStr)
        initialState = matches.groups()[0]
        _ = f.readline()
        rule = f.readline()
        while rule:
            rulesStr.append(rule[:-1])  # strip trailing newline
            rule = f.readline()
    # We define a 5-bit mask where '.'=0 and '#'=1 and create a dict {s: r}
    # where s is the 5-bit state (as an int) and r is the 1-bit result.
    rules = {}
    for rule in rulesStr:
        ruleState = maskHash(rule[0:5])
        if rule[9] == '#': ruleResult = 1
        else: ruleResult = 0
        rules[ruleState] = ruleResult
    for n in range(32):  # if there are any undefined rules, define them as '.'=0
        if n not in rules:
            rules[n] = 0

    ## Part 1
    # Extend the initial state string arbitrarily in both directions. We only
    # need to know this offset when calculating sums of pot numbers.
    offsetStr = '.'*int(len(initialState)/2)
    offset = len(offsetStr)
    state = offsetStr + initialState + offsetStr
    print('\n')
    generations = 20
    for _ in range(generations):
        print(state)
        state = advanceGeneration(state, rules)
    # Find sum of all occupied pot numbers
    potSum = 0
    for i, pot in enumerate(state):
        if pot == '#':
            potSum += i - offset
    print('\nThe sum of the numbers of all pots is {}\n\n'.format(potSum))

    ## Part 2
    # This system stabilizes into a fixed pattern at generation 125, with all
    # plants shifting 1 pot to the right on each subsequent generation. Thus,
    # measuring the sum of all pot numbers at generation 125, we only need to
    # find the number of living plants N and add N*(50000000000-125) to it.
    t_s = time.time()
    offsetStr = '.'*int(len(initialState)/2)
    offset = len(offsetStr)
    state = offsetStr + initialState + offsetStr
    totalShift = 0
    targetGenerations = 50000000000
    generations = 125
    for gen in range(generations):
        if gen % 1 == 0:
            t_e = time.time() - t_s
            #print('[{}]{} | G: {} (T+{} s)'.format(totalShift, state, gen, math.floor(t_e)))
        state = advanceGeneration(state, rules)
        state, shift = shiftState(state)  # shifts list to follow living plants
        totalShift += shift
    # Find sum of all occupied pot numbers, and also count the active pots
    potSum = 0
    potCount = 0
    for i, pot in enumerate(state):
        if pot == '#':
            potSum += i - offset + totalShift
            potCount += 1
    # Extrapolate: every remaining generation adds potCount to the sum.
    shiftedPotSum = potSum + potCount*(targetGenerations-generations)
    t_e = time.time() - t_s
    print('\nThe sum of the numbers of all pots is {}. (Took {} seconds)'.format(shiftedPotSum, math.floor(t_e)))
|
"""
Python Implementation of the EDDN publisher:
https://github.com/EDSM-NET/EDDN/blob/master/examples/PHP/EDDN.php
"""
from datetime import datetime, timezone
import hashlib
import json
import random
import requests
class EDDN:
    """Publisher for the Elite Dangerous Data Network (EDDN).

    Python implementation of the EDDN publisher example:
    https://github.com/EDSM-NET/EDDN/blob/master/examples/PHP/EDDN.php
    """

    # Known upload gateways; one is chosen at random per message.
    _gateways = (
        'https://eddn.edcd.io:4430/upload/',
        # 'http://eddn-gateway.ed-td.space:8080/upload/',
    )

    _commodity_schemas = {
        'production': 'https://eddn.edcd.io/schemas/commodity/3',
        'test': 'https://eddn.edcd.io/schemas/commodity/3/test',
    }
    _shipyard_schemas = {
        'production': 'https://eddn.edcd.io/schemas/shipyard/2',
        'test': 'https://eddn.edcd.io/schemas/shipyard/2/test',
    }
    _outfitting_schemas = {
        'production': 'https://eddn.edcd.io/schemas/outfitting/2',
        'test': 'https://eddn.edcd.io/schemas/outfitting/2/test',
    }

    # When True, messages target the test schemas and are echoed to stdout.
    _debug = True

    # As of 1.3, ED reports four levels.
    _levels = (
        'Low',
        'Low',
        'Med',
        'High',
    )

    def __init__(self, uploaderID, noHash, softwareName, softwareVersion):
        """Create a publisher.

        Args:
            uploaderID: uploader identity; SHA1-obfuscated unless noHash.
            noHash: when truthy, send uploaderID verbatim.
            softwareName: reported in every message header.
            softwareVersion: reported in every message header.
        """
        # Obfuscate uploaderID unless the caller opts out.
        if noHash:
            self.uploaderID = uploaderID
        else:
            self.uploaderID = hashlib.sha1(uploaderID.encode('utf-8')).hexdigest()
        self.softwareName = softwareName
        self.softwareVersion = softwareVersion

    def _schema(self, schemas):
        # Pick the test or production schema URL depending on debug mode.
        return schemas['test' if self._debug else 'production']

    def _envelope(self, schemaRef, body):
        # Wrap a message body in the standard EDDN envelope.
        return {
            '$schemaRef': schemaRef,
            'header': {
                'uploaderID': self.uploaderID,
                'softwareName': self.softwareName,
                'softwareVersion': self.softwareVersion
            },
            'message': body,
        }

    def postMessage(self, message, timestamp=0):
        """Stamp *message* and POST it to a randomly selected gateway.

        A timestamp of 0 (the default) means "now" in the local timezone.
        Raises on a non-2xx response via raise_for_status().
        """
        if timestamp:
            stamp = datetime.fromtimestamp(timestamp).isoformat()
        else:
            stamp = datetime.now(timezone.utc).astimezone().isoformat()
        message['message']['timestamp'] = stamp

        url = random.choice(self._gateways)
        if self._debug:
            print(
                json.dumps(
                    message,
                    sort_keys=True,
                    indent=4
                )
            )
        r = requests.post(
            url,
            headers={'content-type': 'application/json; charset=utf8'},
            data=json.dumps(
                message,
                ensure_ascii=False
            ).encode('utf8'),
            verify=True
        )
        r.raise_for_status()

    def publishCommodities(self, systemName, stationName, commodities,
                           economies, prohibited, timestamp=0):
        """Publish a commodity-market message for a station."""
        body = {
            'systemName': systemName,
            'stationName': stationName,
            'commodities': commodities,
            'economies': economies,
            'prohibited': prohibited,
        }
        message = self._envelope(self._schema(self._commodity_schemas), body)
        self.postMessage(message, timestamp)

    def publishShipyard(self, systemName, stationName, ships, timestamp=0):
        """Publish a shipyard message for a station."""
        body = {
            'systemName': systemName,
            'stationName': stationName,
            'ships': ships,
        }
        message = self._envelope(self._schema(self._shipyard_schemas), body)
        self.postMessage(message, timestamp)

    def publishOutfitting(self, systemName, stationName, modules, timestamp=0):
        """Publish an outfitting message for a station."""
        body = {
            'systemName': systemName,
            'stationName': stationName,
            'modules': modules,
        }
        message = self._envelope(self._schema(self._outfitting_schemas), body)
        self.postMessage(message, timestamp)
|
#!/usr/bin/env python
from concurrent.futures import ThreadPoolExecutor
from optparse import OptionParser
import requests
from datetime import datetime
from datetime import timedelta
import json
import os
import sys
prog = os.path.basename(__file__)
# FIX: the usage string previously omitted the required third argument
# (features.json), even though args[2] is read below. Also fail early with a
# usage error instead of an IndexError when arguments are missing.
parser = OptionParser(
    usage="Usage: %s <wv.json> <overrides_file> <features.json>" % prog)
(options, args) = parser.parse_args()
if len(args) < 3:
    parser.error("expected 3 arguments: <wv.json> <overrides_file> <features.json>")
wv_json_file = args[0]
overrides_file = args[1]
features_file = args[2]

# Mutable run state shared with the snapshot worker functions below.
override_dates_dict = {}
bad_snapshots = []
total_success_count = 0
total_failure_count = 0
time_format = "%Y-%m-%dT%H:%M:%SZ"
snapshots_url = ''

# GetSnapshot request parameters: 'base' is merged with one projection entry.
param_dict = {
    'base': {
        'REQUEST': 'GetSnapshot',
        'FORMAT': 'image/jpeg'
    },
    'geographic': {
        'BBOX': '-90,-180,90,180',
        'CRS': 'EPSG:4326',
        'WIDTH': '768',
        'HEIGHT': '384'
    },
    'arctic': {
        'BBOX': '-4195000,-4195000,4195000,4195000',
        'CRS': 'EPSG:3413',
        'WIDTH': '512',
        'HEIGHT': '512'
    },
    'antarctic': {
        'BBOX': '-4195000,-4195000,4195000,4195000',
        'CRS': 'EPSG:3031',
        'WIDTH': '512',
        'HEIGHT': '512'
    }
}

# These layers should not be combined with the reference layer
standalone_layers = [
    'Graticule',
    'Coastlines',
    'Reference_Features',
    'Reference_Labels'
]
dest_img_dir = './web/images/layers/previews/'
# Land/water reference layer used as the background, per projection.
reference_layers = {
    'geographic': 'OSM_Land_Water_Map',
    'arctic': 'OSM_Land_Water_Map',
    'antarctic': 'SCAR_Land_Water_Map'
}
current = datetime.now()
def track_bad_snapshots(layer_id, projection, request, img_file):
    """Record snapshots whose size matches a known reference-layer-only image.

    A snapshot whose byte size equals the size of the SCAR/OSM Land_Water_Map
    reference layer alone most likely means the requested layer contributed no
    imagery; such snapshots are appended to the module-level bad_snapshots.
    """
    global bad_snapshots
    # File sizes of images containing only the SCAR/OSM Land_Water_Map layer.
    geographic_bad_size = 12088
    arctic_bad_size = 9949
    antarctic_bad_size = 4060
    if img_file.tell() in [geographic_bad_size, arctic_bad_size, antarctic_bad_size]:
        bad_snapshots.append({
            'id': layer_id,
            'projection': projection,
            'url': request.url
        })
def get_best_date(projection, period, date_ranges):
    """Pick a representative date within the layer's last date range.

    Prefers a slightly older date for daily layers (recent imagery may not be
    processed yet) and nudges polar projections toward daylight months.
    Returns a date string formatted with the module-level time_format.
    """
    global current
    last_range = date_ranges[len(date_ranges) - 1]
    start_date = last_range.get('startDate')
    end_date = last_range.get('endDate')
    parsed_start_date = datetime.strptime(start_date, time_format)
    parsed_end_date = datetime.strptime(end_date, time_format)
    p_year = parsed_end_date.year
    p_month = parsed_end_date.month
    interval = int(last_range.get('dateInterval'))
    altered_date = None

    # Handle daily layers
    if (period == "daily"):
        # Go back a few more days for single day layers since something
        # too recent may not be processed yet
        if (interval == 1):
            interval = 3
        altered_date = parsed_end_date - timedelta(days=interval)

    # Choose a good daylight month for arctic
    if projection == "arctic" and p_month not in [4, 5, 6, 7, 8, 9]:
        if p_year == current.year and current.month < 6:
            # This year's June hasn't happened yet; use June of last year.
            altered_date = parsed_end_date.replace(day=1, month=6, year=current.year-1)
        else:
            altered_date = parsed_end_date.replace(day=1, month=6)

    # Choose a good daylight month for antarctic
    if projection == "antarctic" and p_month not in [10, 11, 12, 1, 2]:
        # TODO handle "bad" months for antarctic
        altered_date = parsed_end_date.replace(month=12)

    # Make sure modified date isn't out of layer date range
    if altered_date and altered_date >= parsed_start_date:
        date = datetime.strftime(altered_date, time_format)
    else:
        date = end_date
    return date
def get_time_param(projection, layer_id, layer, params):
    """Set params['TIME'] for temporal layers; configured overrides win.

    Layers with dateRanges get a "best" representative date; layers with only
    a startDate use that; non-temporal layers get no TIME parameter.
    """
    date_ranges = layer.get('dateRanges')
    start_date = layer.get('startDate')
    if date_ranges:
        params['TIME'] = get_best_date(projection, layer.get('period'), date_ranges)
    elif start_date:
        params['TIME'] = start_date
    # A manually configured override date always takes precedence.
    override = override_dates_dict.get(layer_id)
    if override:
        params['TIME'] = override
def get_snapshots(layer):
    """Fetch and store a preview snapshot for each projection of `layer`.

    For every projection the layer supports, builds WVS request params,
    downloads the image to `dest_img_dir/<projection>/<wv_id>.jpg` (skipping
    files that already exist), and records success/failure counts in the
    module-level totals. Non-reference layers are composited over the
    projection's reference layer and checked for "bad" (empty) snapshots.
    """
    global total_failure_count
    global total_success_count
    for projection, proj_dict in layer['projections'].items():
        reference_layer = reference_layers[projection]
        # Sometimes a layer id is provided per projection (e.g. Land Mask layers)
        # We need to use this layer id to request the layer from WVS/GIBS
        # But, we need to use the WV id as the file name (since that's how we will look up the image in WV)
        if (proj_dict.get('layer')):
            gibs_layer_id = proj_dict.get('layer')
            wv_layer_id = layer['id']
        else:
            gibs_layer_id = wv_layer_id = layer['id']
        params = { **param_dict['base'], **param_dict[projection] }
        get_time_param(projection, wv_layer_id, layer, params)
        # Compare ids by value: `is not` on strings checks identity and only
        # works by accident of interning.
        if (gibs_layer_id != reference_layer and gibs_layer_id not in standalone_layers):
            params['LAYERS'] = (reference_layer + ',' + gibs_layer_id)
            params['OPACITIES'] = '0.50,1'
        else:
            params['LAYERS'] = gibs_layer_id
        dest_file_name = dest_img_dir + projection + '/' + wv_layer_id + '.jpg'
        # Only get images that we don't have already
        if (os.path.exists(dest_file_name)):
            continue
        try:
            image_req = requests.get(snapshots_url, params=params)
            if image_req.status_code == 200:
                status_text = 'SUCCESS'
                total_success_count += 1
                with open(dest_file_name, 'xb') as image_file:
                    image_file.write(image_req.content)
                    # Reference layers are never "bad"; skip the size check.
                    if (gibs_layer_id == reference_layers[projection]):
                        continue
                    # Must run while image_file is still open (it inspects
                    # the file size via tell()).
                    track_bad_snapshots(wv_layer_id, projection, image_req, image_file)
            else:
                total_failure_count += 1
                status_text = 'ERROR'
            print("\n%s: Result: %s - %s" % (prog, status_text, image_req.status_code))
            print("%s: Layer: %s" % (prog, wv_layer_id))
            print("%s: URL: %s" % (prog, image_req.url))
        except Exception as e:
            print("%s ERROR: %s" % (prog, e))
if __name__ == "__main__":
    # Check to see if this feature is enabled in features.json before continuing
    with open(features_file, 'rt') as features_json:
        features_dict = json.load(features_json)
    if features_dict['features']['previewSnapshots'] is False:
        sys.exit();
    # Allow manual configuration of layer ID to specific date to generate desired preview
    with open(overrides_file, 'rt') as overrides_json:
        override_dates_dict = json.load(overrides_json)
    # Load the generated wv.json to get the layer list and snapshot endpoint.
    with open(wv_json_file, 'rt') as wv_json:
        wv_json_dict = json.load(wv_json)
    layers = wv_json_dict['layers']
    snapshots_url = wv_json_dict['features']['imageDownload']['url']
    fetch_snapshots = wv_json_dict['features']['previewSnapshots']
    if not fetch_snapshots:
        print("%s: Layer preview fetching disabled. Exiting." % prog)
        sys.exit()
    # Fetch snapshots for all layers concurrently; one task per layer.
    futures = []
    with ThreadPoolExecutor() as executor:
        for layer in layers.values():
            futures.append(executor.submit(get_snapshots, layer))
        for f in futures:
            try:
                # Need to call result() on each future to catch any raised exceptions
                f.result()
            except Exception as e:
                print("%s:" % (e))
    # Summarize results gathered by the worker threads.
    if len(bad_snapshots) > 0:
        print("\n%s: WARNING: %s snapshots returned no content. See below for details: " % (prog, len(bad_snapshots)))
        for bad_layer in bad_snapshots:
            print("\n\t Layer: %s" % bad_layer['id'])
            print("\t URL: %s" % (bad_layer['url']))
    if total_success_count > 0:
        print('\n%s: Successfully retrieved %s snapshots!' % (prog, total_success_count))
    if total_failure_count > 0:
        print('\n%s: WARNING: Failed to retrieve %s snapshots!' % (prog, total_failure_count))
    if total_failure_count == 0 and total_success_count == 0:
        print('\n%s: No snapshots were retrieved. All layers found in wv.json have existing preview images!' % (prog))
|
<reponame>risilab/Autobahn<gh_stars>10-100
import bisect
import datetime
import dataclasses
import os
import random
import warnings
from typing import Dict, List, Optional, Sequence
import hydra.utils
import pytorch_lightning
import pytorch_lightning.callbacks
import torch
from torch.utils.tensorboard import SummaryWriter
def make_folder_in_sequence(root: str, name: str, min_sequence_digits: int = 2, max_attempts: int = 1000) -> str:
    """Creates a folder with the given name in the given root directory, adding
    a sequence number if the name already exists.

    Parameters
    ----------
    root
        Root directory in which to create the folder
    name
        Name of the folder to create
    min_sequence_digits
        Optional integer representing the minimum number of digits to include
        in the sequence number (zero-padded).
    max_attempts
        Maximum number of sequence suffixes to try before giving up.

    Returns
    -------
    str
        The full path to the created folder.

    Raises
    ------
    ValueError
        If no free name is found within `max_attempts` suffixes.
    """
    base_dirname = os.path.join(root, name)
    if not os.path.exists(base_dirname):
        os.makedirs(base_dirname, exist_ok=True)
        return base_dirname
    # e.g. min_sequence_digits=2 -> '_{:02d}' -> 'name_01', 'name_02', ...
    digit_format = '_{{:0{}d}}'.format(min_sequence_digits)
    # Inclusive upper bound: previously range(1, max_attempts) tried only
    # max_attempts - 1 suffixes.
    for i in range(1, max_attempts + 1):
        dirname = base_dirname + digit_format.format(i)
        if not os.path.exists(dirname):
            os.makedirs(dirname, exist_ok=True)
            return dirname
    raise ValueError('Could not create folder after {} attempts.'.format(max_attempts))
@dataclasses.dataclass
class TrainerConfiguration:
    """Top-level training hyper-parameters consumed by `make_trainer`."""
    batch_size: int = 64  # samples per optimization step
    seed: Optional[int] = None  # RNG seed; a random one is drawn when None (see ensure_config_defaults)
    output_folder: Optional[str] = None  # run output directory; auto-allocated when None
    num_gpus: int = 1  # GPUs to train on; >1 selects the 'ddp' accelerator in make_trainer
    max_epochs: int = 100  # upper bound on training epochs
    mixed_precision: bool = False  # when True, the trainer runs with 16-bit precision
def ensure_config_defaults(config: TrainerConfiguration):
    """Fill in any unset defaults on `config`, in place.

    Draws a random seed when none is given, allocates a fresh 'run' output
    folder under the current working directory when none is given, and — when
    the config carries a `data` section — rewrites its `data_folder` to an
    absolute path via hydra.
    """
    if config.seed is None:
        config.seed = random.randint(0, 2 ** 32 - 1)
    if config.output_folder is None:
        run_dir = make_folder_in_sequence(os.getcwd(), 'run')
        config.output_folder = os.path.abspath(run_dir)
    if hasattr(config, 'data'):
        config.data.data_folder = hydra.utils.to_absolute_path(config.data.data_folder)
def make_trainer(config: TrainerConfiguration, monitor_loss='val_loss', monitor_mode='min'):
    """Creates a trainer according to the given configuration.

    Parameters
    ----------
    config : TrainerConfiguration
        Configuration for the trainer.
    monitor_loss : str
        The loss to monitor for checkpoint saving.
    monitor_mode : str
        Whether the monitored loss is minimized ('min') or maximized ('max').

    Returns
    -------
    pytorch_lightning.Trainer
        Trainer class with the given configuration.
    """
    ensure_config_defaults(config)
    callbacks = [
        pytorch_lightning.callbacks.GPUStatsMonitor(intra_step_time=True),
        pytorch_lightning.callbacks.LearningRateMonitor(),
        pytorch_lightning.callbacks.ModelCheckpoint(monitor=monitor_loss, mode=monitor_mode),
        #AutogradProfilerCallback(profile_idx=10),
    ]
    kwargs = {}
    # TrainerConfiguration itself declares no `optim` field, so guard the
    # lookup instead of raising AttributeError for configs without one.
    optim = getattr(config, 'optim', None)
    if optim is not None and optim.gradient_clip_norm is not None:
        kwargs['gradient_clip_val'] = optim.gradient_clip_norm
    if config.mixed_precision:
        kwargs['precision'] = 16
    if config.num_gpus > 1:
        kwargs['accelerator'] = 'ddp'
    trainer = pytorch_lightning.Trainer(
        gpus=config.num_gpus,
        default_root_dir=config.output_folder,
        callbacks=callbacks,
        max_epochs=config.max_epochs,
        progress_bar_refresh_rate=5,
        **kwargs)
    return trainer
def list_checkpoints(path: str) -> Dict[str, List[str]]:
    """Lists best checkpoint by version in the given folder.

    Parameters
    ----------
    path : str
        Path to output folder of pytorch-lightning training run, or path to a
        single checkpoint file.
    """
    if os.path.isfile(path):
        # A concrete checkpoint file was given; report it as version '0'.
        return {'0': path}

    def _best_checkpoint(ckpt_folder):
        """Pick the best checkpoint path within one version's folder."""
        last_path = os.path.join(ckpt_folder, 'last.ckpt')
        if not os.path.exists(last_path):
            # No 'last.ckpt': fall back to the lexicographically greatest file.
            newest = sorted(os.listdir(ckpt_folder), reverse=True)[0]
            return os.path.join(ckpt_folder, newest)
        # 'last.ckpt' records the best model path in its checkpoint callback
        # state, when available.
        ckpt = torch.load(last_path, map_location='cpu')
        model_callback = pytorch_lightning.callbacks.ModelCheckpoint
        if model_callback in ckpt['callbacks']:
            return ckpt['callbacks'][model_callback]['best_model_path']
        return last_path

    base_path = os.path.join(path, 'lightning_logs')
    return {
        version: _best_checkpoint(os.path.join(base_path, version, 'checkpoints'))
        for version in os.listdir(base_path)
    }
def warmup_decay_lr(epoch: float, milestones: Sequence[float]=None, warmup_epochs: float=5, gamma: float=0.1):
    """Return a learning rate multiplier combining linear warmup with step decay.

    Parameters
    ----------
    epoch : Number
        The current epoch.
    milestones : Sequence[Number]
        Milestone epochs; the factor is multiplied by `gamma` once per
        milestone already passed. Treated as empty when None.
    warmup_epochs : Number
        The length of the warmup in epochs.
    gamma : float
        The multiplicative decay applied at each milestone.

    Returns
    -------
    float
        The learning rate factor to apply.
    """
    # Number of milestones at or before `epoch`.
    steps_passed = 0 if not milestones else bisect.bisect(milestones, epoch)
    # Ramp linearly from 1/(warmup_epochs+1) up to 1.0, then stay flat.
    warmup_factor = min(1.0, (epoch + 1) / (warmup_epochs + 1))
    return (gamma ** steps_passed) * warmup_factor
class WarmupStepScheduler(torch.optim.lr_scheduler._LRScheduler):
    """Scheduler which implements a warmup and milestone decay scheduling policy.

    Delegates the per-epoch factor to `warmup_decay_lr` and applies it to
    every base learning rate of the wrapped optimizer.
    """
    def __init__(self, optimizer, milestones: Sequence[float]=None, warmup_epochs: float=5, gamma: float=0.1, last_epoch=-1, verbose=False):
        # Set the schedule parameters before calling the base initializer —
        # the base __init__ triggers an initial get_lr() which reads them
        # (presumably; confirm against the torch _LRScheduler version in use).
        self.optimizer = optimizer
        self.milestones = milestones
        self.warmup_epochs = warmup_epochs
        self.gamma = gamma
        super().__init__(optimizer, last_epoch, verbose)
    def get_lr(self):
        # Standard torch scheduler guard: warn if called outside step().
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.")
        lr_factor = warmup_decay_lr(self.last_epoch, self.milestones, self.warmup_epochs, self.gamma)
        return [lr_factor * base_lr for base_lr in self.base_lrs]
def initialize_TB_logging(root_dir='./', logging_path=None):
    """Create a TensorBoard SummaryWriter under `root_dir`.

    Parameters
    ----------
    root_dir : str
        Directory in which to place the log folder.
    logging_path : str, optional
        Log folder name; defaults to 'logdir_<YYMMDD_HHMMSS>'.

    Returns
    -------
    SummaryWriter
        Writer pointed at the chosen log folder.
    """
    if logging_path is None:
        basename = "logdir"
        suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
        logging_path = "_".join([basename, suffix])
    # os.path.join handles root dirs with or without a trailing separator;
    # plain concatenation produced broken paths like 'rootlogdir_...'.
    full_path = os.path.join(root_dir, logging_path)
    writer = SummaryWriter(full_path)
    return writer
class AutogradProfilerCallback(pytorch_lightning.Callback):
    """Pytorch-lightning callback used for profiling at a given step."""
    def __init__(self, enabled: bool=True, use_cuda: bool=True, profile_idx: int=20):
        """Creates a new profiler callback.

        Parameters
        ----------
        enabled : bool
            Indicates whether the profiler is enabled. If False, this callback will be a no-op.
        use_cuda : bool
            Whether to profile cuda ops.
        profile_idx : int
            The step at which to profile.
        """
        # NOTE(review): pytorch_lightning.Callback.__init__ is not invoked
        # here; harmless for current usage but confirm when upgrading PL.
        self._enabled = enabled
        self._use_cuda = use_cuda
        self._profiler = None  # active torch.autograd profiler context, if any
        self._profile_idx = profile_idx
        self._profile_done = False  # set once a trace has been exported
    def on_train_batch_start(self, trainer: pytorch_lightning.Trainer, pl_module: pytorch_lightning.LightningModule, batch, batch_idx, dataloader_idx):
        # Profiling is armed on the target batch AND the batch just before it
        # (profile_idx in (batch_idx, batch_idx + 1)), so the trace spans a
        # full step boundary.
        if not self._profile_done and self._profile_idx in (batch_idx, batch_idx + 1):
            pl_module.print('Profiling training batch at step {}'.format(batch_idx))
            self._profiler = torch.autograd.profiler.profile(
                enabled=self._enabled, use_cuda=self._use_cuda)
            self._profiler.__enter__()
    def on_train_batch_end(self, trainer: pytorch_lightning.Trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        # Close the profiler for each profiled batch; export the chrome trace
        # only once, on the target batch itself.
        if not self._profile_done and self._profile_idx in (batch_idx, batch_idx + 1):
            self._profiler.__exit__(None, None, None)
            if self._profile_idx == batch_idx:
                self._profiler.export_chrome_trace(os.path.join(trainer.default_root_dir, 'profile.trace'))
                self._profile_done = True
|
# main imports
import numpy as np
import pandas as pd
import sys, os, argparse
# image processing
from PIL import Image
from ipfml import utils
from ipfml.processing import transform, segmentation
import matplotlib.pyplot as plt
# model imports
import joblib
from keras.models import load_model
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
from processing.features_extractions import extract_data
dataset_folder = cfg.dataset_path
scenes_list = cfg.scenes_names
zones_indices = cfg.zones_indices
output_figures = cfg.output_figures
def write_progress(progress):
    """Draw a single-line textual progress bar on stdout.

    Parameters
    ----------
    progress : float
        Completion ratio in [0, 1].
    """
    bar_width = 180
    # Truncate the marker position to an integer: comparing the loop index
    # against the raw float (the previous behavior) almost never matched
    # exactly, so the '>' head marker was usually missing.
    pos = int(bar_width * progress)
    output_str = "["
    for i in range(bar_width):
        if i < pos:
            output_str = output_str + "="
        elif i == pos:
            output_str = output_str + ">"
        else:
            output_str = output_str + " "
    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
    print(output_str)
    # Move the cursor back up one line so the next call overwrites the bar.
    sys.stdout.write("\033[F")
def specific_display_label(label, chunk_size=3):
    """Insert a space every `chunk_size` characters, grouping from the right.

    E.g. '123456' -> '123 456' with the default chunk size (thousands-style
    digit grouping for axis labels).
    """
    # Work on the reversed string so the leftover (short) chunk ends up at
    # the front of the final label.
    reversed_label = label[::-1]
    chunks = [reversed_label[start:start + chunk_size]
              for start in range(0, len(reversed_label), chunk_size)]
    return ' '.join(chunks)[::-1]
def display_simulation_thresholds(predictions_data, human_threshold, image_indices, output, nsamples, every=None):
    """Plot model predictions along a scene's sample progression, save as PDF.

    Parameters
    ----------
    predictions_data : sequence of float
        Model probability per simulation step.
    human_threshold : int
        Human-annotated threshold (in samples) for the zone.
    image_indices : sequence
        Sample index (quality) of each scene image; the first two entries
        define the sampling step.
    output : str
        Output path prefix; '<output>.pdf' is written.
    nsamples : int
        Maximum number of samples; used for the final x tick.
    every : int, optional
        Keep-every-N subsampling factor applied upstream; scales the step.
    """
    # get reference image
    fig = plt.figure(figsize=(35, 22))
    # fig.suptitle("Detection simulation for " + scene + " scene", fontsize=20)
    # dataset information
    start_index = int(image_indices[1]) - int(image_indices[0])
    step_value = int(image_indices[1]) - int(image_indices[0])
    # Tick frequency (in prediction steps); may be a float — the modulo
    # checks below tolerate that.
    label_freq = nsamples / step_value / 100 * 2
    if every is not None:
        step_value = every * step_value
        # if every >= 1:
        label_freq = 2 * label_freq
    y_min_lim, y_max_lim = (-0.2, 1.2)
    predictions = []
    predictions_label = []
    threshold_model = None  # first step where the prediction drops below 0.5
    for index_v, v in enumerate(predictions_data):
        v = float(v)
        predictions.append(v)
        predictions_label.append([0 if v < 0.5 else 1])
        if threshold_model is None:
            if v < 0.5:
                threshold_model = index_v
    # get index of current value
    counter_index = 0
    current_value = start_index
    while(current_value < human_threshold):
        counter_index += 1
        current_value += step_value
    plt.plot(predictions, lw=4)
    plt.plot(predictions_label, linestyle='--', color='slategray', lw=4)
    #plt.imshow(blocks[index], extent=[0, len(predictions), y_min_lim, y_max_lim])
    # if zones_learned is not None:
    #     if index in zones_learned:
    #         ax = plt.gca()
    #         ax.set_facecolor((0.9, 0.95, 0.95))
    # draw vertical line from (70,100) to (70, 250)
    # plt.plot([counter_index, counter_index], [-2, 2], 'k-', lw=6, color='red')
    # Vertical line at the model's detected threshold.
    plt.plot([threshold_model, threshold_model], [-2, 2], 'k-', lw=5, color='blue')
    # if index % 4 == 0:
    plt.ylabel('Bruité / Non bruité', fontsize=30)
    # if index >= 12:
    plt.xlabel('échantillons par pixel', fontsize=30)
    x_labels = [specific_display_label(str(id * step_value + start_index)) for id, val in enumerate(predictions) if id % label_freq == 0] + [specific_display_label(str(nsamples))]
    #x_labels = [id * step_value + start_index for id, val in enumerate(predictions) if id % label_freq == 0]
    x = [v for v in np.arange(0, len(predictions)) if v % label_freq == 0] + [int(nsamples / (20 * every))]
    y = np.arange(-1, 2, 10)
    plt.xticks(x, x_labels, rotation=45, fontsize=24)
    ax = plt.gca()
    for label in (ax.get_xticklabels() + ax.get_yticklabels()):
        label.set_fontsize(24)
    plt.ylim(y_min_lim, y_max_lim)
    fig.tight_layout()
    plt.savefig(output + '.pdf', dpi=100)
def main():
    """Run a trained model over one zone of a scene and plot the simulation.

    Slides a window of extracted features over the scene's images, predicts a
    noise probability per step, writes the predictions to the output file and
    saves a threshold figure next to it.
    """
    parser = argparse.ArgumentParser(description="Read and compute entropy data file")
    parser.add_argument('--model', type=str, help='entropy file data with estimated threshold to read and compute')
    parser.add_argument('--method', type=str, help='method name to used', choices=cfg.features_choices_labels, default=cfg.features_choices_labels[0])
    parser.add_argument('--params', type=str, help='param of the method used', default="")
    parser.add_argument('--sequence', type=int, help='sequence length expected')
    parser.add_argument('--imnorm', type=int, help="specify if image is normalized before computing something", default=0, choices=[0, 1])
    parser.add_argument('--scene', type=str, help='Scene folder to use')
    parser.add_argument('--zone', type=int, help='zone index to use')
    parser.add_argument('--seq_norm', type=int, help='normalization sequence by features', choices=[0, 1])
    parser.add_argument('--every', type=int, help="every images only", default=1)
    parser.add_argument('--threshold', type=int, help="Expected thresholds for targeted zone", default=1000)
    parser.add_argument('--output', type=str, help="output prediction file")
    parser.add_argument('--nsamples', type=int, help="max number of samples")
    args = parser.parse_args()
    p_model = args.model
    p_method = args.method
    p_params = args.params
    p_sequence = args.sequence
    p_imnorm = args.imnorm
    p_scene = args.scene
    p_zone = args.zone
    p_seq_norm = bool(args.seq_norm)
    p_every = args.every
    p_threshold = args.threshold
    p_output = args.output
    p_nsamples = args.nsamples
    # scene path by default (strip any trailing separator)
    scene_path = p_scene
    if scene_path[-1] == '/':
        scene_path = scene_path[:-1]
    _, scene_name = os.path.split(p_scene)
    # 2. load model and compile it
    model = load_model(p_model)
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    # 4. get estimated thresholds using model and specific method
    images_path = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
    # keep only every p_every-th image
    images_path = sorted([img for i, img in enumerate(images_path) if i % p_every == 0])
    number_of_images = len(images_path)
    image_indices = [dt.get_scene_image_quality(img_path) for img_path in images_path]
    blocks_sequence = []   # sliding window of per-image feature vectors
    blocks_predictions = []
    for image_counter, img_path in enumerate(images_path):
        blocks = segmentation.divide_in_blocks(Image.open(img_path), (200, 200))
        block = blocks[p_zone]
        # normalize if necessary
        if p_imnorm:
            block = np.array(block) / 255.
        blocks_sequence.append(np.array(extract_data(block, p_method, p_params)))
        # check if prediction is possible (window is full)
        if len(blocks_sequence) >= p_sequence:
            data = np.array(blocks_sequence)
            if data.ndim == 1:
                data = data.reshape(len(blocks_sequence), 1)
            elif p_seq_norm:
                # normalize each feature column over the window
                _, num_features = data.shape
                for feature_index in range(num_features):
                    data[:, feature_index] = utils.normalize_arr_with_range(data[:, feature_index])
            data = np.expand_dims(data, axis=0)
            prob = model.predict(data, batch_size=1)[0][0]
            blocks_predictions.append(prob)
            # delete first element (just like sliding window)
            del blocks_sequence[0]
        # write progress bar
        write_progress((image_counter + 1) / number_of_images)
    # 6. write results; context manager guarantees the file is closed
    # (previously the handle was opened and never closed)
    with open(p_output, 'w') as f:
        f.write(scene_name + ';')
        f.write(str(p_zone) + ';')
        for data in blocks_predictions:
            f.write(str(data) + ';')
        f.write('\n')
    # default set threshold
    display_simulation_thresholds(blocks_predictions, p_threshold, image_indices, p_output + '_figure', p_nsamples, p_every)
# Script entry point.
if __name__== "__main__":
    main()
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <<EMAIL>> (c) 2014-2017
# dr-prodigy <<EMAIL>> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestHungary(unittest.TestCase):
    """Tests for the Hungarian holiday calendar.

    Covers the modern holiday set plus historical rules: communist-era
    holidays, holidays introduced later, and one-off substitute days off.
    """
    def setUp(self):
        # observed=False: substitute (observed) days off are excluded unless a
        # test builds its own calendar with observed=True.
        self.holidays = holidays.HU(observed=False)
        self.next_year = date.today().year + 1
    def test_national_day_was_not_celebrated_during_communism(self):
        # March 15 was suppressed 1951-1987 and reinstated by 1989.
        for year in range(1951, 1988):
            self.assertNotIn(date(year, 3, 15), self.holidays)
        self.assertIn(date(1989, 3, 15), self.holidays)
    def test_holidays_during_communism(self):
        for year in range(1950, 1989):
            self.assertIn(date(year, 3, 21), self.holidays)
            self.assertIn(date(year, 4, 4), self.holidays)
            # November 7 was skipped in 1956 (year of the revolution).
            if year != 1956:
                self.assertIn(date(year, 11, 7), self.holidays)
        self.assertIn(date(1989, 3, 21), self.holidays)
    def test_foundation_day_renamed_during_communism(self):
        for year in range(1950, 1990):
            self.assertEqual(
                self.holidays[date(year, 8, 20)], "A keny<NAME>"
            )
    def test_christian_holidays_2nd_day_was_not_held_in_1955(self):
        # Easter Monday and the second day of Christmas were dropped in 1955.
        hu_1955 = holidays.Hungary(years=[1955])
        self.assertNotIn(date(1955, 4, 11), hu_1955)
        self.assertNotIn(date(1955, 12, 26), hu_1955)
    def test_good_friday_since_2017(self):
        self.assertNotIn(date(2016, 3, 25), self.holidays)
        self.assertIn(date(2017, 4, 14), self.holidays)
        self.assertIn(date(2018, 3, 30), self.holidays)
    def test_whit_monday_since_1992(self):
        self.assertNotIn(date(1991, 5, 20), self.holidays)
        self.assertIn(date(1992, 6, 8), self.holidays)
    def test_labour_day_since_1946(self):
        self.assertNotIn(date(1945, 5, 1), self.holidays)
        for year in range(1946, self.next_year):
            self.assertIn(date(year, 5, 1), self.holidays)
    def test_labour_day_was_doubled_in_early_50s(self):
        # May 2 was also a holiday 1950-1953.
        for year in range(1950, 1954):
            self.assertIn(date(year, 5, 2), self.holidays)
    def test_october_national_day_since_1991(self):
        for year in range(1991, self.next_year):
            self.assertIn(date(year, 10, 23), self.holidays)
    def test_all_saints_day_since_1999(self):
        for year in range(1999, self.next_year):
            self.assertIn(date(year, 11, 1), self.holidays)
    def test_additional_day_off(self):
        # Government-decreed substitute days off: present only when
        # observed=True is requested.
        observed_days_off = holidays.HU(
            observed=True, years=range(2010, self.next_year)
        )
        for day in [
            date(2010, 12, 24),
            date(2011, 3, 14),
            date(2011, 10, 31),
            date(2012, 3, 16),
            date(2012, 4, 30),
            date(2012, 10, 22),
            date(2012, 11, 2),
            date(2012, 12, 24),
            date(2013, 8, 19),
            date(2013, 12, 24),
            date(2013, 12, 27),
            date(2014, 5, 2),
            date(2014, 10, 24),
            date(2014, 12, 24),
            date(2015, 1, 2),
            date(2015, 8, 21),
            date(2015, 12, 24),
            date(2016, 3, 14),
            date(2016, 10, 31),
            date(2018, 3, 16),
            date(2018, 4, 30),
            date(2018, 10, 22),
            date(2018, 11, 2),
            date(2018, 12, 24),
            date(2018, 12, 31),
            date(2019, 8, 19),
            date(2019, 12, 24),
            date(2019, 12, 27),
        ]:
            self.assertNotIn(day, self.holidays)
            self.assertIn(day, observed_days_off)
    def test_monday_new_years_eve_day_off(self):
        observed_day_off = holidays.HU(observed=True)
        self.assertIn(date(2018, 12, 31), observed_day_off)
    def test_2018(self):
        # Full expected holiday list for one recent year.
        self.assertIn(date(2018, 1, 1), self.holidays)  # newyear
        self.assertIn(date(2018, 3, 15), self.holidays)  # national holiday
        self.assertIn(date(2018, 3, 30), self.holidays)  # good friday
        self.assertIn(date(2018, 4, 1), self.holidays)  # easter 1.
        self.assertIn(date(2018, 4, 2), self.holidays)  # easter 2.
        self.assertIn(date(2018, 5, 1), self.holidays)  # Workers' Day
        self.assertIn(date(2018, 5, 20), self.holidays)  # Pentecost
        self.assertIn(date(2018, 5, 21), self.holidays)  # Pentecost monday
        self.assertIn(date(2018, 8, 20), self.holidays)  # State Foundation Day
        self.assertIn(date(2018, 10, 23), self.holidays)  # National Day
        self.assertIn(date(2018, 11, 1), self.holidays)  # All Saints' Day
        self.assertIn(date(2018, 12, 25), self.holidays)  # First christmas
        self.assertIn(date(2018, 12, 26), self.holidays)  # Second christmas
|
<gh_stars>0
import requests
import json
class ClubhouseAPI:
    """Thin wrapper around the (undocumented) Clubhouse mobile API.

    Each method performs one HTTP request on behalf of `user` (a `User`
    instance supplying auth headers and cookies) and returns the raw
    `requests.Response`.
    """
    def __init__(self, user):
        self.api_url = 'https://www.clubhouseapi.com/api'
        self.user = user

    # -- internal helpers --------------------------------------------------
    def _get(self, endpoint, params=None):
        # Query parameters go through `params=` so requests URL-encodes them,
        # instead of the previous hand-built (unescaped) query strings.
        return requests.get('{}/{}'.format(self.api_url, endpoint), params=params,
                            headers=self.user.headers, cookies=self.user.cookies)

    def _post(self, endpoint, payload=None):
        # payload=None sends no body, matching the bare POST endpoints.
        return requests.post('{}/{}'.format(self.api_url, endpoint), json=payload,
                             headers=self.user.headers, cookies=self.user.cookies)

    # -- endpoints ---------------------------------------------------------
    def me(self):
        return self._post('me')

    def get_profile(self, user_id):
        return self._post('get_profile', {'user_id': user_id})

    def get_followers(self, user_id, page_size=50, page=1):
        return self._get('get_followers',
                         {'user_id': user_id, 'page_size': page_size, 'page': page})

    def get_following(self, user_id, page_size=50, page=1):
        return self._get('get_following',
                         {'user_id': user_id, 'page_size': page_size, 'page': page})

    def get_all_topics(self):
        return self._get('get_all_topics')

    def get_users_for_topic(self, topic_id, page_size=25, page=1):
        return self._get('get_users_for_topic',
                         {'topic_id': topic_id, 'page_size': page_size, 'page': page})

    def get_clubs_for_topic(self, topic_id, page_size=25, page=1):
        return self._get('get_clubs_for_topic',
                         {'topic_id': topic_id, 'page_size': page_size, 'page': page})

    def get_club(self, club_id):
        return self._post('get_club', {'club_id': club_id, 'source_topic_id': None})

    def get_club_members(self, club_id, return_followers=1, return_members=0, page_size=50, page=1):
        return self._get('get_club_members',
                         {'club_id': club_id, 'return_followers': return_followers,
                          'return_members': return_members,
                          'page_size': page_size, 'page': page})

    def check_for_update(self, is_testflight=0):
        return self._get('check_for_update', {'is_testflight': is_testflight})

    def get_suggested_invites(self, phone_numbers):
        # The API expects contact dicts, not bare phone-number strings.
        contacts = [{'phone_number': number} for number in phone_numbers]
        return self._post('get_suggested_invites',
                          {'club_id': None, 'contacts': contacts, 'upload_contacts': False})

    def get_events(self, is_filtered='false', page_size=25, page=1):
        return self._get('get_events',
                         {'is_filtered': is_filtered, 'page_size': page_size, 'page': page})

    def search_users(self, query):
        return self._post('search_users', {'cofollows_only': False, 'followers_only': False,
                                           'following_only': False, 'query': query})

    def search_clubs(self, query):
        return self._post('search_clubs', {'cofollows_only': False, 'followers_only': False,
                                           'following_only': False, 'query': query})
class User:
    """Holds Clubhouse credentials and the derived request headers/cookies."""
    def __init__(self, token, user_id, user_agent, device_id, cookie_uid):
        self.token = token
        self.user_id = user_id
        self.user_agent = user_agent
        self.device_id = device_id
        self.cookie_uid = cookie_uid
        self.headers = self.set_headers()
        self.cookies = self.set_cookies()
    def set_headers(self):
        """Build the HTTP header dict (static app headers + credentials)."""
        return {
            'Authorization': self.token,
            'CH-Languages': 'en-US',
            'CH-UserID': self.user_id,
            'CH-Locale': 'en_US',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US;q=1',
            'CH-AppBuild': '269',
            'CH-AppVersion': '0.1.25',
            'Accept': 'application/json',
            'CH-DeviceId': self.device_id,
            'User-Agent': self.user_agent,
            'Connection': 'keep-alive',
        }
    def set_cookies(self):
        """Build the cookie dict (Cloudflare uid cookie)."""
        return {'__cfduid': self.cookie_uid}
# Demo: fill in real credentials, then count users matching a broad search.
user = User(token='<KEY>', user_id='XXXXXXXXXX', user_agent='clubhouse/XXX (iXXXXX; iOS XX.X.X)',
            device_id='XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXX', cookie_uid='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
api = ClubhouseAPI(user)
# '.' matches (nearly) every user, so the response's total approximates the
# full user count.
resp = api.search_users('.').text
print(resp)
print(f"Number of Clubhouse users: {json.loads(resp)['count']:,}")
'''
All endpoints:
record_action_trails
start_phone_number_auth
call_phone_number_auth
resend_phone_number_auth
complete_phone_number_auth
check_waitlist_status
get_release_notes
get_all_topics
get_topic
get_clubs_for_topic
get_users_for_topic
update_name
update_display_name
update_bio
update_username
update_twitter_username
update_skintone
add_user_topic
remove_user_topic
update_notifications
add_email
get_settings
update_instagram_username
report_incident
get_followers
get_following
get_mutual_follows
get_suggested_follows_friends_only
get_suggested_follows_all
get_suggested_follows_similar
ignore_suggested_follow
follow
follow_multiple
unfollow
update_follow_notifications
block
unblock
get_profile
get_channel
get_channels
get_suggested_speakers
create_channel
active_ping
invite_to_existing_channel
audience_reply
block_from_channel
get_welcome_channel
get_create_channel_targets
update_channel_flags
hide_channel
get_notifications
get_actionable_notifications
ignore_actionable_notification
me
get_online_friends
search_users
search_clubs
check_for_update
get_suggested_invites
invite_to_app
invite_from_waitlist
add_club_admin
add_club_member
get_club
get_club_members
get_suggested_club_invites
remove_club_admin
remove_club_member
accept_club_member_invite
follow_club
unfollow_club
get_club_nominations
approve_club_nomination
reject_club_nomination
get_clubs
update_is_follow_allowed
update_is_membership_private
update_is_community
update_club_description
update_club_rules
update_club_topics
update_club_photo
add_club_topic
remove_club_topic
get_events
get_events_for_user
get_events_to_start
delete_event
create_event
edit_event
get_event
'''
|
<filename>colors/__init__.py
"""
HOW TO USE:
In a string, put the color you want first, with the first item and the second item at the emd. (Example: _str = f"{green[0]}Green!{green[1]}"
"""
# ANSI escape sequences as [activate, deactivate] pairs: wrap text between
# pair[0] and pair[1] to apply the effect and then restore the default.

# Text attributes.
reset = ["\u001b[0m", "\u001b[0m"]
bold = ["\u001b[1m", "\u001b[22m"]
dim = ["\u001b[2m", "\u001b[22m"]
italic = ["\u001b[3m", "\u001b[23m"]
underline = ["\u001b[4m", "\u001b[24m"]
inverse = ["\u001b[7m", "\u001b[27m"]
hidden = ["\u001b[8m", "\u001b[28m"]
strikethrough = ["\u001b[9m", "\u001b[29m"]

# Foreground colors (code 39 restores the default foreground).
black = ["\u001b[30m", "\u001b[39m"]
red = ["\u001b[31m", "\u001b[39m"]
green = ["\u001b[32m", "\u001b[39m"]
yellow = ["\u001b[33m", "\u001b[39m"]
blue = ["\u001b[34m", "\u001b[39m"]
magenta = ["\u001b[35m", "\u001b[39m"]
cyan = ["\u001b[36m", "\u001b[39m"]
white = ["\u001b[37m", "\u001b[39m"]
gray = ["\u001b[90m", "\u001b[39m"]
grey = ["\u001b[90m", "\u001b[39m"]
brightRed = ["\u001b[91m", "\u001b[39m"]
brightGreen = ["\u001b[92m", "\u001b[39m"]
brightYellow = ["\u001b[93m", "\u001b[39m"]
brightBlue = ["\u001b[94m", "\u001b[39m"]
brightMagenta = ["\u001b[95m", "\u001b[39m"]
brightCyan = ["\u001b[96m", "\u001b[39m"]
brightWhite = ["\u001b[97m", "\u001b[39m"]

# Background colors (code 49 restores the default background).
bgBlack = ["\u001b[40m", "\u001b[49m"]
bgRed = ["\u001b[41m", "\u001b[49m"]
bgGreen = ["\u001b[42m", "\u001b[49m"]
bgYellow = ["\u001b[43m", "\u001b[49m"]
bgBlue = ["\u001b[44m", "\u001b[49m"]
bgMagenta = ["\u001b[45m", "\u001b[49m"]
bgCyan = ["\u001b[46m", "\u001b[49m"]
bgWhite = ["\u001b[47m", "\u001b[49m"]
bgGray = ["\u001b[100m", "\u001b[49m"]
bgGrey = ["\u001b[100m", "\u001b[49m"]
bgBrightRed = ["\u001b[101m", "\u001b[49m"]
bgBrightGreen = ["\u001b[102m", "\u001b[49m"]
bgBrightYellow = ["\u001b[103m", "\u001b[49m"]
bgBrightBlue = ["\u001b[104m", "\u001b[49m"]
bgBrightMagenta = ["\u001b[105m", "\u001b[49m"]
bgBrightCyan = ["\u001b[106m", "\u001b[49m"]
bgBrightWhite = ["\u001b[107m", "\u001b[49m"]

# Legacy aliases for colors pre v1.0.0.
blackBG = ["\u001b[40m", "\u001b[49m"]
redBG = ["\u001b[41m", "\u001b[49m"]
greenBG = ["\u001b[42m", "\u001b[49m"]
yellowBG = ["\u001b[43m", "\u001b[49m"]
blueBG = ["\u001b[44m", "\u001b[49m"]
magentaBG = ["\u001b[45m", "\u001b[49m"]
cyanBG = ["\u001b[46m", "\u001b[49m"]
whiteBG = ["\u001b[47m", "\u001b[49m"]
def rainbow(_str):
    """Return `_str` bolded, with each non-space character colored in a
    repeating red → yellow → green → blue → magenta cycle.

    Spaces are passed through uncolored and do not advance the cycle.
    """
    palette = [red, yellow, green, blue, magenta]
    pieces = []
    color_idx = 0
    for ch in _str:
        if ch == " ":
            pieces.append(ch)
            continue
        start, end = palette[color_idx]
        pieces.append(start + ch + end)
        color_idx = (color_idx + 1) % len(palette)
    return f"{bold[0]}{''.join(pieces)}{bold[1]}"
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (C) 2013, the Pyzo development team
#
# Yoton is distributed under the terms of the 2-Clause BSD License.
# The full license can be found in 'license.txt'.
""" Module yoton.channels.channels_pubsub
Defines the channel classes for the pub/sub pattern.
"""
import time
from yoton.misc import bytes, xrange
from yoton.channels import BaseChannel
# Queue-status flags used by SubChannel to throttle PubChannel senders
# when running in sync mode:
QUEUE_NULL = 0  # sync mode off; no flow control
QUEUE_OK = 1  # sync mode on; queue below the high-water mark
QUEUE_FULL = 2  # sync mode on; queue over the high-water mark, senders blocked
class PubChannel(BaseChannel):
    """ PubChannel(context, slot_base, message_type=yoton.TEXT)
    The publish part of the publish/subscribe messaging pattern.
    Messages sent here are received by all yoton.SubChannel instances
    with the same slot.
    There are no limitations for this channel if events are not processed.
    Parameters
    ----------
    context : yoton.Context instance
        The context that this channel uses to send messages in a network.
    slot_base : string
        The base slot name. The channel appends an extension to indicate
        message type and messaging pattern to create the final slot name.
        The final slot is used to connect channels at different contexts
        in a network.
    message_type : yoton.MessageType instance
        (default is yoton.TEXT)
        Object to convert messages to bytes and bytes to messages.
        Users can create their own message_type class to let channels
        send any type of message they want.
    """
    def __init__(self, *args, **kwargs):
        BaseChannel.__init__(self, *args, **kwargs)
        # Ids of SubChannels that currently report a full queue.
        self._source_set = set()
    def _messaging_patterns(self):
        # (this end, other end)
        return "pub-sub", "sub-pub"
    def send(self, message):
        """ send(message)
        Send a message over the channel. What is sent as one
        message will also be received as one message.
        The message is queued and delivered to all corresponding
        SubChannels (i.e. with the same slot) in the network.
        """
        self._send(self.message_to_bytes(message))
    def _recv_package(self, package):
        """ Overloaded to set blocking mode.
        Does not call _maybe_emit_received(); a PubChannel never emits
        the "received" signal.
        """
        status = package._data.decode("utf-8")
        sender = package._source_id
        # Track which subscribers report a full queue ...
        if status == "full":
            self._source_set.add(sender)
        else:
            self._source_set.discard(sender)
        # ... and hold the send lock while at least one queue is full.
        self._set_send_lock(bool(self._source_set))
class SubChannel(BaseChannel):
    """ SubChannel(context, slot_base, message_type=yoton.TEXT)
    The subscribe part of the publish/subscribe messaging pattern.
    Received messages were sent by a yoton.PubChannel instance at the
    same slot.
    This channel can be used as an iterator, which yields all pending
    messages. The function yoton.select_sub_channel can
    be used to synchronize multiple SubChannel instances.
    If no events are being processed this channel works as normal, except
    that the received signal will not be emitted, and sync mode will
    not work.
    Parameters
    ----------
    context : yoton.Context instance
        The context that this channel uses to send messages in a network.
    slot_base : string
        The base slot name. The channel appends an extension to indicate
        message type and messaging pattern to create the final slot name.
        The final slot is used to connect channels at different contexts
        in a network.
    message_type : yoton.MessageType instance
        (default is yoton.TEXT)
        Object to convert messages to bytes and bytes to messages.
        Users can create their own message_type class to let channels
        send any type of message they want.
    """
    def __init__(self, *args, **kwargs):
        BaseChannel.__init__(self, *args, **kwargs)
        # Flow-control state used to block the sending side (sync mode)
        self._queue_status = QUEUE_NULL
        self._queue_status_timeout = 0
        self._HWM = 32  # high-water mark: ask senders to block above this
        self._LWM = 16  # low-water mark: ask senders to resume below this
        # Automatically check queue status when new data
        # enters the system
        self.received.bind(self._check_queue_status)
    def _messaging_patterns(self):
        # (this end, other end)
        return "sub-pub", "pub-sub"
    def __iter__(self):
        return self
    def __next__(self):  # Python 3.x
        m = self.recv(False)
        if m:
            return m
        else:
            raise StopIteration()
    def next(self):  # Python 2.x
        """ next()
        Return the next message, or raises StopIteration if none available.
        """
        return self.__next__()
    ## For sync mode
    def set_sync_mode(self, value):
        """ set_sync_mode(value)
        Set or unset the SubChannel in sync mode. When in sync mode, all
        channels that send messages to this channel are blocked if
        the queue for this SubChannel reaches a certain size.
        This feature can be used to limit the rate of senders if the consumer
        (i.e. the one that calls recv()) cannot keep up with processing
        the data.
        This feature requires the yoton event loop to run at the side
        of the SubChannel (not necessary for the yoton.PubChannel side).
        """
        value = bool(value)
        # First reset block status if necessary
        if self._queue_status == QUEUE_FULL:
            self._send_block_message_to_senders("ok")
        # Set new queue status flag
        if value:
            self._queue_status = QUEUE_OK
        else:
            self._queue_status = QUEUE_NULL
    def _send_block_message_to_senders(self, what):
        """ _send_block_message_to_senders(what)
        Send a message ("full" or "ok") to the PubChannel side to make
        it block/unblock.
        """
        # Nothing to notify when there are no connections
        if not self._context.connection_count:
            return
        # Send
        try:
            self._send(what.encode("utf-8"))
        except IOError:
            # The channel was closed (self._closed); drop out of sync mode.
            # BUGFIX: this previously assigned to self._check_queue_status,
            # clobbering the bound method with an int and breaking every
            # later queue-status check with a TypeError.
            self._queue_status = QUEUE_NULL
    def _check_queue_status(self, dummy=None):
        """ _check_queue_status()
        Check the queue status. Returns immediately unless this receiving
        channel runs in sync mode.
        If the queue is above a certain size, will send out a package that
        will make the sending side block. If the queue is below a certain
        size, will send out a package that will make the sending side unblock.
        """
        if self._queue_status == QUEUE_NULL:
            return
        elif len(self._q_in) > self._HWM:
            if self._queue_status == QUEUE_OK:
                self._queue_status = QUEUE_FULL
                self._queue_status_timeout = time.time() + 4.0
                self._send_block_message_to_senders("full")
        elif len(self._q_in) < self._LWM:
            if self._queue_status == QUEUE_FULL:
                self._queue_status = QUEUE_OK
                self._queue_status_timeout = time.time() + 4.0
                self._send_block_message_to_senders("ok")
        # Resend every so often. After 10s the PubChannel will unlock itself
        if self._queue_status_timeout < time.time():
            self._queue_status_timeout = time.time() + 4.0
            if self._queue_status == QUEUE_OK:
                self._send_block_message_to_senders("ok")
            else:
                self._send_block_message_to_senders("full")
    ## Receive methods
    def recv(self, block=True):
        """ recv(block=True)
        Receive a message from the channel. What was sent as one
        message is also received as one message.
        If block is False, returns empty message if no data is available.
        If block is True, waits forever until data is available.
        If block is an int or float, waits that many seconds.
        If the channel is closed, returns empty message.
        """
        # Check queue status, maybe we need to block the sender
        self._check_queue_status()
        # Get package
        package = self._recv(block)
        # Return message content, or an empty message
        if package is not None:
            return self.message_from_bytes(package._data)
        else:
            return self.message_from_bytes(bytes())
    def recv_all(self):
        """ recv_all()
        Receive a list of all pending messages. The list can be empty.
        """
        # Check queue status, maybe we need to block the sender
        self._check_queue_status()
        # Pop all messages and return as a list
        pop = self._q_in.pop
        packages = [pop() for i in xrange(len(self._q_in))]
        return [self.message_from_bytes(p._data) for p in packages]
    def recv_selected(self):
        """ recv_selected()
        Receive a list of messages. Use only after calling
        yoton.select_sub_channel with this channel as one of the arguments.
        The returned messages are all received before the first pending
        message in the other SUB-channels given to select_sub_channel.
        The combination of this method and the function select_sub_channel
        enables users to combine multiple SUB-channels in a way that
        preserves the original order of the messages.
        """
        # No need to check queue status, we've done that in the
        # _get_pending_sequence_numbers() method
        # Prepare (_ref_seq was set on us by select_sub_channel)
        q = self._q_in
        ref_seq = self._ref_seq
        popped = []
        # Pop all messages that have sequence number lower than reference
        try:
            for i in xrange(len(q)):
                part = q.pop()
                if part._recv_seq > ref_seq:
                    q.insert(part)  # put back in queue
                    break
                else:
                    popped.append(part)
        except IndexError:
            pass
        # Done; return messages
        return [self.message_from_bytes(p._data) for p in popped]
    def _get_pending_sequence_numbers(self):
        """ _get_pending_sequence_numbers()
        Get the sequence numbers of the first and last pending messages.
        Returns (-1,-1) if no messages are pending.
        Used by select_sub_channel() to determine which channel should
        be read from first and what the reference sequence number is.
        """
        # Check queue status, maybe we need to block the sender
        self._check_queue_status()
        # Peek
        try:
            q = self._q_in
            return q.peek(0)._recv_seq, q.peek(-1)._recv_seq + 1
        except IndexError:
            return -1, -1
def select_sub_channel(*args):
    """ select_sub_channel(channel1, channel2, ...)
    Return the channel that has the oldest pending message of all
    given yoton.SubChannel instances, or None if there are no pending
    messages.
    This function can be used to read from SubChannel instances in the
    order that the messages were sent.
    After calling this function, use channel.recv_selected() to obtain
    all messages that are older than any pending messages in the other
    given channels.
    """
    # Sentinel larger than any realistic sequence number.
    BIG = 99999999999999999999999999
    smallest_seq1 = BIG
    smallest_seq2 = BIG
    first_channel = None
    for channel in args:
        # Only SUB channels expose _get_pending_sequence_numbers().
        if not isinstance(channel, SubChannel):
            raise ValueError("select_sub_channel() only accepts SUB channels.")
        # seq1/seq2 are the first and one-past-last pending sequence
        # numbers; (-1, -1) means nothing is pending on this channel.
        seq1, seq2 = channel._get_pending_sequence_numbers()
        if seq1 < 0:
            continue
        if seq1 < smallest_seq1:
            # This channel now holds the oldest pending message.
            # Cannot go beyond the number of packages in its queue,
            # nor beyond seq1 of an earlier selected channel.
            smallest_seq2 = min(smallest_seq1, smallest_seq2, seq2)
            smallest_seq1 = seq1
            first_channel = channel
        else:
            # The first_channel cannot go beyond the 1st package in THIS queue
            smallest_seq2 = min(smallest_seq2, seq1)
    # Store the reference sequence number on the winning channel.
    if first_channel:
        first_channel._ref_seq = smallest_seq2
        return first_channel
    else:
        return None
|
<reponame>j-sulliman/acici
# NOTE(review): the relative model import below runs BEFORE
# DJANGO_SETTINGS_MODULE is set and django.setup() is called; Django
# normally requires settings to be configured before models are imported.
# Confirm this ordering actually works in this project before relying on it.
from .models import FvAEPg, Nxos_vlan_svi
import os
import pprint as pp
os.environ['DJANGO_SETTINGS_MODULE'] = 'nxos_aci.settings'
import django
django.setup()
def handle_uploaded_file(f, destination_path='some/file/name.txt'):
    """Write an uploaded file to disk in chunks.

    Parameters
    ----------
    f : file-like
        Upload object exposing a Django-style ``chunks()`` iterator of
        byte strings.
    destination_path : str
        Path of the file to write.  The default preserves the previous
        hard-coded placeholder path for backward compatibility.
    """
    # Chunked copy avoids loading the whole upload into memory.
    with open(destination_path, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
def read_nxos_config_file(filename="Configurations/SW-ATCA-93180-1-Configuration_0.1"):
    """Open an NX-OS configuration file and return the open file handle.

    The handle is returned un-closed so callers can iterate it line by
    line (see create_vlans_from_nxos); the caller is responsible for
    closing it.
    """
    config_file = open(filename, "r")
    return config_file
def create_vlans_from_nxos(file, cmd_string="vlan "):
    """Parse an open NX-OS config file into a dict of VLAN/SVI attributes.

    Returns a dict keyed by the VLAN id zero-padded to 4 characters; each
    value may contain "name", "hostname", "vrf" and "ip" entries depending
    on which config lines were present for that VLAN.

    NOTE(review): the parameter name ``file`` shadows a builtin, and the
    parser is heavily order-dependent via ``prev_line``; left unchanged.
    """
    vlan_id = ''
    prev_line = ''
    epgs_bds = {}
    vrf_lst = []
    for line in file:
        # Remember the device hostname so each VLAN entry can be tagged.
        if line.startswith("hostname "):
            tmp_hostname = line.split("hostname ")
            hostname = tmp_hostname[1].strip()
        # "vlan <id>" definition lines; len(line) < 11 filters out longer
        # variants (e.g. vlan ranges / configuration sub-commands).
        if line.startswith(cmd_string) and len(line) < 11:
            prev_line = line
            temp_line = line.split(" ")
            vlan_id = temp_line[1].strip()
            # Zero-pad the VLAN id to 4 characters for stable dict keys.
            if len(vlan_id) == 1:
                vlan_id = '000' + vlan_id
            elif len(vlan_id) == 2:
                vlan_id = '00'+ vlan_id
            elif len(vlan_id) == 3:
                vlan_id = '0' + vlan_id
            epgs_bds[vlan_id] = {}
        elif line.startswith(" name") and prev_line.startswith(cmd_string):
            # VLAN name line that directly follows its "vlan <id>" line.
            vlan_name_lst = line.split(" name")
            vlan_name = vlan_name_lst[1].strip()
            epgs_bds[vlan_id]= {
                "name": vlan_name,
                "hostname": hostname}
        elif line.startswith("interface Vlan"):
            # SVI definition; pad its id the same way as the VLAN id so it
            # matches the keys created above.
            subnet_lst = line.split('interface Vlan')
            svi_cleaned = subnet_lst[1].strip()
            if len(svi_cleaned) == 1:
                svi_cleaned = '000' + svi_cleaned
            elif len(svi_cleaned) == 2:
                svi_cleaned = '00'+ svi_cleaned
            elif len(svi_cleaned) == 3:
                svi_cleaned = '0' + svi_cleaned
            prev_line = line
        elif line.startswith(" vrf member") and prev_line.startswith('interface Vlan'):
            vrf_lst = line.split(' vrf member ')
            if svi_cleaned in epgs_bds.keys():
                epgs_bds[svi_cleaned]["vrf"] = vrf_lst[1].strip()
        # SVIs with an explicit VRF: record their IP address(es).
        # NOTE(review): the bare except below also swallows NameError and
        # IndexError, not only the KeyError raised when the SVI has no
        # "vrf" entry -- worth narrowing to the intended exception(s).
        try:
            if line.startswith(" ip address") and prev_line.startswith('interface Vlan') and epgs_bds[svi_cleaned]["vrf"] == vrf_lst[1].strip():
                ip_lst = line.split(' ip address ')
                epgs_bds[svi_cleaned]['ip'] = ip_lst[1].strip()
                if epgs_bds[svi_cleaned]['ip'] == "10.8.223.2":
                    print(epgs_bds[svi_cleaned]['ip'])
                prev_line = line
                #print(prev_line)
            if line.startswith(" ip ") and prev_line.startswith(' ip address ') and epgs_bds[svi_cleaned]["vrf"] == vrf_lst[1].strip():
                ip_lst_tmp = line.split(' ip ')
                #print(ip_lst_tmp)
                epgs_bds[svi_cleaned]['ip'] = ip_lst_tmp[1].strip()
        except:
            # Fallback path: SVI without a "vrf member" line, i.e. the
            # default VRF.
            if line.startswith(" ip address") and prev_line.startswith('interface Vlan'):
                ip_lst = line.split(' ip address ')
                prev_line = line
                #print(prev_line)
                ip = ip_lst[1].strip()
            if line.startswith(" hsrp ") and prev_line.startswith(' ip address '):
                prev_line=line
                print(prev_line)
            if line.startswith(" ip ") and prev_line.startswith(' hsrp '):
                print(line)
                #prev_line_list = prev_line.split('/')
                ip_lst_tmp = line.split(' ip ')
                #print(ip_lst_tmp)
                # Reuse the prefix length (last 4 chars, e.g. "/24\n")
                # from the preceding address line for the HSRP VIP.
                ip = ip_lst_tmp[1].strip() + prev_line[-4:]
                if svi_cleaned in epgs_bds.keys():
                    #print('it is {}'.format(ip_lst))
                    #print(prev_line[-4:])
                    epgs_bds[svi_cleaned]["vrf"] = "DEFAULT"
                    epgs_bds[svi_cleaned]["ip"] = ip
                #print("ip is {}".format(epgs_bds[svi_cleaned]["ip"]))
    #pp.pprint(epgs_bds)
    return epgs_bds
def import_nxos_to_django(input_dict):
    """Replace all Nxos_vlan_svi rows with entries built from input_dict.

    input_dict is the mapping produced by create_vlans_from_nxos():
    zero-padded VLAN id -> attribute dict.
    """
    # Start from a clean table so re-imports do not duplicate rows.
    Nxos_vlan_svi.objects.all().delete()
    for encap, attrs in input_dict.items():
        record = Nxos_vlan_svi(
            encap=encap,
            name=attrs.get("name").upper(),
            svi_ip=attrs.get("ip", "DEFAULT"),
            vrf=attrs.get("vrf", "DEFAULT").upper(),
            hostname=attrs.get("hostname", "DEFAULT").upper()
        )
        record.save()
def convert_vlans_to_epgs():
    """Rebuild the FvAEPg table, one EPG per imported Nxos_vlan_svi row.

    Returns the number of VLAN records processed.
    """
    # Start from a clean EPG table so re-runs do not duplicate rows.
    FvAEPg.objects.all().delete()
    vlan_len = len(Nxos_vlan_svi.objects.all())
    for vlan in Nxos_vlan_svi.objects.all():
        epg_name = '{}-{}_EPG'.format(vlan.encap, vlan.name)
        epg = FvAEPg(
            pcEnfPref='unenforced',
            dn='uni/tn-NXOS-ACI-DEFAULT/ap-{}-LEGACY-{}_AP/epg-{}-{}_EPG'.format(vlan.vrf, vlan.hostname, vlan.encap, vlan.name),
            name=epg_name,
            tenant='NXOS-ACI-DEFAULT',
            bd_tDn ='BD-{}-{}_BD'.format(vlan.encap, vlan.name),
            fvRsDomAtt_tDn='LEGACY_PHY',
            fvRsPathAtt='IPG-LEGACY-{}_IPG'.format(vlan.hostname),
            encap=vlan.encap,
            legacy_switch=vlan.hostname,
            vrf=vlan.vrf,
            fvSubnet=vlan.svi_ip
        )
        epg.save()
    return vlan_len
#convert_vlans_to_epgs()
|
#Uses python3
import sys
class Dgraph:
    """A directed graph with DFS-based cycle detection.

    Attributes
    ----------
    adj_list : list
        For each vertex index, the list of its neighbor indices.
    prev : dict
        Vertex -> clock value stamped when its exploration starts.
    post : dict
        Vertex -> clock value stamped when its exploration finishes.
    visited : dict
        Vertices that have already been visited.
    clock : int
        Counter incremented after every previsit/postvisit stamp.
    cyclic : bool
        Starts False; becomes True as soon as a cycle is discovered.
    """
    def __init__(self, adj_l):
        """Store the adjacency list and reset all traversal state."""
        self.adj_list = adj_l
        self.prev = dict()
        self.post = dict()
        self.visited = dict()
        self.clock = 1
        self.cyclic = False
    def previsit(self, vertex):
        """Stamp *vertex* with the clock when its exploration starts."""
        self.prev[vertex] = self.clock
        self.clock += 1
    def postvisit(self, vertex):
        """Stamp *vertex* with the clock when its exploration ends."""
        self.post[vertex] = self.clock
        self.clock += 1
    def explore(self, vertex):
        """Depth-first explore *vertex*, flagging a cycle if one exists."""
        self.visited[vertex] = 'visited'
        self.previsit(vertex)
        for neighbor in self.adj_list[vertex]:
            if self.cyclic:
                # A cycle was already found: unwind every recursive call
                # without doing any further work.
                return
            if neighbor in self.prev and neighbor not in self.post:
                # Back edge: the neighbor is still on the DFS stack
                # (previsited but not yet postvisited) -> cycle.
                self.cyclic = True
                return
            if neighbor not in self.visited:
                self.explore(neighbor)
        self.postvisit(vertex)
    def acyclic(self, num):
        """Return 1 if the graph contains a cycle, otherwise 0."""
        for vertex in range(num):  # vertex = index in the adjacency list
            if vertex in self.visited:
                continue
            self.explore(vertex)
            if self.cyclic:
                return 1
        return 0
if __name__ == '__main__':
    # Input format on stdin: "n_vertices n_edges" followed by one
    # "u v" pair per edge, all whitespace-separated, vertices 1-based.
    graph = sys.stdin.read()
    data = list(map(int, graph.split()))
    n_ver, n_edg = data[0:2]
    data = data[2:]
    # Pair up the remaining numbers into (u, v) edge tuples.
    edges = list(zip(data[0:(2 * n_edg):2], data[1:(2 * n_edg):2]))
    adj = [[] for _ in range(n_ver)] # list of vertices and their neighbors
    for (a, b) in edges:
        # Convert 1-based input vertices to 0-based indices.
        adj[a - 1].append(b - 1)
    d_graph = Dgraph(adj)
    # Prints 1 if the graph has a cycle, 0 otherwise.
    print(d_graph.acyclic(n_ver))
|
import decimal
import json as _json
import sys
import re
from _plotly_utils.optional_imports import get_module
from _plotly_utils.basevalidators import ImageUriValidator
# True when running on Python >= 3.6; not referenced within this chunk
# of the file (kept for backward compatibility).
PY36_OR_LATER = sys.version_info >= (3, 6)
class PlotlyJSONEncoder(_json.JSONEncoder):
    """
    Meant to be passed as the `cls` kwarg to json.dumps(obj, cls=..)
    See PlotlyJSONEncoder.default for more implementation information.
    Additionally, this encoder overrides nan functionality so that 'Inf',
    'NaN' and '-Inf' encode to 'null'. Which is stricter JSON than the Python
    version.
    """
    def coerce_to_strict(self, const):
        """
        This is used to ultimately *encode* into strict JSON, see `encode`
        """
        # before python 2.7, 'true', 'false', 'null' were included here.
        if const in ("Infinity", "-Infinity", "NaN"):
            return None
        else:
            return const
    def encode(self, o):
        """
        Load and then dump the result using parse_constant kwarg
        Note that setting invalid separators will cause a failure at this step.
        """
        # this will raise errors in a normal-expected way
        encoded_o = super(PlotlyJSONEncoder, self).encode(o)
        # Double pass:
        # 1. `loads` to switch Infinity, -Infinity, NaN to None
        # 2. `dumps` again so you get 'null' instead of extended JSON
        try:
            new_o = _json.loads(encoded_o, parse_constant=self.coerce_to_strict)
        except ValueError:
            # invalid separators will fail here. raise a helpful exception
            raise ValueError(
                "Encoding into strict JSON failed. Did you set the separators "
                "valid JSON separators?"
            )
        else:
            return _json.dumps(
                new_o,
                sort_keys=self.sort_keys,
                indent=self.indent,
                separators=(self.item_separator, self.key_separator),
            )
    def default(self, obj):
        """
        Accept an object (of unknown type) and try to encode with priority:
        1. builtin:     user-defined objects
        2. sage:        sage math cloud
        3. pandas:      dataframes/series
        4. numpy:       ndarrays
        5. datetime:    time/datetime objects
        Each method throws a NotEncoded exception if it fails.
        The default method will only get hit if the object is not a type that
        is naturally encoded by json:
            Normal objects:
                dict        object
                list, tuple array
                str, unicode    string
                int, long, float    number
                True        true
                False       false
                None        null
            Extended objects:
                float('nan')    'NaN'
                float('infinity')   'Infinity'
                float('-infinity')  '-Infinity'
        Therefore, we only anticipate either unknown iterables or values here.
        """
        # TODO: The ordering of these methods is *very* important. Is this OK?
        encoding_methods = (
            self.encode_as_plotly,
            self.encode_as_sage,
            self.encode_as_numpy,
            self.encode_as_pandas,
            self.encode_as_datetime,
            self.encode_as_date,
            self.encode_as_list,  # because some values have `tolist` do last.
            self.encode_as_decimal,
            self.encode_as_pil,
        )
        # Try each strategy in order; the first that does not raise
        # NotEncodable wins.
        for encoding_method in encoding_methods:
            try:
                return encoding_method(obj)
            except NotEncodable:
                pass
        # Nothing matched: defer to the base class (raises TypeError).
        return _json.JSONEncoder.default(self, obj)
    @staticmethod
    def encode_as_plotly(obj):
        """Attempt to use a builtin `to_plotly_json` method."""
        try:
            return obj.to_plotly_json()
        except AttributeError:
            raise NotEncodable
    @staticmethod
    def encode_as_list(obj):
        """Attempt to use `tolist` method to convert to normal Python list."""
        if hasattr(obj, "tolist"):
            return obj.tolist()
        else:
            raise NotEncodable
    @staticmethod
    def encode_as_sage(obj):
        """Attempt to convert sage.all.RR to floats and sage.all.ZZ to ints"""
        sage_all = get_module("sage.all")
        if not sage_all:
            # sage is not installed; let the next strategy try.
            raise NotEncodable
        if obj in sage_all.RR:
            return float(obj)
        elif obj in sage_all.ZZ:
            return int(obj)
        else:
            raise NotEncodable
    @staticmethod
    def encode_as_pandas(obj):
        """Attempt to convert pandas.NaT"""
        pandas = get_module("pandas")
        if not pandas:
            raise NotEncodable
        # NaT (not-a-time) becomes JSON null.
        if obj is pandas.NaT:
            return None
        else:
            raise NotEncodable
    @staticmethod
    def encode_as_numpy(obj):
        """Attempt to convert numpy.ma.core.masked"""
        numpy = get_module("numpy")
        if not numpy:
            raise NotEncodable
        # Masked values become NaN (later coerced to null by encode()).
        if obj is numpy.ma.core.masked:
            return float("nan")
        else:
            raise NotEncodable
    @staticmethod
    def encode_as_datetime(obj):
        """Convert datetime objects to iso-format strings"""
        try:
            return obj.isoformat()
        except AttributeError:
            raise NotEncodable
    @staticmethod
    def encode_as_date(obj):
        """Attempt to convert to utc-iso time string using date methods."""
        try:
            time_string = obj.isoformat()
        except AttributeError:
            raise NotEncodable
        else:
            return iso_to_plotly_time_string(time_string)
    @staticmethod
    def encode_as_decimal(obj):
        """Attempt to encode decimal by converting it to float"""
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        else:
            raise NotEncodable
    @staticmethod
    def encode_as_pil(obj):
        """Attempt to convert PIL.Image.Image to base64 data uri"""
        image = get_module("PIL.Image")
        if image is not None and isinstance(obj, image.Image):
            return ImageUriValidator.pil_image_to_uri(obj)
        else:
            raise NotEncodable
class NotEncodable(Exception):
    """Raised by the encode_as_* helpers when they cannot handle the object."""
    pass
def iso_to_plotly_time_string(iso_string):
    """Remove UTC timezone info and replace the 'T' delimiter with ' ' (ws).

    Parameters
    ----------
    iso_string : str
        An ISO-8601 formatted date/time string.

    Returns
    -------
    str
        The string with any '+00:00'/'-00:00' suffix removed, a trailing
        'T00:00:00' dropped entirely, and 'T' replaced by a space.

    Raises
    ------
    Exception
        If the string carries a non-UTC trailing offset; plotly assumes
        all timestrings are in UTC.
    """
    # BUGFIX: the original guard compared a list slice to a string
    # (iso_string.split("-")[:3] == "00:00") and took the wrong side of
    # split("+"), so it could never detect timezone info.  Detect a real
    # trailing +HH:MM/-HH:MM offset instead; a 00:00 (UTC) offset is
    # still accepted and stripped below.
    offset = re.search(r"[+-]\d{2}:\d{2}$", iso_string)
    if offset and offset.group(0)[1:] != "00:00":
        raise Exception(
            "Plotly won't accept timestrings with timezone info.\n"
            "All timestrings are assumed to be in UTC."
        )
    # Strip an explicit UTC offset, if any.
    iso_string = iso_string.replace("-00:00", "").replace("+00:00", "")
    # Drop a midnight time entirely; otherwise use a space delimiter.
    if iso_string.endswith("T00:00:00"):
        return iso_string.replace("T00:00:00", "")
    else:
        return iso_string.replace("T", " ")
def template_doc(**names):
    """Build a decorator that str.format()s the decorated function's docstring.

    Parameters
    ----------
    **names
        Keyword substitutions applied via ``str.format`` to the wrapped
        function's docstring.

    Returns
    -------
    callable
        A decorator that rewrites ``func.__doc__`` in place and returns
        the same function object (no wrapper is created).
    """
    def _decorator(func):
        # Templating is skipped on Python 3.2 only (the original code gave
        # no reason -- presumably a 3.2-specific quirk); functions without
        # a docstring (e.g. under ``python -OO``) are left untouched.
        if sys.version_info[:2] != (3, 2):
            if func.__doc__ is not None:
                func.__doc__ = func.__doc__.format(**names)
        return func
    return _decorator
def _natural_sort_strings(vals, reverse=False):
def key(v):
v_parts = re.split(r"(\d+)", v)
for i in range(len(v_parts)):
try:
v_parts[i] = int(v_parts[i])
except ValueError:
# not an int
pass
return tuple(v_parts)
return sorted(vals, key=key, reverse=reverse)
|
<reponame>rakytap/QAC_prime_factoring
from dwave.cloud import Client
#client = Client.from_config(token='<KEY>')
#available_solvers = client.get_solvers()
#print( available_solvers )
# Manual embedding using th ehybrid solver
# Demo 1: fix a variable with FixedVariableComposite and solve the
# remaining QUBO exactly.
print(' ')
print( 'Composed sampler' )
from dimod import FixedVariableComposite, ExactSolver
Q = {('x', 'x'): -1, ('x', 'z'): 2, ('z', 'x'): 0, ('z', 'z'): -1}
composed_sampler = FixedVariableComposite(ExactSolver())
sampleset = composed_sampler.sample_qubo(Q, fixed_variables={'x': 1})
print(sampleset)
# Demo 2: QBsolv on a small QUBO, then on a large random QUBO split into
# sub-QUBOs and solved with simulated annealing.
print(' ')
print( 'QBsolv' )
from dwave_qbsolv import QBSolv
# Set Q for the minor-embedded problem QUBO
qubit_biases = {(0, 0): 0.3333, (1, 1): -0.333, (4, 4): -0.333, (5, 5): 0.333}
coupler_strengths = {(0, 4): 0.667, (0, 5): -1, (1, 4): 0.667, (1, 5): 0.667}
Q = dict(qubit_biases)
Q.update(coupler_strengths)
response = QBSolv().sample_qubo(Q)
print("samples=" + str(list(response.samples())))
print("energies=" + str(list(response.data_vectors['energy'])))
# NOTE(review): duplicate import of QBSolv (already imported above).
from dwave_qbsolv import QBSolv
import neal
import itertools
import random
qubo_size = 500
subqubo_size = 30
# Dense random QUBO over all (i, j) pairs; values are not seeded, so
# results differ between runs.
Q = {t: random.uniform(-1, 1) for t in itertools.product(range(qubo_size), repeat=2)}
sampler = neal.SimulatedAnnealingSampler()
response = QBSolv().sample_qubo(Q, solver=sampler, solver_limit=subqubo_size)
print(response)
# Demo 3: manual embedding -- qubit indices refer directly to hardware
# qubits, so no embedding composite is used.
print(' ')
print( 'Manual embedding' )
from dwave.system.samplers import DWaveSampler
# Set Q for the minor-embedded problem QUBO
qubit_biases = {(0, 0): 0.3333, (1, 1): -0.333, (4, 4): -0.333, (5, 5): 0.333}
coupler_strengths = {(0, 4): 0.667, (0, 5): -1, (1, 4): 0.667, (1, 5): 0.667}
Q = dict(qubit_biases)
Q.update(coupler_strengths)
# Sample on a D-Wave system (100 reads) and print the returned samples
response = DWaveSampler().sample_qubo(Q, num_reads=100)
print(response)
#for (sample, energy, num_occurrences, chain_break) in response.data():
#    (sample, energy, num_occurrences, chain_break) = item
#    print(sample, "Energy: ", energy, "Occurrences: ", num_occurrences)
# Demo 4: automated minor embedding via EmbeddingComposite, with logical
# variable names instead of hardware qubit indices.
print(' ')
print( 'Automated minor embedding' )
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
# Set Q for the problem QUBO
linear = {('x0', 'x0'): -1, ('x1', 'x1'): -1, ('x2', 'x2'): -1}
quadratic = {('x0', 'x1'): 2, ('x0', 'x2'): 2, ('x1', 'x2'): 2}
Q = dict(linear)
Q.update(quadratic)
# Minor-embed and sample on a default D-Wave system (100 reads)
response = EmbeddingComposite(DWaveSampler()).sample_qubo(Q, num_reads=100)
print(response)
print(response.data())
for (sample, energy, num_occurrences, chain_break) in response.data():
    print(sample, "Energy: ", energy, "Occurrences: ", num_occurrences)
|
<filename>sandiego.gov/businesses/bundle.py
'''
Bundle that loads San Diego business-license CSV data, geocodes the
business addresses, and assigns each business to a census block.
'''
from ambry.bundle import BuildBundle
class Bundle(BuildBundle):
    '''Ambry build bundle for San Diego business-license data.

    NOTE(review): this is Python 2 code -- it uses ``r.next()``,
    ``unicode()`` and ``e.message`` -- and will not run under Python 3
    without changes.
    '''
    def __init__(self,directory=None):
        super(Bundle, self).__init__(directory)
    @property
    def header(self):
        # Column names taken from the first row of the 'active1' source
        # CSV, mangled into valid table/column identifiers.
        import csv
        from ambry.orm import Table
        fn = self.filesystem.download('active1')
        with open(fn) as f:
            r = csv.reader(f)
            return [ Table.mangle_name(c) for c in r.next() ]
    def gen_rows(self, as_dict = True):
        # Yield every data row from every configured source CSV; when
        # as_dict is True, rows are dicts with parsed dates, otherwise
        # raw csv lists.
        import csv
        from dateutil.parser import parse as parse_date
        for k in self.metadata.sources:
            fn = self.filesystem.download(k)
            with open(fn) as f:
                r = csv.reader(f);
                r.next() # Skip the header
                header = self.header # Get our processed header
                for row in r:
                    if as_dict:
                        row = dict(zip(header, row))
                        row['dba_name'] = unicode(row['dba_name'],errors='ignore') # One row has a funny char
                        row['creation_dt'] = parse_date(row['creation_dt'])
                        row['start_dt'] = parse_date(row['start_dt'])
                        row['exp_dt'] = parse_date(row['exp_dt'])
                        yield row
                    else:
                        yield row
    def meta(self):
        # Build/refresh the schema from a 500-row sample of the raw data.
        self.prepare()
        self.schema.update('businesses', self.gen_rows(as_dict=False), n=500, header=self.header, logger=self.init_log_rate())
        self.schema.write_schema()
        return True
    def build(self):
        # Full pipeline: load CSV, geocode twice, then assign blocks.
        self.build_load()
        self.build_ck_geocoder()
        self.build_dstk_geocoder()
        self.build_block_cross()
        return True
    def build_load(self):
        """Load the CSV file of original data"""
        p = self.partitions.new_partition(table='businesses')
        p.clean()
        lr = self.init_log_rate(10000)
        # NOTE(review): good/bad are initialized but never updated here.
        good = 0
        bad = 0
        with p.database.inserter() as ins:
            for i, row in enumerate(self.gen_rows()):
                row = dict(row)
                # Rename the mangled trailing-underscore column.
                row['business_acct'] = row['business_acct_']
                ins.insert(row)
                if self.run_args.test and i > 100:
                    break
                lr()
        return True
    def build_ck_geocoder(self):
        """Create a crosswalk to CK geocoded addresses, which link to SANDAG data"""
        from ambry.geo.geocoder import Geocoder
        # Normalize neighborhood names the geocoder does not know.
        city_subs = {
            'La Jolla': 'San Diego'
        }
        g = Geocoder(self.library.dep('geocoder').partition, city_subs)
        lr = self.init_log_rate(250)
        businesses = self.partitions.find(table='businesses')
        p = self.partitions.find_or_new(table = 'ck_addresses')
        p.clean()
        good = 0
        bad = 0
        with p.inserter() as ins:
            for i, bus in enumerate(businesses.rows):
                row = {
                    'businesses_id' : bus['id']
                }
                try:
                    # This just lets us know what addresses aren't geocoding. We'll use the failures
                    # as bad addresses in a geocoder update.
                    if bus['city']:
                        row['address_id'], result, parsed = g.parse_and_code(bus['address'],
                            city=bus['city'].title(), state = "CA", zip=bus['zip'])
                        row['parsed_addr'] = "{}, {}, CA {}".format(parsed.text, parsed.locality.city, parsed.locality.zip)
                        if result:
                            row.update(result)
                            # NOTE(review): because of conditional-expression
                            # precedence this does NOT concatenate
                            # direction+name+suffix as the layout suggests;
                            # verify the intended grouping with parentheses.
                            row['name'] = (
                                row['direction']+' ' if row['direction'] else '' +
                                row['name']+
                                ' '+row['suffix'] if row['suffix'] else ''
                            )
                            row['id'] = None
                            good += 1
                        else:
                            bad += 1
                except Exception as e:
                    # e.message is Python 2 only.
                    self.error("Failed to parse row {}: {} : {} ".format(i, bus['address'], e.message))
                    raise
                lr("Geocode CK: {} good / {} bad ( {}%) of {}".format(good, bad, round(float(good) / float(good+bad) *100,1), good+bad ))
                ins.insert(row)
                if self.run_args.test and i > 500:
                    break
    def build_dstk_geocoder(self):
        """Geocode with the Data Science Toolkit"""
        from ambry.geo.geocoders import DstkGeocoder
        lr = self.init_log_rate(250)
        businesses = self.partitions.find(table='businesses')
        def address_gen():
            # Yield (formatted address, original row) pairs for the DSTK.
            for row in businesses.query("SELECT * FROM businesses"):
                address = "{}, {}, {} {}".format(row['address'], row['city'], row['state'], row['zip'])
                yield (address, row)
        dstk_service = self.config.service('dstk')
        dstk_gc = DstkGeocoder(dstk_service, address_gen())
        p = self.partitions.find_or_new(table = 'dstk_addresses')
        p.clean()
        # NOTE(review): good/bad are initialized but never updated here.
        good = 0
        bad = 0
        with p.inserter() as ins:
            for i, (k, r, inp_row) in enumerate(dstk_gc.geocode()):
                row = {
                    'businesses_id' : inp_row['id']
                }
                if r:
                    # Map DSTK response fields onto our column names.
                    row.update(dict(r))
                    row['number'] = r.get('street_number', None)
                    row['name'] = r.get('street_name', None)
                    row['city'] = r.get('locality', None)
                    row['state'] = r.get('region', None)
                    row['lat'] = r.get('latitude', None)
                    row['lon'] = r.get('longitude', None)
                    row['county'] = r.get('fips_county', None)
                lr("Geocode DSTK")
                ins.insert(row)
                if self.run_args.test and i > 500:
                    break
    def build_block_cross(self):
        """Build the bus_block_cross crosswalk file to assign businesses to blocks. """
        from ambry.geo.util import find_geo_containment
        lr = self.init_log_rate(3000)
        def generate_geometries():
            # Census-block polygons (WKT) keyed by geoid.
            blocks = self.library.dep('blocks').partition
            # Note, ogc_fid is the primary key. The id column is created by the shapefile.
            for i,block in enumerate(blocks.query("SELECT AsText(geometry) AS wkt, geoid FROM blocks")):
                lr('Load rtree')
                yield i, block['geoid'] , block['wkt']
        def generate_points():
            # Business points from the DSTK geocoding pass.
            p = self.partitions.find(table = 'dstk_addresses')
            for row in p.rows:
                if row['lon'] and row['lat']:
                    yield (row['lon'], row['lat']), row['businesses_id']
        def mark_contains():
            # Coroutine: receives (point, point_obj, geometry, poly_obj)
            # tuples from find_geo_containment and records each hit.
            p = self.partitions.find_or_new(table='bus_block_cross')
            p.clean()
            with p.inserter() as ins:
                while True:
                    (p,point_obj,geometry, poly_obj) = yield # Get a value back from find_geo_containment
                    ins.insert(
                        dict(businesses_id = point_obj,
                            block_geoid = poly_obj, # New name
                            geoid = poly_obj # Old name, for development
                        ))
                    lr('Marking point containment')
        find_geo_containment(generate_geometries(), generate_points(), mark_contains())
|
<reponame>mintproject/topoflow36
#-------------------------------------------------------------------
# Copyright (c) 2013-2020, <NAME>
#
# Apr 2013. New time interpolator class from/for emeli.py.
#
#-------------------------------------------------------------------
#
# class time_interp_data()
# __init__()
# update()
#
# class time_interpolator()
# __init__()
# initialize()
# update()
# update_all()
# get_values()
# convert_time_units()
#
#-------------------------------------------------------------------
import numpy as np
#-------------------------------------------------------------------
class time_interp_data():
    """
    Small utility class holding the time-interpolation state for one
    shared variable (one instance per long_var_name shared between
    components).

    Stores the variable's values at the start (v1, t1) and end (v2, t2)
    of the current interpolation interval, plus the linear coefficients
    a and b such that  v(t) = a*t + b  on [t1, t2].

    Note: Additional arguments would be needed to support interpolation
    methods other than "Linear" (or a new class).
    """
    def __init__( self, v1=None, t1=None, long_var_name=None ):
        #-------------------------------------------
        # Save (v1,t1) into (v2,t2) because update()
        # first shifts old (v2,t2) down into (v1,t1).
        #-------------------------------------------
        self.v2 = v1
        self.t2 = t1
        self.long_var_name = long_var_name
        #------------------------------------------------
        # Independent copies are needed so that in-place
        # updates of v2 cannot silently alter v1.
        # (Assumes v1/t1 are NumPy arrays or scalars,
        # which support copy().)
        #------------------------------------------------
        self.v1 = v1.copy()
        self.t1 = t1.copy()

    # __init__()
    #----------------------------------------------------------
    def update( self, v2=None, t2=None ):
        """
        Advance the interpolation interval: shift the old end values
        (v2, t2) into the start values (v1, t1), store the new end
        values, and recompute the linear coefficients a and b.

        v1/v2 may be 0D, 1D, 2D or 3D NumPy arrays; the expressions
        below work for any rank.  In-place assignment (v[:] = ...) is
        used when the rank allows it, to avoid reallocating.
        """
        #---------------------------------------------
        # Update the "start values" (old end values),
        # in place if possible.  try/except is used
        # because 0-d arrays / scalars don't support
        # slice assignment.  copy() is required so the
        # saved state never aliases caller data.
        #---------------------------------------------
        self.t1 = self.t2.copy()
        try:
            self.v1[:] = self.v2.copy()
        except Exception:
            self.v1 = self.v2.copy()
        #--------------------------
        # Update the "end values"
        # (in-place, if possible)
        #--------------------------
        self.t2 = t2
        try:
            self.v2[:] = v2.copy()
        except Exception:
            self.v2 = v2.copy()
        #---------------------------------------------
        # Update the interpolation parameters, a & b.
        #---------------------------------------------
        # BUG FIX: previously tested (dv.min() != 0),
        # which disables interpolation whenever even a
        # single element is unchanged between t1 and
        # t2.  The intent (per the original comment:
        # "np.any(v1_ne_v2)") is "did ANY element
        # change?", i.e. dv.max() != 0.
        #---------------------------------------------
        dv = np.abs(v2 - self.v1)
        if (dv.max() != 0) and (t2 != self.t1):
            self.a = (v2 - self.v1) / (t2 - self.t1)
            self.b = v2 - (self.a * t2)
        else:
            #------------------------------------------
            # Variables that don't vary in time will
            # have v1 = v2, but t2 > t1.
            # Disabled TopoFlow components will have
            # v2 = v1 and t2 = t1, but they may still
            # provide default values (e.g. precip=0).
            #------------------------------------------
            # This a and b gives "no interpolation",
            # that is, v[t] = v1 = v2.
            #------------------------------------------
            self.a = np.float64(0)
            self.b = v2

    # update()
    #----------------------------------------------------------
# time_interp_data() (class)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
class time_interpolator():
    """
    Provides automatic time interpolation of the variables shared
    between a set of BMI components that may use different time steps.

    The framework's clock runs at the smallest component time step;
    slower components are advanced only when framework time passes
    their internal time, and requested values are linearly interpolated
    between the two most recent component states.

    Supported methods: 'None' (step values) and 'Linear'.
    ('Cubic' spline interpolation is sketched but not implemented.)
    """
    #----------------------------------------
    # Unit-conversion constants (in seconds)
    #----------------------------------------
    secs_per_min   = 60
    secs_per_hour  = 60 * secs_per_min
    secs_per_day   = 24 * secs_per_hour
    secs_per_year  = 365 * secs_per_day
    secs_per_month = secs_per_year / 12   # approximate month; true division

    #----------------------------------------------------------
    def __init__( self, comp_set, comp_names, vars_provided,
                  method='Linear' ):
        """
        Parameters (currently passed in from the framework):

        comp_set      : dict mapping a comp_name key to a BMI model
                        instance.
        comp_names    : list of all comp_names that provide vars to
                        other components (and all keys in comp_set).
        vars_provided : dict mapping a comp_name key to the list of
                        long_var_names that the comp actually provides
                        to another component in the set.
        method        : 'None' or 'Linear' (so far).
        """
        self.comp_set = comp_set
        self.provider_comp_list = comp_names
        self.vars_provided = vars_provided
        # title() normalizes casing, e.g. 'linear' -> 'Linear'.
        self.interpolation_method = method.title()

    # __init__()
    #----------------------------------------------------------
    def initialize( self, SILENT=False ):
        """
        Build self.time_interp_vars, a dict that maps a long_var_name
        key to a time_interp_data bundle used for interpolation.
        Should be called from the framework's initialize().

        Each provider component's bmi.update() is called exactly once
        here so that both interval endpoints (t1 and t2) are known.
        Note: provider_comp_list always includes the Driver.
        """
        self.SILENT = SILENT
        method = self.interpolation_method
        if not(self.SILENT):
            print('Time interpolation method = ' + method)
            print()
        #----------------------------
        # Case of no interpolation
        # (i.e. "steps" or "jumps")
        #----------------------------
        if (method == 'None'):
            self.time_interp_vars = None
            #--------------------------------------------
            # For this method, we must call bmi.update()
            # for every provider here.
            #--------------------------------------------
            for comp_name in self.provider_comp_list:
                bmi = self.comp_set[ comp_name ]
                bmi.update( -1.0 )
            return
        #-------------------------------
        # Case of Linear interpolation
        #-------------------------------
        if (method == 'Linear'):
            self.time_interp_vars = dict()
            for comp_name in self.provider_comp_list:
                bmi = self.comp_set[ comp_name ]
                #---------------------------------------
                # Get t1 and convert units, if needed.
                #---------------------------------------
                comp_time_units = bmi.get_time_units()
                t1 = bmi.get_current_time()
                t1 = self.convert_time_units( t1, comp_time_units )
                #---------------------------------------------------
                # Get vars at start of interpolation time interval
                #---------------------------------------------------
                for long_var_name in self.vars_provided[ comp_name ]:
                    v1 = bmi.get_values( long_var_name )
                    data = time_interp_data( v1=v1, t1=t1,
                                             long_var_name=long_var_name )
                    self.time_interp_vars[ long_var_name ] = data
                #---------------------------------------------
                # Call this component's update() just once.
                # Note: Driver is updated here, too, even if
                # it doesn't provide vars to others.
                #---------------------------------------------
                bmi.update( -1.0 )
                #---------------------------------------
                # Get t2 and convert units, if needed.
                #---------------------------------------
                t2 = bmi.get_current_time()
                t2 = self.convert_time_units( t2, comp_time_units )
                #-------------------------------------------------
                # Get vars at end of interpolation time interval;
                # update() also computes the a & b coefficients.
                #-------------------------------------------------
                for long_var_name in self.vars_provided[ comp_name ]:
                    v2 = bmi.get_values( long_var_name )
                    self.time_interp_vars[ long_var_name ].update(v2, t2)
            return
        #-----------------------------------------------------
        # Case of Cubic Spline interpolation: not implemented.
        # It would require all interval endpoint values (or a
        # prior run to precompute coefficients), plus three
        # bmi.update() calls here instead of one.
        #-----------------------------------------------------

    # initialize()
    #-------------------------------------------------------------------
    def update( self, comp_name, time ):
        """
        Advance component comp_name past framework time `time` (in
        seconds) and refresh its interpolation data.

        Since the framework time step equals the smallest component
        time step, a single bmi.update() call is enough to push the
        component's internal time past the framework time.  A
        component's current time is incremented each time bmi.update()
        is called (including the call made in initialize()).
        """
        DEBUG = False
        ## DEBUG = True
        bmi = self.comp_set[ comp_name ]
        #-----------------------------------------------------
        # Get current time of component with this comp_name.
        # Convert units to framework time units, if needed.
        #-----------------------------------------------------
        comp_time_units = bmi.get_time_units()
        comp_time = bmi.get_current_time()
        comp_time = self.convert_time_units( comp_time, comp_time_units )
        if (DEBUG):
            print('=============================================')
            print('In update_time_interpolation():')
            print(' time (fmwk) =', time)
            print(' comp_name =', comp_name)
            print(' comp_time =', comp_time)
        #------------------------------------------------
        # Do we need to update interpolation vars ?
        # Note: DISABLED components have comp_time = 0.
        #------------------------------------------------
        if (time <= comp_time):
            if (DEBUG):
                print(' NO update for: ' + comp_name + ' interp. vars')
            return
        #------------------------------------------------
        # The current "framework time" has passed this
        # model component's internal time, so we need to
        # call the model's bmi.update() method and then
        # update the time interpolation vars.
        #------------------------------------------------
        if (DEBUG):
            print(' Framework updated: ' + comp_name + ' interp. vars')
        #------------------------------------------------
        # Skip components still in "initialized" status:
        # initialize() moved every enabled component to
        # "updated", so "initialized" here means the
        # component is effectively disabled.  Without
        # this check, bmi.update() would be called every
        # time for DISABLED components.
        #------------------------------------------------
        comp_status = bmi.get_status()
        # if (comp_status == 'disabled'):  # (not used/ready yet)
        if (comp_status == 'initialized'):   # (this works)
            return
        #---------------------------
        # Case of no interpolation
        #---------------------------
        if (self.interpolation_method == 'None'):
            # One update() call suffices to get comp_time > time.
            bmi.update( -1.0 )
            return
        #-------------------------------
        # Case of Linear interpolation
        #-------------------------------
        if (self.interpolation_method == 'Linear'):
            # Call this component's update() just once.
            bmi.update( -1.0 )
            #---------------------------------------
            # Get t2 and convert units, if needed.
            #---------------------------------------
            comp_time_units = bmi.get_time_units()
            t2 = bmi.get_current_time()
            t2 = self.convert_time_units( t2, comp_time_units )
            #---------------------------------------------------
            # Get values at end of interpolation time interval.
            # bmi.get_values() works for any rank.
            #---------------------------------------------------
            for long_var_name in self.vars_provided[ comp_name ]:
                v2 = bmi.get_values( long_var_name )
                i_vars = self.time_interp_vars[ long_var_name ]
                # This also shifts (v2, t2) into (v1, t1) first.
                i_vars.update(v2, t2)
            return
        #-------------------------------------
        # Case of Cubic Spline interpolation
        #-------------------------------------
        ## if (self.interpolation_method == 'Cubic'):

    # update()
    #-------------------------------------------------------------------
    def update2( self, comp_name ):
        """
        Variant of update() that assumes the caller has already
        called bmi.update() for this component, so only the
        interpolation data is refreshed here.
        """
        DEBUG = False
        ## DEBUG = True
        #---------------------------
        # Case of no interpolation
        #---------------------------
        if (self.interpolation_method == 'None'):
            return
        #------------------------------------------------
        # Skip disabled components; see note in update().
        #------------------------------------------------
        bmi = self.comp_set[ comp_name ]   # (or pass in bmi)
        comp_status = bmi.get_status()
        # if (comp_status == 'disabled'):  # (not used/ready yet)
        if (comp_status == 'initialized'):   # (this works)
            return
        #-------------------------------
        # Case of Linear interpolation
        #-------------------------------
        if (self.interpolation_method == 'Linear'):
            #---------------------------------------
            # Get t2 and convert units, if needed.
            #---------------------------------------
            comp_time_units = bmi.get_time_units()
            t2 = bmi.get_current_time()
            t2 = self.convert_time_units( t2, comp_time_units )
            #---------------------------------------------------
            # Get values at end of interpolation time interval
            #---------------------------------------------------
            for long_var_name in self.vars_provided[ comp_name ]:
                v2 = bmi.get_values( long_var_name )
                i_vars = self.time_interp_vars[ long_var_name ]
                # This also shifts (v2, t2) into (v1, t1) first.
                i_vars.update(v2, t2)
            return
        #-------------------------------------
        # Case of Cubic Spline interpolation
        #-------------------------------------
        ## if (self.interpolation_method == 'Cubic'):

    # update2()
    #-------------------------------------------------------------------
    def update_all( self, time ):
        """Call update() for every provider component."""
        for comp_name in self.provider_comp_list:
            self.update( comp_name, time )

    # update_all()
    #-------------------------------------------------------------------
    def get_values( self, long_var_name, comp_name, time ):
        """
        Return the (possibly time-interpolated) values of
        long_var_name from component comp_name at framework time
        `time`.  Returns a NumPy ndarray that Babel can pass to
        other components as a SIDL generic array.

        Note: update() is called for comp_name before this method.
        """
        bmi = self.comp_set[ comp_name ]   # (pass in bmi ?)
        #------------------------------------------------------------
        # Disabled components never advance time or change their
        # initial values, and finalized components should just
        # return their final values (needed for
        # framework.finalize_all() to work) -- either way, no
        # interpolation is required.
        #------------------------------------------------------------
        comp_status = bmi.get_status()
        if (comp_status == 'initialized') or \
           (comp_status == 'finalized'):
            return bmi.get_values( long_var_name )
        #---------------------------
        # Case of no interpolation
        #---------------------------
        if (self.interpolation_method == 'None'):
            #------------------------------------
            # For testing. Is time in interval?
            #------------------------------------
            bmi_time = bmi.get_current_time()
            if (time > bmi_time):
                print('--------------------------------------------')
                print(' WARNING: (in time_interpolation.py)')
                print(' time > bmi_time in bmi.get_values().')
                print(' time, bmi_time =', time, bmi_time)
                print(' comp_name =', comp_name)
                print('--------------------------------------------')
                print(' ')
            return bmi.get_values( long_var_name )
        #-------------------------------
        # Case of Linear interpolation
        #-------------------------------
        if (self.interpolation_method == 'Linear'):
            i_vars = self.time_interp_vars[ long_var_name ]
            #------------------------------------
            # For testing. Is time in interval?
            #------------------------------------
            if (time > i_vars.t2):
                print('#######################################')
                print(' ERROR: time > t2 in get_values().')
                print(' time, t2 =', time, i_vars.t2)
                print(' comp_name =', comp_name)
                print('#######################################')
                print(' ')
            # Linear interpolation: v(t) = a*t + b.
            value = (i_vars.a * time) + i_vars.b
            return value
        #-------------------------------------
        # Case of Cubic Spline interpolation
        #-------------------------------------
        ## if (self.interpolation_method == 'Cubic'):

    # get_values()
    #-------------------------------------------------------------------
    def convert_time_units( self, in_time, in_units ):
        """
        Convert in_time, expressed in in_units, to seconds using the
        class-level conversion constants.  Unrecognized units are
        assumed to already be seconds (a copy is returned).

        Note: 'm' means minutes here, not months; 'months' must be
        spelled out.
        """
        if (in_units in ['years', 'y']):
            time = in_time * self.secs_per_year
        elif (in_units == 'months'):   ### Use 'm' ????
            time = in_time * self.secs_per_month
        elif (in_units in ['days', 'd']):
            time = in_time * self.secs_per_day
        elif (in_units in ['hours', 'h']):
            #-----------------------------------------------
            # BUG FIX: was "secs_per_hour" without "self.",
            # which raised NameError for hour units.
            #-----------------------------------------------
            time = in_time * self.secs_per_hour
        elif (in_units in ['minutes','m']):   ### month?
            time = in_time * self.secs_per_min
        else:
            # NOTE(review): assumes in_time is a NumPy array/scalar
            # here (plain floats have no copy()) -- confirm callers.
            time = in_time.copy()
        return time

    # convert_time_units()
#-------------------------------------------------------------------
|
<filename>task_edit.py
import discord
import asyncio
import datetime
import re
import mysql.connector
import settings as setting
###############################################################################################################
# MANUAL IMPORT
###############################################################################################################
import email_template as EMAIL_TEMPLATE
import leveling_system as LEVEL_SYSTEM
import dm_template as DM_TEMPLATE
import deadline_cross_reminder as DEADLINE
async def add(client, ctx, task_id):
    """
    Interactively edit an existing task record over Discord.

    Shows the current details of task `task_id`, then walks the
    invoking user through six prompts (title, description, assignee,
    estimated XP, estimated hours, status), asks for confirmation via
    reactions, and on confirmation writes the new values to the
    `task` table and re-posts the updated task details.

    NOTE(review): despite being named `add`, this coroutine EDITS an
    existing task -- confirm the name against the command registry.
    """
    # Fresh DB connection per invocation; buffered cursor so fetchone()
    # can be used without draining result sets.
    mydb = mysql.connector.connect(host=setting.HOST, port=setting.PORT, database=setting.DATABASE, user=setting.USER, password=setting.PASSWORD)
    mycur = mydb.cursor(buffered=True)
    inputs = []  # answers in prompt order: title, desc, assignee, xp, hours, status
    # Fetch and display the task's current values before prompting.
    mycur.execute("select * from task where Id = %s", (task_id, ))
    row = mycur.fetchone()
    # Column layout assumed: 1=Title, 2=Description, 3=Assigned_To,
    # 5=Status, 6=Estimated_Time, 8=Estimated_XP -- TODO confirm schema.
    title = row[1]
    description = row[2]
    assigned_to = row[3]
    status = row[5]
    estimated_time = row[6]
    estimated_xp = row[8]
    embed = discord.Embed(title="Task Details", description="The details of task with id #{} is:".format(task_id))
    embed.add_field(name="Id", value=task_id, inline=True)
    embed.add_field(name="Title", value=title, inline=True)
    embed.add_field(name="Description", value=description, inline=True)
    embed.add_field(name="Assigned_To", value=assigned_to, inline=True)
    embed.add_field(name="Status", value=status, inline=True)
    embed.add_field(name="Estimated_Time", value=estimated_time, inline=True)
    embed.add_field(name="Estimated_XP", value=estimated_xp, inline=True)
    text = await ctx.send(embed=embed)
    # --- small DB helpers (close over mycur/mydb) ---------------------
    def insert(insert_query, value):
        # NOTE(review): unused in this function; kept for parity with
        # sibling commands, presumably.
        mycur.execute(insert_query, value)
        mydb.commit()
    def update(update_query, value):
        # Execute an UPDATE and commit immediately.
        mycur.execute(update_query, value)
        mydb.commit()
    # Only accept messages from the invoking user in the same channel.
    def pred(m):
        return m.author == ctx.author and m.channel == ctx.channel
    # Only accept confirm/cancel reactions from the invoking user.
    def check(reaction, user):
        return (str(reaction.emoji) == '☑' or str(reaction.emoji) == '❎') and user == ctx.message.author
    async def take_input():
        # Wait (up to ~2.4 h) for the next message from the user.
        # NOTE(review): returns None on timeout; callers dereference
        # .content without checking, which would raise -- confirm
        # whether that is intended.
        try:
            message = await client.wait_for('message', check=pred, timeout=8640.0)
        except asyncio.TimeoutError:
            await ctx.send("Timeout. Please request a koder for reregistration.")
        else:
            return message
    async def take_reaction():
        # Wait for a confirm (True) or cancel (False) reaction;
        # returns None on timeout.
        try:
            result = await client.wait_for('reaction_add', check=check, timeout=8640.0)
        except asyncio.TimeoutError:
            await ctx.send("Timeout. Please request a koder for reregistration.")
        else:
            reaction, user = result
            if (str(reaction.emoji) == '☑'):
                return True
            if (str(reaction.emoji) == '❎'):
                return False
    # Each prompt below: send an embed, await the user's reply, record
    # it in `inputs`, then delete both messages to keep the channel tidy.
    # Embed for Title
    embed = discord.Embed(title="Hello there! (0/5)",
                          description="Let's begin with editing task.\n\nPlease enter title of your task.")
    embed.set_author(name="Welcome to Koders | Registration",
                     icon_url="https://cdn.discordapp.com/attachments/700257704723087359/709710821382553640/K_with_bg_1.png")
    embed.set_footer(text="Example\nMessage_Logs command")
    textEmbed = await ctx.send(embed=embed)
    textInput = await take_input()
    inputs.append(textInput.content)
    await textInput.delete()
    await textEmbed.delete()
    # Embed for Description
    embed = discord.Embed(title="Great, next step! (1/5)",
                          description="Please enter description of task\n(we won't spam, pinky promise!)")
    embed.set_author(name="Welcome to Koders | Registration",
                     icon_url="https://cdn.discordapp.com/attachments/700257704723087359/709710821382553640/K_with_bg_1.png")
    embed.set_footer(text="Example\nThis is basically about creating a message_log command in discord")
    textEmbed = await ctx.send(embed=embed)
    textInput = await take_input()
    inputs.append(textInput.content)
    await textInput.delete()
    await textEmbed.delete()
    # Embed for Assigned_To
    embed = discord.Embed(title="Great, next step! (2/5)",
                          description="Please enter the name to whom the task is Assigned_To\n(we won't spam, pinky promise!)")
    embed.set_author(name="Welcome to Koders | Registration",
                     icon_url="https://cdn.discordapp.com/attachments/700257704723087359/709710821382553640/K_with_bg_1.png")
    embed.set_footer(text="Example\nJhone doe")
    textEmbed = await ctx.send(embed=embed)
    textInput = await take_input()
    inputs.append(textInput.content)
    print(textInput.content)  # NOTE(review): debug print left in
    await textInput.delete()
    await textEmbed.delete()
    # Embed for Estimated_XP
    embed = discord.Embed(title="Great, next step! (3/5)",
                          description="Please enter **Estimated_XP** for the task\n(we won't spam, pinky promise!)")
    embed.set_author(name="Welcome to Koders | Registration",
                     icon_url="https://cdn.discordapp.com/attachments/700257704723087359/709710821382553640/K_with_bg_1.png")
    embed.set_footer(text="Example\n50 XP")
    textEmbed = await ctx.send(embed=embed)
    textInput = await take_input()
    inputs.append(textInput.content)
    await textInput.delete()
    await textEmbed.delete()
    # Embed for Estimated_Time
    embed = discord.Embed(title="Great, next step! (4/5)",
                          description="Please enter Estimated_Time\n(we won't spam, pinky promise!)")
    embed.set_author(name="Welcome to Koders | Registration",
                     icon_url="https://cdn.discordapp.com/attachments/700257704723087359/709710821382553640/K_with_bg_1.png")
    embed.set_footer(text="Example\nNumbers of hours\n 6 or 8")
    textEmbed = await ctx.send(embed=embed)
    textInput = await take_input()
    # Capture message metadata from this reply (timestamp shown in the
    # confirmation embed; username/discriminator split for later use).
    message = textInput
    timestamp = textInput.created_at
    discord_username = textInput.author
    author = textInput.author
    discord_username = str(discord_username)
    username, client_id = discord_username.split('#')
    inputs.append(textInput.content)
    await textInput.delete()
    await textEmbed.delete()
    # Embed for Status
    embed = discord.Embed(title="Great, next step! (5/5)",
                          description="Please enter status of task\n(we won't spam, pinky promise!)")
    embed.set_author(name="Welcome to Koders | Registration",
                     icon_url="https://cdn.discordapp.com/attachments/700257704723087359/709710821382553640/K_with_bg_1.png")
    embed.set_footer(text="Example\nIn_Progress, Completed")
    textEmbed = await ctx.send(embed=embed)
    textInput = await take_input()
    inputs.append(textInput.content)
    await textInput.delete()
    await textEmbed.delete()
    # Confirmation summary of all collected values.
    embed = discord.Embed(title="Confirmation", description="Please recheck the information and type yes or no",
                          color=0x0e71c7)
    embed.set_author(name="Are you sure?", url="https://www.github.com/koders-in/integrity")
    embed.set_thumbnail(url="https://image-1.flaticon.com/icons/png/32/2921/2921124.png")
    embed.add_field(name="Id", value=task_id, inline=True)
    embed.add_field(name="Title", value=inputs[0], inline=True)
    embed.add_field(name="Description", value=inputs[1], inline=True)
    embed.add_field(name="Assigned_To", value=inputs[2], inline=True)
    embed.add_field(name="Estimated_XP", value=inputs[3], inline=True)
    embed.add_field(name="Estimated_Hours", value=inputs[4], inline=True)
    embed.add_field(name="Status", value=inputs[5], inline=True)
    embed.add_field(name="Joined-at", value=timestamp, inline=True)
    text = await ctx.send(embed=embed)
    await text.add_reaction(emoji="☑")
    await text.add_reaction(emoji="❎")
    # Unpack the collected answers into named variables.
    title = inputs[0]
    description = inputs[1]
    assigned_to = inputs[2]
    estimated_xp = inputs[3]
    estimated_time = inputs[4]
    status = inputs[5]
    result = await take_reaction()  # True=confirm, False=cancel, None=timeout
    await text.delete()
    guild = ctx.guild
    channel = discord.utils.find(lambda c : c.id==message.channel.id, guild.channels)
    if (result):
        # Persist the edited values.
        update_query = "update task set Title = %s, Description = %s, Assigned_To = %s, Estimated_XP = %s, Estimated_Time = %s, Status = %s where Id = %s"
        value = (title, description, assigned_to, estimated_xp, estimated_time, status, task_id)
        update(update_query, value)
        await ctx.send("The task with id {} has been edited succesfully.".format(task_id))
        # NOTE(review): "TASK_EIDT_CHANNEL_ID" looks like a typo of
        # "TASK_EDIT"; presumably it matches the attribute name in
        # settings -- verify before renaming.
        task_edit_channel = client.get_channel(setting.TASK_EIDT_CHANNEL_ID)
        # Re-read the row so the announcement reflects the stored values.
        mycur.execute("select * from task where Id = %s", (task_id, ))
        row = mycur.fetchone()
        title = row[1]
        description = row[2]
        assigned_to = row[3]
        status = row[5]
        estimated_time = row[6]
        estimated_xp = row[8]
        embed = discord.Embed(title="Task Details", description="The details of task with id #{} is:".format(task_id))
        embed.add_field(name="Id", value=task_id, inline=True)
        embed.add_field(name="Title", value=title, inline=True)
        embed.add_field(name="Description", value=description, inline=True)
        embed.add_field(name="Assigned_To", value=assigned_to, inline=True)
        embed.add_field(name="Status", value=status, inline=True)
        embed.add_field(name="Estimated_Time", value=estimated_time, inline=True)
        embed.add_field(name="Estimated_XP", value=estimated_xp, inline=True)
        text = await task_edit_channel.send(embed=embed)
from pyisim.entities.role import Role
from pyisim.exceptions import InvalidOptionError
from pyisim.entities import (
Activity,
Access,
OrganizationalContainer,
Person,
Service,
StaticRole,
DynamicRole,
ProvisioningPolicy,
Group,
Account,
)
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from pyisim.auth import Session
def groups(
    session: "Session",
    by: str,
    service_dn: str = None,
    group_profile_name="",
    group_info="",
) -> List[Group]:
    """
    Service group search.
    Args:
        session (Session): Active ISIM Session
        by (str): "account", "access" or "service"
        service_dn (str, optional): Parent service DN if searching by service. Defaults to None.
        group_profile_name (str, optional): Group profile name if searching by service. Defaults to "".
        group_info (str, optional): Group name or description if searching by service. Defaults to "".
    Raises:
        NotImplementedError: Search by account and access not implemented
        InvalidOptionError: Unknown value for ``by``
    Returns:
        List[Group]: Search results
    """
    # Guard-clause style: reject the unsupported/unknown options up front.
    if by in ("account", "access"):
        raise NotImplementedError
    if by != "service":
        raise InvalidOptionError("Invalid option")
    found = session.soapclient.get_groups_by_service(
        service_dn, group_profile_name, group_info
    )
    return [Group(session, group=g) for g in found]
def people(
    session: "Session",
    by="cn",
    search_filter="*",
    profile_name="Person",
    attributes="*",
    embedded: List[str] = None,
    roles=False,
    limit=50,
) -> List[Person]:
    """
    Person search.
    Args:
        session (Session): Active ISIM Session
        by (str, optional): LDAP attribute to search by. Defaults to "cn".
        search_filter (str, optional): Filter to search by. Defaults to "*".
        profile_name (str, optional): Limits the search scope. Defaults to "Person".
        attributes (str, optional): Attributes to return in each Person instance. Defaults to "*".
        embedded (List[str], optional): Person attributes to embed as PyISIM entities.
        roles (bool, optional): If True, fetch each person's roles into its "embedded" attribute. Defaults to False.
        limit (int, optional): Maximum number of results. Defaults to 50.
    Returns:
        List[Person]: Search results
    """
    # The REST client expects the embedded attribute names as a single
    # comma-separated string; None/empty collapses to "".
    embedded_attrs = ",".join(embedded) if embedded else ""
    raw = session.restclient.search_people(
        profile_name,
        atributos=attributes,
        embedded=embedded_attrs,
        buscar_por=by,
        filtro=search_filter,
        limit=limit,
    )
    found = [Person(session, person=entry) for entry in raw]
    if roles:
        for person in found:
            person.get_embedded(session, roles=True)
    return found
def provisioning_policy(
    session: "Session", name: str, parent: OrganizationalContainer
) -> List[ProvisioningPolicy]:
    """
    Provisioning Policy search.
    Args:
        session (Session): Active ISIM Session
        name (str): Provisioning Policy name
        parent (OrganizationalContainer): Provisioning Policy business unit
    Returns:
        List[ProvisioningPolicy]: Search results
    """
    found = session.soapclient.search_provisioning_policy(
        parent.wsou, nombre_politica=name, find_unique=False
    )
    return [ProvisioningPolicy(session, provisioning_policy=entry) for entry in found]
def roles(session: "Session", by="errolename", search_filter="*") -> List[Role]:
    """
    Role search.
    Args:
        session (Session): Active ISIM Session
        by (str, optional): LDAP attribute to search by. Defaults to "errolename".
        search_filter (str, optional): Filter to search by. Defaults to "*".
    Returns:
        List[Role]: Search results. Returns both Dynamic and Static Roles.
    """
    found = session.soapclient.search_role(f"({by}={search_filter})", find_unique=False)
    wrapped = []
    for raw_role in found:
        # A role carrying an "erjavascript" attribute is a dynamic role
        # (its membership is computed by a script).
        dynamic = any(
            attr.name == "erjavascript" for attr in raw_role.attributes.item  # type: ignore
        )
        if dynamic:
            wrapped.append(DynamicRole(session, rol=raw_role))
        else:
            wrapped.append(StaticRole(session, rol=raw_role))
    return wrapped
def activities(
    session: "Session", by="activityName", search_filter="*"
) -> List[Activity]:
    """
    Pending Activity search.
    Args:
        session (Session): Active ISIM Session
        by (str, optional): "requestId" or filters available in ISIM's REST API docs
            (activityId, activityName, serviceName, participantName). Defaults to "activityName".
        search_filter (str, optional): Filter to search by. Defaults to "*".
    Returns:
        List[Activity]: Search results
    """
    # Request-id lookups go through SOAP; everything else through REST.
    if by != "requestId":
        found = session.restclient.search_activity(
            search_attr=by, search_filter=search_filter
        )
        return [Activity(session, activity=entry) for entry in found]
    found = session.soapclient.get_request_activities(search_filter)
    return [Activity(session, id=entry.id) for entry in found]
def access(
    session: "Session", by="accessName", search_filter="*", attributes="*", limit=20
) -> List[Access]:
    """
    Access search.
    Args:
        session (Session): Active ISIM Session
        by (str, optional): Attribute to search by. Defaults to "accessName".
        search_filter (str, optional): Filter to search by. Defaults to "*".
        attributes (str, optional): Attributes to return. Defaults to "*".
        limit (int, optional): Maximum number of results. Defaults to 20.
    Returns:
        List[Access]: Search results
    """
    found = session.restclient.search_access(
        by=by, filtro=search_filter, atributos=attributes, limit=limit
    )
    return [Access(session, access=entry) for entry in found]
def service(
    session: "Session",
    parent: OrganizationalContainer,
    by="erservicename",
    search_filter="*",
) -> List[Service]:
    """
    Service search.
    Args:
        session (Session): Active ISIM Session
        parent (OrganizationalContainer): Service business unit
        by (str, optional): LDAP attribute to search by. Defaults to "erservicename".
        search_filter (str, optional): Filter to search by. Defaults to "*".
    Returns:
        List[Service]: Search results
    """
    found = session.soapclient.search_service(
        parent.wsou, f"({by}={search_filter})", find_unique=False
    )
    return [Service(session, service=entry) for entry in found]
def organizational_container(
    session: "Session", profile_name: str, search_filter: str, by="name"
) -> List[OrganizationalContainer]:
    """
    Organizational container search.
    Profile names:
    * bporganizations
    * organizationunits
    * organizations
    * locations
    * admindomains
    Args:
        session (Session): Active ISIM Session
        profile_name (str): Organizational container profile name
        search_filter (str): Filter to search by.
        by (str, optional): Attribute to search by. Defaults to "name".
    Returns:
        List[OrganizationalContainer]: Search results.
    """
    # The REST client treats None as "search by name" (the default attribute).
    search_attr = None if by == "name" else by
    found = session.restclient.search_containers(
        profile_name, buscar_por=search_attr, filtro=search_filter, attributes="dn"
    )
    return [
        OrganizationalContainer(session, organizational_container=entry)
        for entry in found
    ]
def account(
    session: "Session",
    ldap_search_filter: str,
    service: "Service" = None,
) -> List[Account]:
    """
    Account search.
    Args:
        session (Session): Active ISIM Session
        ldap_search_filter (str): LDAP filter to search accounts by
        service (Service, optional): If given, restricts the search to the
            service's account profile and keeps only accounts belonging to
            that service. Defaults to None.
    Returns:
        List[Account]: Search results
    """
    # Consistency fix: this was the only public search function in the module
    # without a docstring. Also hoists the duplicated search_accounts() call.
    args = {"filter": ldap_search_filter}
    if service:
        args["profile"] = session.soapclient.get_account_profile_for_service(
            service.dn
        )
    results = session.soapclient.search_accounts(args)
    if service:
        # The profile restriction alone is not enough: other services may
        # share the profile, so filter on the owning service's name too.
        return [
            Account(session, account=r)
            for r in results
            if r["serviceName"] == service.name
        ]
    return [Account(session, account=r) for r in results]
|
from PyQt6.QtCore import *
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
from controllers.main_controller import MainController
from models.watch_only_wallet import WatchOnlyWallet
from views.modal_view import Message, Modal
class AddressListView(QFrame):
    """Scrollable list of wallet addresses plus a control to derive new ones.

    Fresh (unused) addresses are shown with a "yes" dialog icon, addresses
    that have received funds with a "no" dialog icon.
    """

    controller: MainController
    watch_only_wallet: WatchOnlyWallet
    incoming_balance: int

    def __init__(self, controller: MainController, watch_only_wallet: WatchOnlyWallet):
        super().__init__()
        self.controller = controller
        self.watch_only_wallet = watch_only_wallet
        self.watch_only_wallet.incoming_balance_satoshis_changed.connect(
            self.handle_incoming_balance_changed
        )
        self.size_policy = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred)
        self.size_policy.setHorizontalStretch(2)
        self.setSizePolicy(self.size_policy)
        self.layout = QVBoxLayout()
        self.setLayout(self.layout)
        self.modal = Modal()
        self.scroll = QScrollArea()
        self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn)
        self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
        self.scroll.setWidgetResizable(True)
        # BUG FIX: PyQt6 scopes enum members under Qt.AlignmentFlag;
        # the old Qt.Alignment.AlignTop access raises AttributeError.
        self.scroll.setAlignment(Qt.AlignmentFlag.AlignTop)
        self.list = QListWidget()
        self.scroll.setWidget(self.list)
        for address in self.watch_only_wallet.ui_addresses:
            address_widget = QListWidgetItem(address.label)
            icon_style = QStyle.StandardPixmap.SP_DialogYesButton if address.is_fresh else QStyle.StandardPixmap.SP_DialogNoButton
            icon = self.style().standardIcon(icon_style)
            address_widget.setIcon(icon)
            self.list.addItem(address_widget)
        self.new_address_name_input = QLineEdit()
        self.new_address_name_input.setMaxLength(32)
        self.new_address_name_input.setPlaceholderText("Label (Required)")
        # BUG FIX: same PyQt6 enum scoping as above (Qt.AlignmentFlag).
        self.new_address_name_input.setAlignment(Qt.AlignmentFlag.AlignBottom)
        self.new_address_button = QPushButton("Generate New Receiving Address")
        self.new_address_button.clicked.connect(self.handle_new_address_button_clicked)
        self.layout.addWidget(self.scroll)
        self.layout.addWidget(self.new_address_name_input)
        self.layout.addWidget(self.new_address_button)

    def handle_new_address_button_clicked(self):
        """Derive a new external address labeled with the input text and show it on top of the list."""
        if self.watch_only_wallet.has_reached_gap_limit:
            self.modal.show(Message.GAP_LIMIT_REACHED)
            return
        address_label = self.new_address_name_input.text()
        if not address_label:
            # A label is required; silently ignore empty submissions.
            return
        self.new_address_name_input.clear()
        # Side effect only: the wallet signal/list refresh shows the result
        # (the previous code bound the return value to an unused local).
        self.controller.derive_external_address(address_label)
        address_widget = QListWidgetItem(address_label)
        icon = self.style().standardIcon(QStyle.StandardPixmap.SP_DialogYesButton)
        address_widget.setIcon(icon)
        self.list.clearSelection()
        self.list.insertItem(0, address_widget)
        self.list.setCurrentRow(0)
        self.list.setFocus()

    @pyqtSlot(int)
    def handle_incoming_balance_changed(self, incoming_balance: int):
        """Swap icons to "no" for any address that is no longer fresh."""
        for i, address in enumerate(self.watch_only_wallet.ui_addresses):
            if not address.is_fresh:
                list_item = self.list.item(i)
                # BUG FIX: QStyle.SP_DialogNoButton is PyQt5 syntax; PyQt6
                # requires QStyle.StandardPixmap.SP_DialogNoButton (matching
                # the usage in __init__).
                icon = self.style().standardIcon(QStyle.StandardPixmap.SP_DialogNoButton)
                list_item.setIcon(icon)
|
# coding=utf-8
# import libraries
import pandas as pd
import streamlit as st
from annoy import AnnoyIndex
import os
import math
import warnings
from unidecode import unidecode
warnings.simplefilter("ignore")
# variables
all_name = "All"  # sentinel option in the league multiselect meaning "no league filter"
# read df
@st.cache(allow_output_mutation=True)
def load_data():
    """Load the FIFA player dataset and derive helper columns.

    Normalizes accented player names, splits the positions string into a
    list, and keeps only the contract's end component.
    """
    data = pd.read_csv("sofifa2020.csv")
    data['name'] = data['name'].apply(unidecode)
    data["positions_list"] = data["positions"].apply(lambda pos: pos.split(","))
    data["contract"] = data["contract"].apply(lambda c: str(c).split(",")[-1].strip())
    return data
# ---- script-level setup: dataset, option lists, and sidebar widgets ----
df = load_data()
league_list = list(df["league"].unique())
player_list = list(df["name"].unique())
# Leagues pre-selected in the league multiselect.
default_leagues = [
    "Spain Primera Division",
    "Italian Serie A",
    "French Ligue 1",
    "English Premier League",
    "German 1. Bundesliga",
    "Holland Eredivisie",
]
# All playing-position codes that may appear in the "positions" column.
positions_list = [
    "LW",
    "LS",
    "ST",
    "RW",
    "LF",
    "CF",
    "RF",
    "CAM",
    "LM",
    "CM",
    "RM",
    "CDM",
    "LWB",
    "LB",
    "CB",
    "RB",
    "RWB",
    "GK",
]
# Columns displayed in the result table.
show_columns = ['name', 'photo_url', 'teams', 'league', 'contract', 'positions', 'age', 'height', 'weight',
                'Overall Rating', 'Potential', 'Value', 'Wage', 'Release Clause', 'player_traits']
# Numeric skill columns that form the similarity feature vector fed to Annoy.
columns_to_compare = [
    "Potential",
    "Crossing",
    "Finishing",
    "HeadingAccuracy",
    "ShortPassing",
    "Volleys",
    "Dribbling",
    "Curve",
    "FKAccuracy",
    "LongPassing",
    "BallControl",
    "Acceleration",
    "SprintSpeed",
    "Agility",
    "Reactions",
    "Balance",
    "ShotPower",
    "Jumping",
    "Stamina",
    "Strength",
    "LongShots",
    "Aggression",
    "Interceptions",
    "Positioning",
    "Vision",
    "Penalties",
    "Composure",
    "DefensiveAwareness",
    "StandingTackle",
    "SlidingTackle",
    "GKDiving",
    "GKHandling",
    "GKKicking",
    "GKPositioning",
    "GKReflexes"
]
################################################################
# css style: hide Streamlit's default footer
hide_streamlit_style = """
<style>
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Annoy (Approximate Nearest Neighbors Oh Yeah) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are mmapped into memory so that many processes may share the same data.
# https://github.com/spotify/annoy
##################################################################
# sidebar filters
st.sidebar.title(":male-detective: Similar Player Detector")
st.sidebar.title(":pick: Filters")
st.sidebar.title("Target:")
target_player_name = st.sidebar.selectbox(
    "Player:", [""] + player_list
)
target_player_name = target_player_name.strip()
st.sidebar.title("Similar Player Conditions:")
leagues = st.sidebar.multiselect(
    "League:", [all_name] + league_list, default=default_leagues
)
age = st.sidebar.slider("Age:", min_value=15, max_value=50, value=30)
# User enters millions of euros; convert to absolute euros.
transfer_fee = 1000000 * float(
    st.sidebar.text_input("Maximum Transfer Fee (€M):", "7.5")
)
# User enters thousands of euros; convert to absolute euros.
wage = 1000 * float(st.sidebar.text_input("Maximum Wage (€K):", "50"))
top_K = st.sidebar.slider(
    "K Top Similar Players", min_value=0, max_value=20, value=5
)
is_scan = st.sidebar.button("Detect")
st.sidebar.header("About")
# NOTE(review): "Detecor" is a typo in the displayed string, left as-is here.
st.sidebar.info(
    "Similar Player Detecor finds the most similar players to the one you search (developed by <NAME> <<EMAIL>>)."
)
##############################################################################
# if detect button is clicked, then show the main components of the dashboard
def filter_positions(row, positions):
    """Return True if the player's position list contains any of *positions*."""
    return any(position in row["positions_list"] for position in positions)
@st.cache(allow_output_mutation=True)
def scan(leagues, transfer_fee, wage, age):
    """Find the top-K players most similar to the selected target player.

    Filters the dataset by league, age, value, wage and shared position,
    then runs an Annoy nearest-neighbour search on the skill columns.
    """
    # NOTE(review): target_player_name and top_K are read from module globals,
    # so they are not part of st.cache's argument-based cache key — changing
    # the selected player or K may serve stale cached results. Consider
    # passing them as parameters.
    df = load_data()
    target_player = df.loc[df['name'] == target_player_name]
    positions = target_player['positions'].iloc[0].split(",")
    target_player_KPIs = target_player[columns_to_compare].to_numpy()[0]
    # Exclude the target himself, then apply the sidebar constraints.
    df = df.loc[df['name'] != target_player_name]
    df = df[df["age"] <= age]
    if all_name not in leagues:
        df = df[df["league"].isin(leagues)]
    df = df[(df["Value"] <= transfer_fee) & (df["Wage"] <= wage)]
    # Keep only players sharing at least one position with the target.
    df["filter_positions"] = df.apply(
        lambda row: filter_positions(row, positions), axis=1)
    search_space = df.loc[df["filter_positions"] == True]
    # calculate ANNOY
    annoy = AnnoyIndex(len(columns_to_compare), 'euclidean')
    search_space_array = search_space[columns_to_compare].to_numpy()
    for i in range(search_space_array.shape[0]):
        annoy.add_item(i, search_space_array[i, :])
    annoy.build(n_trees=10)
    indices = annoy.get_nns_by_vector(target_player_KPIs, top_K)
    subset = search_space.iloc[indices, :]
    return subset
if is_scan:
result = scan(leagues, transfer_fee, wage, age)
st.write(result[show_columns])
|
#!/usr/bin/env python
# coding: utf-8
# # Image Classifier
# Creating a classifier model for images
# Author: <NAME> (Chrono-Logical)
# Requirements:
# 1. tensorflow - view documentation to install
# 2. keras - view documentation to install
#
# (If you have anaconda installed, you can simply use anaconda navigator to install both packages)
# In[1]:
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
import pickle
from matplotlib import pyplot as plt
import cv2
import os
# In[2]:
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Model class to create a deep CNN classifier with following functions:
# 1. normalize - to normalize input feature data
# 2. build_model - to define the model
# 3. train - to train the model
# 4. save_model - to save the model and model weights
# 5. visualize - to view metrics graphically
# 6. load_model - to load a saved model
# 7. transform_image - to resize all images to required size
# 8. test - to run classifier on particular data
# CNN Model Characteristics:
# 1. Sequential model
# 2. 3 Convolutional layers - 2D Convolution and 2D Max Pooling
# 3. 2 hidden layers - Dense
# 4. 1 output layer - Dense
# In[3]:
class KerasModel():
    """Binary image classifier ("Safe" vs "Confidential") built on a small CNN.

    Loads pickled feature/label arrays from the ShellPrivacyFilterDemo data
    directory and provides helpers to build, train, persist, plot, and query
    the model.
    """

    def __init__(self):
        # Index 0 -> label 0 ("Safe"), index 1 -> label 1 ("Confidential").
        self.categories = ["Safe", "Confidential"]
        data_dir = os.path.join(os.path.expanduser("~"), "ShellPrivacyFilterDemo", "data", "Image Dataset")
        # Use context managers so the pickle file handles are closed
        # (the previous code leaked both handles).
        with open(os.path.join(data_dir, "features.pickle"), "rb") as features_file:
            self.X = pickle.load(features_file)
        with open(os.path.join(data_dir, "labels.pickle"), "rb") as labels_file:
            self.y = pickle.load(labels_file)

    def normalize(self):
        """Scale pixel values from [0, 255] into [0, 1]."""
        self.X = self.X/255.0

    def build_model(self):
        """Define the CNN: 3 conv/pool blocks, 2 dense hidden layers, 1 sigmoid output."""
        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3),
                              activation = "relu",
                              input_shape = self.X.shape[1:]))
        self.model.add(MaxPooling2D(pool_size = (2, 2)))
        self.model.add(Conv2D(64, (3, 3),
                              activation = "relu"))
        self.model.add(MaxPooling2D(pool_size = (2, 2)))
        self.model.add(Conv2D(64, (3, 3),
                              activation = "relu"))
        self.model.add(MaxPooling2D(pool_size = (2, 2)))
        self.model.add(Dropout(0.25))
        self.model.add(Flatten())
        self.model.add(Dense(128,
                             activation = "relu"))
        self.model.add(Dense(128,
                             activation = "relu"))
        # Single sigmoid output for binary classification.
        self.model.add(Dense(1,
                             activation = "sigmoid"))
        self.model.compile(loss = "binary_crossentropy",
                           optimizer = "adam",
                           metrics = ["accuracy"])

    def train(self):
        """Fit the model; keeps the returned History object on self.history."""
        self.history = self.model.fit(self.X, self.y, batch_size = 32, epochs = 50, validation_split = 0.1)

    def save_model(self):
        """Persist the trained model and its training-history dict to disk."""
        models_dir = os.path.join(os.path.expanduser("~"), "ShellPrivacyFilterDemo", "backend", "models")
        self.model.save(os.path.join(models_dir, "keras_model"))
        # BUG FIX: os.path.expanduser() was called without its required path
        # argument here, which raised a TypeError at save time.
        with open(os.path.join(models_dir, "model_history"), 'wb') as file_pi:
            pickle.dump(self.history.history, file_pi)

    def visualize(self):
        """Plot training/validation accuracy per epoch."""
        # BUG FIX: after train() self.history is a keras History object, while
        # after load_model() it is the plain dict that was pickled. The old
        # code only worked in the loaded case; normalize to the dict here.
        history = getattr(self.history, "history", self.history)
        print(history.keys())
        plt.figure(1)
        plt.plot(history['accuracy'])
        plt.plot(history['val_accuracy'])
        plt.title('Model Accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Validation'], loc = 'upper left')

    def load_model(self):
        """Load the saved model and its pickled training-history dict."""
        self.model = load_model(os.path.join(os.path.expanduser("~"), "ShellPrivacyFilterDemo", "backend", "models","keras_model"))
        with open(os.path.join(os.path.expanduser("~"), "ShellPrivacyFilterDemo", "backend", "models","model_history"), "rb") as history_file:
            self.history = pickle.load(history_file)

    def transform_image(self, image_path):
        """Read an image as grayscale, scale to [0, 1], resize to 50x50, add batch/channel dims."""
        img_size = 50
        img_array = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        img_array = img_array/255.0
        new_array = cv2.resize(img_array, (img_size, img_size))
        return new_array.reshape(-1, img_size, img_size, 1)

    def test(self, image_path):
        """Classify the image at image_path; returns "Safe" or "Confidential"."""
        image = self.transform_image(image_path)
        prediction = self.model.predict(image)
        # BUG FIX: the network has ONE sigmoid output, so predict() yields a
        # single probability. The old list(prediction[0]).index(max(...))
        # always returned index 0 ("Safe"). Threshold the probability instead.
        return self.categories[int(float(prediction[0][0]) > 0.5)]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 21:57:56 2021
@author: cui_hao
"""
# Repost (forwarding) network crawler
#server_crawl_repost.py
import pandas as pd
import os
#from datetime import datetime, timedelta
import random
import requests
from bs4 import BeautifulSoup
import re
import time
# Working directory with the keyword/cluster text files.
#cwd = "/mnt/sdb1/cuihao/keywords"
cwd = '/Users/cui_hao/Documents/GitHub/Weibo_thesis/keywords0717to0917/clustered grouped'
os.chdir(cwd)
# Be very careful about the directory!!!
# Currently selected cluster file; the bare string literals below are inert
# no-op statements kept as a reminder of the other cluster names.
type_group_cluster = "group2_cluster1"
"group3_cluster2"
#"group3_cluster3"
#"group2_cluster2"
#"group1_cluster3"
#"group1_cluster2"
#"group1_cluster1"
#"group2_cluster3"
#
#
#"group2_cluster1"
#"group1_cluster3"
# Read the newline-separated keyword list for the selected cluster.
text_file = open( type_group_cluster + ".txt", "r")
groups_clusters = text_file.read().split('\n')
text_file.close()
groups_clusters = list(filter(None, groups_clusters)) #576 — must filter out empty strings!
print(len(groups_clusters)) #408 #576 #410
#social_hashtags = groups_clusters
# Session cookie for weibo.cn (rotated manually; older values kept in the comments below).
today_cookie_mobile = "M_WEIBOCN_PARAMS=lfid%3D102803%26luicode%3D20000174; _T_WM=413a515abcf58f8344099ca33f07ae3b; SSOLoginState=1638380969; SUB=_2A25Mo8X5DeRhGeFN71sS8SzFwjWIHXVsb-uxrDV6PUJbktB-LWankW1NQAOoWi_FAweL0o78jzR7U3V1fYJ5Q2Jt; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27"
#"SSOLoginState=1638380969; SUB=_2A25Mo8X5DeRhGeFN71sS8SzFwjWIHXVsb-uxrDV6PUJbktB-LWankW1NQAOoWi_FAweL0o78jzR7U3V1fYJ5Q2Jt; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27"
#"M_WEIBOCN_PARAMS=lfid%3D102803%26luicode%3D20000174; MLOGIN=1; SSOLoginState=1638380969; SUB=_2A25Mo8X5DeRhGeFN71sS8SzFwjWIHXVsb-uxrDV6PUJbktB-LWankW1NQAOoWi_FAweL0o78jzR7U3V1fYJ5Q2Jt; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; _T_WM=74697775660"
#"_T_WM=9fc10bfbf501ae65832ebcd520bf6ad8; SSOLoginState=1637015847; SUB=_2A25MlpF3DeRhGeFN71sS8SzFwjWIHXVseD8_rDV6PUJbktCOLVDxkW1NQAOoWn_ZS9a95VFuWnckhtXgLWHjA2v2; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#"SSOLoginState=1637015847; SUB=_2A25MlpF<KEY>; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; _T_WM=21d48925b8312c9b531054204ea35983; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#"_T_WM=21d48925b8312c9b531054204ea35983; SUB=_2A25MH62hDeRhGeFN71sS8SzFwjWIHXVv4zPprDV6PUJbktB-LWvDkW1NQAOoWmh4VoDCoPnucODU2A7J1yIs8xmR; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#" _T_WM=d5300f6f3ad491560416e9f074471fc4; SUB=_2A25MH62hDeRhGeFN71sS8SzFwjWIHXVv4zPprDV6PUJbktB-LWvDkW1NQAOoWmh4VoDCoPnucODU2A7J1yIs8xmR; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#"_T_WM=ffd86b775690f6937a49ccead078b0d2; SCF=AtoELGQ3lZ0Lbib6mi1TgsaEm9--Iee8UUtht0oMX-Eo4FrWD9eFP5Lj0mKXzQzi5vvMkJKum6-jToRrYn6N3io.; SUB=_2A25MbASQDeRhGedP7VYY-CjEzT-IHXVvrqzYrDV6PUJbktAKLXnakW1NX0HEZ2UTBu73i93r9x0V3fanfgXmgMAe; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5k5xJReM75SqTjq-AHoJug5NHD95QpeKqX1Knc1hq0Ws4Dqcj.i--fiK.NiK.pi--4i-2EiKLhi--Xi-i8i-8si--fi-88i-z7; SSOLoginState=1634235584"
#"_T_WM=d5300f6f3ad491560416e9f074471fc4; SUB=_2A25MH62hDeRhGeFN71sS8SzFwjWIHXVv4zPprDV6PUJbktB-LWvDkW1NQAOoWmh4VoDCoPnucODU2A7J1yIs8xmR; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#"_T_WM=d5300f6f3ad491560416e9f074471fc4; SSOLoginState=1629216242; SUB=_2A25MH62hDeRhGeFN71sS8SzFwjWIHXVv4zPprDV6PUJbktB-LWvDkW1NQAOoWmh4VoDCoPnucODU2A7J1yIs8xmR; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#"SSOLoginState=1629216242; SUB=_2A25MH62hDeRhGeFN71sS8SzFwjWIHXVv4zPprDV6PUJbktB-LWvDkW1NQAOoWmh4VoDCoPnucODU2A7J1yIs8xmR; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; _T_WM=1e0484d815eea099efb0ebb89281fe76; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#"SSOLoginState=1629216242; SUB=_2A25MH62hDeRhGeFN71sS8SzFwjWIHXVv4zPprDV6PUJbktB-LWvDkW1NQAOoWmh4VoDCoPnucODU2A7J1yIs8xmR; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; _T_WM=1e0484d815eea099efb0ebb89281fe76; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA."
#" _T_WM=1e0484d815eea099efb0ebb89281fe76; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA.; SSOLoginState=1627897633; SUB=_2A25MA89xDeRhGeFN71sS8SzFwjWIHXVvD9E5rDV6PUJbktCOLRjskW1NQAOoWgPm3__D35Qc6vnOTgQ9Q72jCtpi; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; ALF=1630489279"
#"M_WEIBOCN_PARAMS=luicode%3D20000174; _T_WM=1e0484d815eea099efb0ebb89281fe76; SCF=Aha4XxHMg9AQ8pCP6aqmXzBmnA6kTsunGvm4vTCYJxelLV9quW9Q5FqMLHPUxa03Lpv-piijUf106YnKdB5QthA.; SSOLoginState=1627897633; SUB=_2A25MA89xDeRhGeFN71sS8SzFwjWIHXVvD9E5rDV6PUJbktCOLRjskW1NQAOoWgPm3__D35Qc6vnOTgQ9Q72jCtpi; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; ALF=1630489279"
# "_T_WM=20275767258; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFebFUdhj4ecuLMYZc.yuio5NHD95QNe0B4e02E1K.4Ws4Dqcj_i--4i-82iKysi--ciK.Ni-27i--Ni-i8i-2pi--fi-z7iK.pi--Xi-i8i-27; SCF=AgbDFxhQEEGFVTUwnhqpc9Q-tSqinhWHs850o7JgLTBkB_7MfrOD0_rFfo4sqlFYzCdtrPLt1vQgANvJJR0hmqc.; SUB=_2A25y0nUCDeRhGeFN71sS8SzFwjWIHXVuPRtKrDV6PUJbktANLVHSkW1NQAOoWpaPC1jemsrAt2Ea-RYRhaSwfVv8; SSOLoginState=1607861586"
#"_T_WM=7b9145612605f10755dec998c03c4377; ALF=1605624566; SCF=AgbDFxhQEEGFVTUwnhqpc9Q-tSqinhWHs850o7JgLTBkCH1uaH2vxHTl-suSjYoGYYR-DoADC6V-lD3KNXaszp4.; SUB=_2A25yiNvzDeRhGedP7VYY-CjEzT-IHXVucuW7rDV6PUJbktAKLRb3kW1NX0HEZ05A7KVo8P_bY9k7CWtmFEg3dCE4; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5k5xJReM75SqTjq-AHoJug5NHD95QpeKqX1Knc1hq0Ws4Dqcjbi--NiKyhi-8Fi--fiKnfi-8hi--Xi-zRi-iF-c8bMJLVqBtt; SUHB=079yb-W6s5RZiP; SSOLoginState=1603054499"
#"_T_WM=7b9145612605f10755dec998c03c4377; SCF=AgbDFxhQEEGFVTUwnhqpc9Q-tSqinhWHs850o7JgLTBkYHrh10kRlgiqi2iZO7G7Sg7L8sHhlWrV10QenI9ty_c.; SUB=_2A25yiCWmDeRhGedP7VYY-CjEzT-IHXVuc0vurDV6PUJbktANLVfkkW1NX0HEZ6GVXPvZtyjbA1G36a2DkqQqIERA; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5k5xJReM75SqTjq-AHoJug5JpX5K-hUgL.Fo2pSoB41hqRSoe2dJLoIEzLxKML12eLB-zLxK-L1h-LB-eLxKBLBonLB.iKqgiEIg4y; SUHB=0P1dxjbb6Q7TXI; SSOLoginState=1603032566; ALF=1605624566"
# Cookie jar and User-Agent header used for every weibo.cn request below.
cook_mobile = {"Cookie": today_cookie_mobile}
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
#"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36"
header = { 'User-Agent' : user_agent}
# Example target URL shape: https://weibo.cn/repost/JbCmgg3ev?uid=2009113272&rl=1
def GetMobileUrl(web_link):
    """Convert a desktop weibo.com status link into its mobile repost-page URL.

    The status code is the last path segment before the query string; the
    uid is the last all-digit path segment found in the link.
    """
    status_code = web_link.split("?")[0].rsplit("/", 1)[-1]
    user_id = re.match(r".*/(\d+)", web_link).group(1)
    return f"https://weibo.cn/repost/{status_code}?uid={user_id}&rl=1"
def getUrlList(Mobile_url): #should be a string, the last part of the url
    """
    Collect every pagination URL of one post's repost pages.
    :param Mobile_url: mobile repost-page link (https://weibo.cn/repost/...)
    :return: list of page URLs (last page first); just [Mobile_url] when
        there is only a single page
    """
    cook_mobile = {"Cookie": today_cookie_mobile}
    form_action = Mobile_url.split("/")[-1]
    #form_action = form_action_url
    #url = comment_page + form_action  # strings can be concatenated
    # Random delay to avoid triggering rate limiting.
    time.sleep(random.uniform(0.5, 3.5)) #randint
    html = requests.get(Mobile_url, cookies=cook_mobile, headers = header).content #
    soup = BeautifulSoup(html, "lxml") #html.parser
    # The page-navigation form only exists when the reposts span multiple pages.
    form = soup.find("form", attrs={"action": "/repost/" + form_action})
    if form is None:
        url_list = [Mobile_url]
    else:
        #a = form.find('a').get('href')
        #b = a[0:len(a)-1]  # first part of the page URL
        b = Mobile_url + "&page="
        # The form's div text looks like "current/totalX"; take the part after
        # "/" and strip its trailing character to get the page count.
        c = form.find("div").text.split("/")[1]
        d = len(c) -1
        e = c[0:d]  # number of repost pages
        url_list = []
        for i in range(1,int(e) + 1):
            url_list.append(b + str(i))
        url_list.reverse()
    return url_list
####################################
def GetRepost(all_url):
    """Crawl every repost page in all_url and write one TSV row per repost.

    Output file is named <timestamp>repost.csv in the current directory.
    NOTE(review): the broad `except Exception` below makes every later
    except clause unreachable, and `urllib` is never imported, so that
    handler would itself raise NameError if it were ever reached.
    """
    cook_mobile = {"Cookie": today_cookie_mobile}
    try:
        weiboRepost=open(str(time.strftime('%Y%m%d%H%M%S',time.localtime()))+"repost.csv", 'w')
        # TSV header (Chinese column names: original user/content/time,
        # reposting user name/id/content, likes, repost time, source device).
        title = '原用户名\t原内容\t原发布时间\t转发用户名\t转发用户id\t转发内容\t点赞数\t转发时间\t转发来源\n' #\
        print(title.replace("\n",""))
        weiboRepost.write(title)
        # Fetch the last page first to extract the original post's metadata.
        html = requests.get(all_url[-1], cookies=cook_mobile, headers = header).content
        soup = BeautifulSoup(html, "lxml")
        r = soup.findAll('div', attrs={"class": "c"})
        origin_username = soup.find("div", attrs={"id": "M_"}).find("a").text
        origin_content = soup.find("div", attrs={"id": "M_"}).find("span", attrs={"class": "ctt"}).text
        origin_time = soup.find("div", attrs={"id": "M_"}).find("span", attrs={"class": "ct"}).text
        #for i in all_url[20:]:
        for i in all_url:
            time.sleep(random.uniform(0.5, 3.5)) #randint (3,8)
            # Longer pause every 30 pages to stay under the rate limit.
            if all_url.index(i) > 0 and all_url.index(i) % 30 == 0:
                time.sleep(100)
            html = requests.get(i, cookies=cook_mobile, headers = header).content
            soup = BeautifulSoup(html, "lxml")
            r = soup.findAll('div', attrs={"class": "c"}) ##???
            for j in r[4:]: #4, should be 4; not 3
                # Branch 1: repost entries whose author link is a /u/<uid> URL.
                if j.find_all('a',href=re.compile("/u")) != []:
                    #item = j.find_all('a',href=re.compile("/u")) # it is a list, length is 1.
                    #userid = item[0].get("href").split("/")[2]
                    ########## NOTE: the earlier approach above was wrong — it did not yield the userid!
                    userid = re.compile(".*/(\d+)").match(str(j.find_all('a')[0])).group(1)
                    #username = j.text.split('\xa0')[0].split(':')[0]
                    rep_username = j.find("a").text
                    repost_content = j.text.split('\xa0')[0] #.split(':')[1]
                    #numlikes = j.text.split("\xa0")[-3]
                    if len(j.text.split('\xa0')) != 1:
                        numlikes = j.text.split('\xa0')[1]
                        repost_time = j.text.split('\xa0')[2]
                        from_device = j.text.split('\xa0')[3]
                        line = origin_username + "\t" + origin_content + "\t"+ origin_time +"\t" + rep_username + "\t" + userid + "\t" + repost_content + "\t" + numlikes + "\t" + repost_time + "\t" + from_device + "\n"
                        #line = userid + "\t" + username + "\t" + repost_content + "\t" + numlikes + "\t" + repost_time + "\t" + origin + "\n"
                        print(line.replace("\n", ""))
                        #weiboRepost.write(line)
                # Branch 2: entries whose author uses a vanity URL (no /u/ prefix).
                if j.find_all('a',href=re.compile("/u")) == [] and j.text.split('\xa0')[0] !="" : #type(re.compile(".*/(\w*\d*)").match(str(j.find_all('a')[0]))) == re.Match and
                    userid = re.compile(".*/(\w*\d*)").match(str(j.find_all('a')[0].get("href"))).group(1)
                    rep_username = j.text.split('\xa0')[0].split(':')[0]
                    if len(j.text.split('\xa0')) != 1:
                        repost_content = j.text.split('\xa0')[0] #.split(':')[1]
                        numlikes = j.text.split('\xa0')[1]
                        repost_time = j.text.split('\xa0')[2]
                        from_device = j.text.split('\xa0')[-1]
                        line = origin_username + "\t" + origin_content + "\t"+ origin_time +"\t" + rep_username + "\t" + userid + "\t" + repost_content + "\t" + numlikes + "\t" + repost_time + "\t" + from_device + "\n"
                        print(line.replace("\n", ""))
                        #weiboRepost.write(line)
                        print(line.replace("\n", ""))
                        #if href.find("javascript:void(0)") != -1:
                        #line = str(index) + "\t" + title + "\t" + hot + "\t" + date +"\t"
                        # write the row to the output file
                        weiboRepost.write(line)
        # Vanity domains put letters first and digits after; the digits may be absent.
        weiboRepost.close()
    #except socket.timeout:
    except Exception:
        print("Exceptions")
    except urllib.error.URLError:
        print('The handshake operation timed out')
    #except http.client.RemoteDisconnected:
    #    print("Remote end closed connection without response")
    #except http.client.IncompleteRead:
    #    print("IncompleteRead")
    except NameError:
        print("Name Timeout is not defined")
    #except error.URLError as err:
    #    print("Error description:",err.reason)
    #except error.HTTPError as err:
    #    print("Error description:", err.reason)
    except ConnectionError:
        print("Error description: Socket error timed out.")
###########################################################################################
#group1_cluster3
# 三浦春马10年前写给自己的信 11, not completed, 19th link
# 31省区市新增确诊16例 13, not, 74th link
#group3_cluster3
# 金鹰奖宣传片: 14
# 奶奶陪渐冻症孙女参加中考: 23
# 上海遛狗 30
# 秦昊东升旅行社导游 35
# 特朗普自夸疫情发布会收视率超高 44 maybe not complete
# 安徽巢湖中庙寺被淹 54 not completed
# '受过军训的饺子' 58
# 路小北没有救回哥哥 66 not completed
# 录3年求婚视频男主回应 78 not completed, 23th link
# 周杰伦直播 87 start from 98th link
#for keyword in groups_clusters[14:100]: # try just one element in the list #social_hashtags:
df_up_hashtag = ['法国室内公共场所强制佩戴口罩',
'汽车日租价降幅达50%',
'南特大教堂火灾或系人为',
'排球少年完结',
'有剧情的红绿灯',
'广发证券保荐资格被暂停',
'航拍恩施堰塞湖',
'浪漫轨迹',
'澳军准航母在南海与中国海军对峙',
'大连全市现场招聘活动暂停', #broke from 7th
'电排站回应采购360吨黄豆防汛',
'王者世冠小组赛收官',
'71岁老人5次高考终圆梦',
'大连志愿者防护服拧出汗水',
'瑞幸称被立案调查不会影响门店运营',
'疫情中处在十字路口的旅游业',
'杨英格回应退赛',
'王者世冠应援歌曲',
'新疆新增22例本土病例',
'大连向足协申诉',
'严浩翔翻唱安静',
'北京重启消费季',
'博茨瓦纳北部出现约350头非洲象尸体',
'偶像梦幻祭', #broke from 38th 源数据有问题,已改正
'四川乐山洪峰过境',
'第一创业', # too old in history, discard
'马刺无缘季后赛',
'失去家的玩具们',
'西班牙人续约武磊',
'国内油价上调',
'7月稀土出口下降近70%',
'全国各地点亮地标为科比庆生',
'自己身上的神奇点',
'2020世界城市名册',
'七夕蜜桃约会妆',
'深圳特区40年',
'六部门发文治理未成年人网络环境',
'给闺蜜安利爱豆的样子',
'巴托梅乌',
'孟子坤世界末日',
'FPX拿到赛点',
'东京街头的繁华',
'合肥冰雹',
'滴水成画',
'忍不住代入孙弈秋',
'青岛凤凰音乐节', ##2019年的不要
'任嘉伦薇娅直播',
'股票新基金募资降3成',
'LCK冒泡赛',
'中超外援倒钩进球',
'法国黄马甲示威游行在疫情中卷土重来',
'吴彤时代少年团合影',
'空军空降兵最新宣传片',
'10位铁骑队员3分钟徒手推走抛锚车',
'兵哥哥真人版演示手枪如何工作',
'车艺照大赛',
'军训照挑战',
'我与冲浪只差个自拍杆',
'电子竞技莫得感情',
'白昊天反击江子算',
'趣头条整改',
'25省份上半年GDP出炉',
'湖人战胜魔术',
'宋祖儿喊话跟拍',
'张雨绮撞衫Jennie',
'意大利海上救援犬组织30年救起上千人',
'掘金vs快船',
'孟美岐跳霸王别姬']
############---------------------
group2_cluster1_lower = ['趣头条道歉',
'乌鲁木齐地铁1号线停运',
'315曝光趣头条虚假广告',
'山东即墨查处问题海参',
'金盆洗手之后的吴邪',
'世界死亡病例超58万',
'新疆新增本土病例1例',
'上海全覆盖检查汉堡王',
'香港经深港口岸入境须持核酸证明',
'中国驻洛杉矶总领馆提醒当心山谷热',
'美国凤凰城地区3中国公民患山谷热',
'10岁男孩用零花钱给抗洪战士买糖',
'山东连夜派出调查组赶赴即墨',
'新疆昌吉公交暂停营运',
'江湖预告',
'长征五号火箭垂直转运至发射区',
'长江2020年第2号洪水',
'2020港姐15强诞生',
'扎克伯格连线福奇',
'美国新冠感染病例超355万',
'宁静对郑希怡说我不希望失去你',
'乘风破浪的姐姐三公分组',
'消防员拿第一获准外出后的步伐',
'韩国棋手AI作弊被判监禁',
'新疆专升本考试延期举行',
'乌鲁木齐机场航班大量取消',
'国务院联防联控机制联络组离鄂返京',
'申长友', #no such
'李宇春自评级X',
'49名退役军人一天内赶回家乡抗洪',
'电影院复映片单',
'一中学生连获三年全国青创赛奖项',
'日本6月外国游客仅约2600名',
'舞蹈生迷惑行为图鉴', #no such
'西班牙将扑杀10万只养殖貂',
'中国全球化品牌50强榜单',
'巴菲特4个月从苹果赚400亿美元',
'兽药店库房查出土霉素原粉',
'当两个孤独的人相遇',
'岳云鹏沙溢蹭饭版无价之姐',
'台军直升机坠毁画面',
'Haro的盲僧',
'北京7月21日进入主汛期',
'下周北方将进入主汛期',
'青岛即墨区自然资源局副局长被查',
'校方回应中学生连获三年青创赛奖',
'淮河发生2020年第1号洪水',
'乌鲁木齐已启动疫情应急响应预案',
'江西鄱阳中洲圩决口合龙',
'美国加州幼儿园已报告近1000例确诊']
group2_cluster2_up = ['北京将稳妥有序推进电影院恢复开放',
'安徽32条河湖超警戒水位',
'暮白首大结局',
'莫高窟单日游客最大限量调整为50%',
'国防科大版无价之姐',
'影院重启首日大学生连看四场',
'全国电子烟市场首张罚单',
'官方回应西安足球场禁止踢球',
'北京电影院开启预售',
'中国全网5G用户破亿',
'福奇为美职棒新赛季开球',
'深圳暖夜灯柱',
'福布斯全球品牌价值100强',
'Somi舞台',
'乐队的夏天可太帅了',
'吴前好厉害',
'香港新增确诊80例',
'水上列车驶过天空之镜',
'浙江玉环全面排查阳台违规改造',
'辽宁队淘汰新疆队',
'韩国试射新型弹道导弹',
'微软',
'用玩具还原大话西游经典场景',
'璇玑花式哄司凤',
'唱过夏天', #26th broke
'Otto解说',
'北京电影院上座率上限调至50%',
'缓解头痛的小方法',
'时刻保持微笑的方法',
'中国参加武器贸易条约缔约国大会',
'被孙雯雯气死',
'新西兰流浪猫与总理竞争年度人物',
'当代爱情图鉴',
'七夕蛤蟆',
'居民回应豪华中学部分设施被拆',
'山东荣成休渔期非法捕捞调查',
'原路返回的可爱小鹿',
'风月不相关将拍剧',
'白色月光大结局',
'2020高考暖心结局',
'韦神解说',
'S10资格赛',
'中国96B坦克与俄T72坦克同场竞速',
'亚马逊无人机获批',
'深圳进入强制垃圾分类时代',
'火箭雷霆抢七',
'信条解析',
'易建联寄语年轻球员',
'孙弈秋有骨气',
'中国每日航班量恢复到疫情前九成',
'76岁大爷勇救200斤溺水小伙',
'张伯礼获奖后第一天',
'日本女足运动员加盟男队',
'兰芊翊背锅',
'猫先拿回来 手机可以慢慢等',
'湖南援鄂医生送儿子武大报到',
'寝室环境大赛',
'美印将签关键军事协议共享信息',
'郝帅撒娇',
'美国西部山火持续肆虐',
'全国多地景区门票降价',
'统计局回应房地产市场回暖',
'15种语言版你曾是少年',
'第11号台风红霞',
'泰国将提供270天旅游签证']
group3_cluster2_up = ['3岁女童凌晨独自上街小伙一路护送',
'三十而已和二十不惑串场',
'三十而已的男性角色',
'中国女排版无价之姐',
'优马铁子',
'信条女主190',
'健身教练的视角',
'关晓彤四分之三减龄妆',
'准研究生虐狗事件狗主人不接受道歉',
'剑仙回归',
'剑网3十一周年',
'北京允许举办500人以下体育赛事活动',
'北大回应留守女生报考考古专业',
'口罩的防晒效果有多好',
'吉野家将关闭含中国市场内150家门店',
'唐国强老师演过的角色',
'唯一的好男人是许子言',
'大连女子隆胸去世医方承担完全责任',
'姜贞羽恋情',
'孙子跪地给七旬奶奶拍写真',
'宁静李斯丹妮抢王霏霏',
'张玉环案或申请约700万国家赔偿',
'张艺兴 嘲笑我可以不要嘲笑努力',
'把男朋友叫做普通朋友',
'抹茶杏仁豆腐奶冻',
'教育类硕士毕业生免试认定教师资格',
'昊辰卷土重来',
'杭州失踪女子居住小区居民献花祭奠',
'林有有段位',
'武汉东湖之眼摩天轮',
'武汉菜市场卖野生青蛙',
'父亲为弥补儿子亲手改造房间',
'白桃乌龙青森冰沙',
'羊城十二时辰',
'羊能长得多潦草',
'肖战自拍',
'肯塔基州两支游行队伍现场对峙',
'阚清子说朱一龙神秘',
'阻止性侵被咬耳男子称不后悔',
'阿里巴巴注册新公司京西']
group2_cluster3_up = ['1元抗癌厨房墙上留下不少电话号码',
'3岁女孩被爸妈喂到70斤当吃播赚钱',
'7岁女童商场偷拿玩具亲妈报警',
'7省市将有大到暴雨',
'AI技术复原91年前黑白视频颜色',
'Angelababy回复章子怡',
'TFBOYS演唱会单人机位',
'iPhone12 Pro玻璃后壳曝光',
'一只装满水的气球扎破瞬间',
'乐山大佛脚趾露出',
'乘风破浪的姐姐复活换位战',
'乘风破浪的铁三角',
'入狱4年手机被经办民警私用',
'凌霄和李尖尖的CP名',
'刘心悠 我喜欢谣言',
'厦门两名高级警长同日殉职',
'吴镇宇念王一博粉丝祝福语',
'喂鲤鱼当消遣的黑天鹅',
'四川宜宾通报路面塌陷',
'多本爆特朗普黑料新书销量猛增',
'女子被家暴高速收费站求助获救',
'孔雀溜进图书馆',
'学历鄙视链真存在吗',
'宁静爸爸好年轻',
'张朝阳称自己每天只睡4个小时',
'张雨绮舞蹈进步好多',
'张馨予呼吁不要对女演员胖瘦太苛刻',
'当你发现队友在谈恋爱',
'快乐源泉蔡国庆',
'想象中给骑车的男友打伞',
'我国新一轮降雨将启程',
'我有一个梦想演讲57周年美国民众游行',
'新疆宣布全面恢复正常生产生活秩序',
'新疆新增17例本土病例',
'日系初恋心动妆',
'明年将禁用不可降解塑料袋',
'景区上演现实版鱼跃龙门',
'杜华 我自己打了自己的脸',
'森碟腿部线条',
'水果姐产后身材',
'江西入室杀两人嫌犯又杀一人',
'湖人时隔十年再进西决',
'特朗普成为美国共和党总统候选人',
'特朗普承认曾淡化新冠疫情严重性',
'特朗普计划在白宫为弟弟举行葬礼',
'猛龙战胜凯尔特人',
'瑞幸咖啡单店现金流已转正',
'璇玑切大号',
'男子5000米世界纪录告破',
'男子口罩内藏作弊器考科目一',
'白天开饭店晚上挖地道盗文物',
'硝酸铵是什么',
'童瑶气质',
'紫禁城600年一见如故',
'罗冠军称梁颖方愿意公开道歉',
'美国3000多专家联名挺福奇',
'美国新冠肺炎确诊超491万例',
'美国现十年来最大破产潮',
'药水哥打拳',
'谢霆锋谈年轻艺人排场大',
'钟晓芹钟晓阳好甜',
'钟晓阳暖男',
'陈养鱼许放炮梁海王',
'韩国植物园立安倍下跪谢罪雕像',
'顾佳被骗',
'香港教育局称教科书不应出现违法内容',
'骑手摔残废了手机还在自动接单']
# Group 1 / cluster 1, "up" set: Weibo trending-hashtag strings used as crawl
# keywords by the repost-crawling loop below (keys into "<keyword>keywordpost.txt").
group1_cluster1_up = ['万只白鹭田野中齐飞',
                      '全球单日新增新冠肺炎近30万例',
                      '大波浪改编爱情买卖',
                      '民警创作歌曲揭10种校园诈骗',
                      '第12个全民健身日',
                      '詹姆斯大帽威少',
                      '雨后彩虹照']
# Group 1 / cluster 2, "up" set: Weibo trending-hashtag strings used as crawl
# keywords by the repost-crawling loop below.
group1_cluster2_up = ['30吨橘子散落高速没人哄抢',
                      '70岁宝藏奶奶靠时装火出圈',
                      'LGD状态',
                      '上海大学开学礼物送干湿垃圾桶',
                      '上海整治养犬行为违规者当场罚款',
                      '仪陇通报400字官方回复错4字',
                      '哪一刻决定开始养猫',
                      '四行仓库客流量猛增两倍',
                      '大连人vs恒大',
                      '日本铲屎官记录自家猫咪叫声',
                      '杭州失踪女子尸体在小区化粪池找到',
                      '胡明轩三分球']
# Group 1 / cluster 3, "up" set: Weibo trending-hashtag strings used as crawl keywords.
group1_cluster3_up = ['李遐怡新歌MV', '路小北和许蔚摊牌']
group2_cluster1_up = ['14款游戏APP存在隐私问题',
'2020未来科学大奖获奖人揭晓',
'9月1日起成都天然水域全面禁捕10年',
'CLC新歌MV',
'S10抽签仪式嘉宾',
'一颗高楼大小行星将飞过地球',
'下半年最想看到的电影',
'中国方便面海外买家翻倍',
'云南今年来野生菌中毒已致12死',
'井底蛙顺着绳子奋力往岸上爬',
'八月星空',
'再无法超越的经典角色',
'分享欲有多重要',
'刘雨昕金丝眼镜', #53 broke
'北京学校食堂不得制售冷食生食',
'北京环球影城明年5月开园',
'区块链金融顶层设计出台',
'卫健委明确抑郁症防治四类重点人群',
'印度连续两日新增确诊破9.5万例',
'去新疆旅游无需核酸检测及隔离',
'同济大学军训教官跳女团舞',
'吹玻璃师傅工作的样子',
'大连10岁女童被害案法院判赔128万',
'女艺人能有多敢说',
'宁静看张雨绮出神忘选手机',
'小鸟遇人僵直脖子如标本',
'山西襄汾坍塌饭店老板被刑拘',
'山顶小学摇滚乐队将开演唱会',
'巴特勒40分',
'张玉环前妻称他还欠我一个抱',
'彭于晏像拉黄包车的车夫',
'德云社小剧场将恢复演出',
'德国小镇麦田怪圈',
'成都300年桂花巷内桂花树全被砍',
'救人医学生回应获奖1万元',
'无价之邪',
'杨幂瑞丽25周年封面',
'核电站海底种珊瑚',
'桂花椰汁西米糕',
'武汉天空像羊群奔跑的云团',
'武汉百万大学生返校',
'民间大神打脸现场',
'湖人球员众生相',
'独居老人家中摔倒4天靠敲盆获救',
'王源MV创意',
'王菲K歌直播',
'瑞士小镇下起巧克力雨',
'硬糖少女首张EP',
'罗云熙仲夏光影大片',
'美国一夏令营发生集体感染',
'美国俄勒冈州染成红色',
'胡一天黄子韬 蜡笔小新',
'菅义伟正式就任日本首相',
'詹姆斯季后赛胜场数历史第一',
'豆腐脑店老板作诗贴满墙',
'赵丽颖哭的镜头',
'还没从琉璃走出来',
'这蜡烛吹得挺突然的',
'逃离家暴有多难',
'金晨郁可唯表情包',
'陈木胜追思会',
'隐秘的角落 切片式混剪',
'零食托',
'霍格沃茨学院妆',
'青春芒果夜阵容', #青春芒果节官微 broke
'韩国近七成80岁以下老人想工作',
'香港新增新冠肺炎确诊113例',
'黄子韬加盟说唱新世代']
# Group 1 / cluster 1, "down" set: Weibo trending-hashtag strings used as crawl
# keywords by the repost-crawling loop below.
group1_cluster1_down = ['写信力挺援港医护的港警收到回复了',
                        '创业板注册制首批企业8月24日上市',
                        '制造业PMI连续5个月在临界点以上',
                        '北京企业乱倒建筑垃圾最高罚100万',
                        '多肉乌龙青提沙',
                        '女性科技人力资源比例进一步提升',
                        '安徽解除紧急防汛期',
                        '少儿读物类图书均价上涨超2成',
                        '线下独处线上热闹成常态',
                        '西藏加林山岩画上的动物世界',
                        '连狗子都会玩滑板了',
                        '首批火星地形地貌中文推荐译名']
# Group 1 / cluster 2, "down" set: Weibo trending-hashtag strings used as crawl
# keywords by the repost-crawling loop below.
group1_cluster2_down = ['00后内向男孩成国内首位手语翻译',
                        '为战疫功勋护航21车队形是最高规格',
                        '抚顺3.0级地震',
                        '杭州金边云',
                        '现在军训的才艺技能有多丰富',
                        '约基奇三双创历史',
                        '纳达尔退出2020年美网',
                        '蓝色格纹穿搭',
                        '西亚卡姆脚踢对手',
                        '遵义欧亚医院总经理获刑20年',
                        '重启特化']
group1_cluster3_down = ['KPL韩信星元皮肤',
'SpaceX首批星际飞船乘客',
'亲历者讲述国航航班突降千米',
'大坂直美罢赛',
'孤day打歌蛤蟆',
'当给宠物加上特效',
'影子银行规模三年缩减16万亿',
'日本新内阁名单公布',
'毛阿敏毛不易神仙合唱',
'滞留乌市人员核酸检测合格后可离开',
'用狗狗最爱的词语讲故事',
'给李现P衣服',
'美46万人庆摩托车节26万人确诊',
'鄱阳县境内水位呈下降趋势',
'长城上眺望北京CBD',
'黎巴嫩司法部长辞职']
group2_cluster2_down = ['TFBOYS合唱Heart',
'九旬大爷勇救落水男童',
'北部湾大量珊瑚白化面临死亡',
'台风过境吉林屋顶被掀树被刮倒',
'嗨学网退费难',
'四川强降雨结束',
'国家卫健委18人专家团队抵达大连',
'国庆酒店机票价格创5年新低',
'安徽淮河王家坝关闸',
'工信部要求严查SDK违规收集用户信息',
'房地产的广告有多奇葩',
'把悬崖村搬下悬崖',
'林俊杰孙燕姿新歌MV',
'武磊西乙首球',
'江水已从乐山大佛脚趾退去',
'现存中华老字号近一半持续亏损',
'美国至少24州报告高校新冠病例',
'雅润等5款消毒湿巾虚标酒精浓度']
group3_cluster3_down = ['1200万台第三方产品支持鸿蒙',
'2岁女童10分钟横渡沾天湖',
'KPL阿古朵首秀',
'Mlxg手气',
'三星电子关闭在华最后一家电脑厂',
'上万颗钉子做成的立体山水画',
'中国新冠疫苗在阿联酋获紧急批准',
'乌克兰前总理季莫申科感染新冠肺炎',
'周峻纬单场21分',
'张雨绮 抱你到天亮mua',
'恋爱脑的反常规操作',
'杨超越工作室公告',
'段振宇救姜小果',
'特朗普考虑大选后撤换国防部长',
'王一博霸总式选人',
'王岳伦表白李湘王诗龄',
'看到情侣的我',
'综艺里最热闹的场面',
'这年头水居然会敷面膜',
'迪士尼在逃生物',
'钟汉良小宝 什么偶像剧情节',
'高福接种实验型新冠病毒疫苗']
group3_cluster2_down = ['DNA检测通告吓出高空抛物肇事者',
'S7冒泡赛兮夜用卡萨丁淘汰iG',
'iPhone11首次在印度生产',
'中国代表在联大严厉驳斥美方无端指责',
'刘永坦捐出最高科技奖800万奖金',
'加满一箱油将多花3.5元',
'南开录取通知书送两粒莲花种子',
'吴尊友称未来出现疫情是一种常态',
'大连地铁回应老人无健康码乘车受阻',
'安倍晋三正式宣布辞职',
'家中起火姐姐带9岁弟弟教科书式避险',
'当老师让小朋友带一种蔬菜',
'招生办通报专升本考试疑似泄题事件',
'日方回应韩国立安倍下跪谢罪雕像',
'时代少年团微电影',
'易烊千玺大妈同款拍照姿势',
'田雨白玉兰最佳男配角',
'秦霄贤大波浪舞台好炸',
'美国没资格要求安理会恢复对伊制裁',
'美国西部97处大规模山火在燃烧',
'考生伪造清华录取通知书',
'薇娅李子柒当选全国青联委员',
'贱内的意思',
'青簪行预告是原声',
'黄子韬爸爸去世']
group2_cluster1_down = ['150米盲道被改成十八弯',
'2020亚洲小姐竞选面试',
'2020年标准地图发布',
'31省区市新增确诊14例',
'347国道发生大面积山体垮塌',
'8月有5场天象奇观',
'DWG获得LCK夏季赛冠军',
'Haro的盲僧',
'S10抽签仪式时间',
'THE9首支MV',
'一旦接受了这个设定后',
'世卫称全球需要多种类型疫苗',
'丝滑可可糯米糍',
'为什么火箭能飞太空飞机却不行',
'乌童改造玲珑',
'乌鲁木齐开展全民免费核酸检测',
'乔丹支持NBA重启',
'乘风破浪的姐姐三公分组',
'云南新增境外航空输入病例1例',
'人类早期驯服四肢的过程',
'伦敦运河边的芭蕾舞者',
'何洛洛呼吁粉丝在机场保持秩序',
'全球二季度最赚钱100家企业',
'准格尔首试免费接种宫颈癌疫苗',
'出海观鲸偶遇海豚群狂奔',
'别用衣架晾口罩',
'北京警方起底美容贷',
'北京高校承担学生返校核酸检测费用',
'十几岁和现在恋爱的区别',
'南京八卦洲首批人员撤离',
'参与玉树救援的功勋犬天宝去世',
'台风美莎克3日中午前后移入吉林',
'商务部回应美方再次升级对华为打压',
'喉舌齿唇的发音部位',
'在劫难逃刺激',
'大连中风险封闭社区菜粮供应充足',
'天津儿童免费乘车身高标准提至1米3',
'央视记者实地探访乌鲁木齐社区',
'女生中奖1吨娃哈哈送小朋友',
'奶奶写的少女日记',
'如何优雅的表达不满',
'如何调整你的电脑桌椅',
'学校复学成美国下一阶段疫情防控挑战',
'宁吉喆说下半年最大挑战是稳就业',
'宁静组拉横幅拉票',
'安徽全椒县滁河实施爆破泄洪',
'山东龙口一幼儿园校车与大货车相撞',
'山西一施工罐车往河中倒水泥',
'师德表现将作为教师职称评审首要条件',
'广东47岁民警突发疾病倒在工作岗位',
'广州暂停进口疫区冷冻肉制品和水产品',
'库兹马三分绝杀',
'张文宏称正在研究新冠特效药',
'张萌不好意思找王一博合影',
'张雨绮山东话rap',
'徐艺洋 过程比结果重要',
'微博将整治大胃王吃播内容',
'德阳安医生自杀案今日开庭',
'成都2名男子路中积水游泳比赛',
'我国拟修法明确禁止倒挂国旗',
'我的青春疼痛',
'数辆过路车25秒静待老人过马路',
'方硕最佳第六人',
'曹操墓出土文物已修复900余件',
'最喜欢的手机设计',
'朱朝阳为严良庆生',
'李斯丹妮中二vlog',
'杨易脑王之王',
'林郑月娥率队接受新冠病毒检测',
'校方回应学生雨夜追星',
'梦露 伊丽莎白泰勒',
'汪苏泷线上演唱会', #45 link not finished
'淮河发生2020年第1号洪水',
'深航东航相关安全事件调查结果',
'焦雅辉说希望理解医护不止在今天',
'父亲回应家属被男子持铁锹暴打',
'独行侠快船冲突',
'王一博镜子蹲',
'王丽坤手劲好大',
'瑞丽已完成核酸检测全部为阴性',
'福奇警告称美国疫情正向中西部蔓延',
'福建下海救人的第三匹马已痊愈',
'福建漳州龙海一厂房被吹倒',
'科沃尔恶意犯规',
'纽约周末发生多起枪击案',
'网络游戏实名认证系统',
'美国300家必胜客计划永久关闭',
'美国扩大召回75款洗手液',
'美国波特兰示威者推倒法院围栏',
'老师授课的职业绝活',
'考试成绩理想是什么体验',
'耳朵的工作量太大了',
'芝加哥所有桥梁升起',
'花木兰放弃北美院线发行',
'苹果秘密收购以色列相机公司',
'菠萝芥末油条虾',
'蒂姆状态',
'西安业主自管小区一年盈利83万',
'西班牙将扑杀10万只养殖貂',
'西电拟清退33名失联超期博士生',
'课间操跳得像康复训练',
'贵州吉他村一年产800万把',
'赵继伟高难度三分',
'路人看到王俊凯的表情',
'跳河救人被冲走司机遗体找到',
'迪丽热巴裤装造型', #here <--
'重庆磁器口景区关闭',
'雪顶气泡葡萄冰',
'香港设计师绘画赠内地支援队',
'马主人回应下海救人两匹马去世',
'骗取贷款嫌疑人李冠君被通缉',
'高考报考避坑口诀',
'鹿晗连续6天拍打戏中暑',
'黄晓明夏日午后大片',
'黄景瑜喝酒对瓶吹',
'黄金薯片爆浆拉丝芝士球',
'黄鹤楼免票首日客流量涨3倍']
'''percentage of stepwise
average of born in rome/sleeping beauty
(1) how long it takes
(2)the way it reaches the HSL
(3) contextual, what is about,
related to each other, make relationship between these aspects
hope to understand the mechanism, depending on.... find relation and quantify
maybe try different intervals and see how the statistics changes, trial and error
show typical examples, make averages, how many steps, link to the nature of hashtag/when arrive
tell problems/limitations, % percent of the data was corrupted
leave out the statistics, but describe ... within 5 minutes? artificial
the list is manipulated, not following an automated algorithm
method and data : describe the problems
'''
for keyword in group2_cluster1_down[72:]:  # slice resumes the crawl from the 73rd hashtag; adjust to restart elsewhere
    # The working directory matters: the per-cluster post files live in cwd/type_group_cluster.
    os.chdir(os.path.join(cwd, type_group_cluster))
    if keyword + "keywordpost.txt" in os.listdir():
        df = pd.read_csv(keyword + "keywordpost.txt", sep="\t")
        # Keep only rows that actually have a repost count ('转发数'), then drop exact duplicates.
        filtered_df = df[df['转发数'].notnull()]
        print(len(filtered_df))
        filtered_new = filtered_df.drop_duplicates()
        print(len(filtered_new))
        # NOTE(review): posts may hide text behind "展开全文" ("expand full text"),
        # which is why filtering rows by the keyword text was deliberately disabled.
        keyword_url_list = list(filtered_new["手机版链接"])  # mobile-version post links
        print(keyword, len(keyword_url_list))
        # Create a per-keyword directory for the repost output and work inside it.
        # ('dir' renamed to target_dir to stop shadowing the builtin; the path is
        # built once with os.path.join and used consistently for mkdir/chdir.)
        target_dir = os.path.join(cwd, type_group_cluster, keyword)
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        os.chdir(target_dir)
        # Crawl the reposts of every collected mobile link.
        for i, url in enumerate(keyword_url_list, start=1):
            print("This is the {}th mobile repost link".format(i))
            GetRepost(getUrlList(url))
            print("The {}th mobile repost link is done.".format(i))
        print("The whole crawling is done." + keyword)
##########-------------------------------------------------##################
|
from osgeo import gdal
import numpy as np
import os
from datetime import datetime
class Composite:
    """
    Creates an averaged composite of any number of
    individual single band rasters.

    Developed for use within a QGIS plugin, but can be
    used as a standalone module, although the metadata
    is currently specific to sea ice concentration
    from NSIDC.

    Inputs:
        filelist: list of paths to single-band rasters to composite
        sd, ed: start / end dates (datetime) used for the output name and metadata
        calculation: 'mean' or 'median' (default 'mean')
    """

    def __init__(self, filelist, sd, ed, calculation='mean'):
        self.filelist = filelist
        # Output name encodes the date range, e.g. nt_19950101_19950102_composite.tif,
        # and is written next to the first input file.
        self.composite_name = os.path.join(
            os.path.dirname(self.filelist[0]),
            'nt_{0}_{1}_composite.tif'.format(sd.strftime('%Y%m%d'), ed.strftime('%Y%m%d')))
        g = gdal.Open(self.filelist[0])
        self.proj = g.GetProjection()
        self.outgeo = g.GetGeoTransform()
        self.nodata = g.GetRasterBand(1).GetNoDataValue()
        self.calculation = calculation
        self.startdate = sd
        self.enddate = ed
        arr = g.ReadAsArray()
        # NOTE: numpy shape is (rows, cols); the historical attribute names are
        # swapped (self.cols holds the row count, self.rows the column count).
        # They are kept as-is for backward compatibility and used consistently
        # in __savearr below.
        [self.cols, self.rows] = arr.shape

    def composite(self):
        """Build the composite from the input files and save it as a GeoTIFF."""
        arr = self.__getarray(self.filelist)
        arr = self.__averagearr(arr, self.calculation)
        self.__savearr(arr, self.composite_name)

    def __getarray(self, filelist):
        """Stack the input rasters into a 3D (file, row, col) uint8 array."""
        g = gdal.Open(filelist[0])
        [cols, rows] = g.ReadAsArray().shape
        new_arr = np.empty((len(filelist), cols, rows), dtype=np.uint8)
        # enumerate avoids the O(n) filelist.index(f) lookup per file
        for i, f in enumerate(filelist):
            g = gdal.Open(f)
            new_arr[i, ...] = g.ReadAsArray()
            g = None  # release the GDAL dataset handle
        return new_arr

    def __averagearr(self, arr, calculation):
        """
        Collapse the stacked (file, row, col) array into one 2D band.

        'median' takes the per-pixel median; any other value falls back to
        the mean (the historical default). The result is truncated back to
        uint8 to match the input dtype. The input array is left unmodified.
        """
        # Vectorized over the whole grid. The previous per-pixel loops both
        # skipped the last row and column (range stopped at cols-1 / rows-1)
        # and mutated the first input slice in place through a view.
        if calculation == 'median':
            averaged = np.median(arr, axis=0)
        else:
            averaged = np.mean(arr, axis=0)
        return averaged.astype(np.uint8)

    def __savearr(self, arr, outputname):
        """Save the composited array as a single-band GeoTIFF with metadata."""
        driver = gdal.GetDriverByName("GTiff")
        # Create(name, xsize, ysize, ...): self.rows holds the column count
        # and self.cols the row count (see the note in __init__).
        dst_ds = driver.Create(outputname, self.rows, self.cols, 1, gdal.GDT_Byte)
        dst_ds.SetProjection(self.proj)
        dst_ds.SetGeoTransform(self.outgeo)
        dst_ds.SetMetadataItem('PRODUCT', '{} sea ice concentration'.format(self.calculation))
        dst_ds.SetMetadataItem('UNITS', 'Percentage %')
        dst_ds.SetMetadataItem('DATA_PROVIDER', 'NSIDC')
        dst_ds.SetMetadataItem('DATA_START_DATE', self.startdate.strftime('%Y-%m-%d'))
        dst_ds.SetMetadataItem('DATA_END_DATE', self.enddate.strftime('%Y-%m-%d'))
        dst_ds.SetMetadataItem('CREATION_DATE', datetime.now().strftime('%Y-%m-%d'))
        dst_ds.SetMetadataItem('RESOLUTION', '25km')
        dst_ds.SetMetadataItem('PARAMETER', 'Sea ice concentration')
        dst_ds.SetMetadataItem('ALGORITHM', 'NASA Team')
        band = dst_ds.GetRasterBand(1)
        band.WriteArray(arr)
        if self.nodata is not None:  # SetNoDataValue rejects None
            band.SetNoDataValue(self.nodata)
        band.FlushCache()
        dst_ds = None  # closing the dataset flushes it to disk (previously missing)
if __name__ == "__main__":
    files = ['~/rsr/qgis-dev/compositeseaice/nt_19950101_f11_v01_s.tif',
             '~/rsr/qgis-dev/compositeseaice/nt_19950102_f11_v01_s.tif']
    # Expand '~' explicitly: GDAL does not understand shell home shortcuts.
    files = [os.path.expanduser(f) for f in files]
    # Composite requires explicit start/end dates (the original call omitted
    # them and raised TypeError); these match the dates in the filenames.
    comp = Composite(files, datetime(1995, 1, 1), datetime(1995, 1, 2))
    comp.composite()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written in place of AboutBlocks in the Ruby Koans
#
# Note: Both blocks and generators use a yield keyword, but they behave
# a lot differently
#
from runner.koan import *
class AboutGenerators(Koan):
    """Koans about generator expressions, generator functions, and coroutines.

    Python's ``yield`` serves two roles: producing values lazily during
    iteration, and receiving values sent into a running generator
    (coroutine-style, per PEP 342). Generators can only be iterated once;
    values are produced on the fly rather than stored.
    """

    def test_generating_values_on_the_fly(self):
        # A generator expression produces items lazily, one per iteration.
        bacon_generator = (n + ' bacon' for n in ['crunchy','veggie','danish'])
        result = [flavour for flavour in bacon_generator]
        self.assertEqual(['crunchy bacon', 'veggie bacon', 'danish bacon'], result)

    def test_generators_are_different_to_list_comprehensions(self):
        # Square brackets build the whole list eagerly; parentheses build a
        # lazy generator instead.
        doubled_list = [x*2 for x in range(1,3)]
        doubled_gen = (x*2 for x in range(1,3))

        self.assertEqual(2, doubled_list[0])

        # Generators do not support indexing; they must be iterated through.
        with self.assertRaises(TypeError):
            doubled_gen[0]

        self.assertEqual(2, list(doubled_gen)[0])

    def test_generator_expressions_are_a_one_shot_deal(self):
        dynamite = ('Boom!' for n in range(3))

        first_pass = list(dynamite)
        second_pass = list(dynamite)

        # The generator is exhausted after the first pass...
        self.assertEqual(['Boom!', 'Boom!', 'Boom!'], first_pass)
        # ...so a second pass yields nothing at all.
        self.assertEqual([], second_pass)

    # ------------------------------------------------------------------

    def simple_generator_method(self):
        # Any function containing ``yield`` returns a generator when called.
        for word in ('peanut', 'butter', 'and', 'jelly'):
            yield word

    def test_generator_method_will_yield_values_during_iteration(self):
        collected = list(self.simple_generator_method())
        self.assertEqual(['peanut', 'butter', 'and', 'jelly'], collected)

    def test_generators_can_be_manually_iterated_and_closed(self):
        words = self.simple_generator_method()
        self.assertEqual('peanut', next(words))
        self.assertEqual('butter', next(words))
        # close() stops a generator early - especially handy when the
        # generator would otherwise be infinite.
        words.close()

    # ------------------------------------------------------------------

    def square_me(self, seq):
        # Lazily yield the square of each element of seq.
        for item in seq:
            yield item * item

    def test_generator_method_with_parameter(self):
        squares = self.square_me(range(2,5))
        self.assertEqual([4,9,16], list(squares))

    # ------------------------------------------------------------------

    def sum_it(self, seq):
        # Local state (the running total) is retained between yields.
        running_total = 0
        for num in seq:
            running_total += num
            yield running_total

    def test_generator_keeps_track_of_local_variables(self):
        totals = self.sum_it(range(2,5))
        self.assertEqual([2,5,9], list(totals))

    # ------------------------------------------------------------------

    def coroutine(self):
        # A bare ``yield`` pauses here and waits for a value via send().
        received = yield
        yield received

    def test_generators_can_act_as_coroutines(self):
        generator = self.coroutine()
        # The generator must first be advanced to its bare ``yield`` before
        # send() may deliver a value - see "Specification: Sending Values
        # into Generators" in PEP 342.
        next(generator)
        self.assertEqual(3, generator.send(1 + 2))

    def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
        generator = self.coroutine()
        try:
            generator.send(1 + 2)
        except TypeError as err:
            # Sending a non-None value into a just-started generator fails.
            self.assertRegex(err.args[0], 'generator')

    # ------------------------------------------------------------------

    def yield_tester(self):
        received = yield
        if received:
            yield received
        else:
            yield 'no value'

    def test_generators_can_see_if_they_have_been_called_with_a_value(self):
        generator = self.yield_tester()
        next(generator)
        self.assertEqual('with value', generator.send('with value'))

        generator2 = self.yield_tester()
        next(generator2)
        # next() delivers None, which is falsy, so the else branch runs.
        self.assertEqual('no value', next(generator2))

    def test_send_none_is_equivalent_to_next(self):
        generator = self.yield_tester()
        next(generator)
        # 'next(generator)' is exactly equivalent to 'generator.send(None)'
        self.assertEqual('no value', generator.send(None))
|
<filename>warthog/config.py
# -*- coding: utf-8 -*-
#
# Warthog - Simple client for A10 load balancers
#
# Copyright 2014-2016 <NAME>
#
# Available under the MIT license. See LICENSE for details.
#
"""
warthog.config
~~~~~~~~~~~~~~
Load and parse configuration for a client from an INI-style file.
"""
import collections
import sys
import threading
import codecs
import os.path
import warthog.exceptions
import warthog.ssl
from .packages import six
# pylint: disable=import-error
from .packages.six.moves import configparser
# List of locations (from most preferred to least preferred) that will
# be searched for a configuration file. These locations are typically
# only searched when an explicit configuration file is not used.
DEFAULT_CONFIG_LOCATIONS = [
    os.path.join('/etc', 'warthog', 'warthog.ini'),
    os.path.join('/etc', 'warthog.ini'),
    os.path.join(sys.prefix, 'etc', 'warthog', 'warthog.ini'),
    os.path.join(sys.prefix, 'etc', 'warthog.ini'),
    os.path.join(os.path.expanduser('~'), '.warthog.ini'),
    os.path.join(os.getcwd(), 'warthog.ini')
]

# By default, we assume that the configuration file is in UTF-8 unless
# the caller indicates it is in some other encoding.
DEFAULT_CONFIG_ENCODING = 'utf-8'

# Simple immutable struct to hold configuration information for a WarthogClient.
# Per WarthogConfigParser below, 'ssl_version' and 'verify' are the optional
# entries; presumably ssl_version holds a protocol constant resolved by
# parse_ssl_version - confirm against warthog.client usage.
WarthogConfigSettings = collections.namedtuple(
    'WarthogConfigSettings', ['scheme_host', 'username', 'password', 'verify', 'ssl_version'])
class WarthogConfigLoader(object):
    """Load and parse Warthog client settings from an INI-style file.

    When no explicit configuration file is supplied at construction time, the
    default locations in :data:`DEFAULT_CONFIG_LOCATIONS` are searched in
    order. Default-location candidates are only checked for existence, never
    for readability or correct formatting.

    This class is thread safe.

    .. versionadded:: 0.4.0

    .. versionchanged:: 0.6.0
        Loading, parsing, and access of configuration settings is now thread safe.

    .. versionchanged:: 0.6.0
        The .parse_configuration() method has been removed and the functionality has
        been split into the .initialize() and .get_settings() methods.

    .. versionchanged:: 0.10.0
        The :meth:`WarthogConfigLoader.__init__` method no longer directly takes a standard
        library INI parser as an option parameter, instead it now takes a WarthogConfigParser
        instance as an optional parameter.

    .. versionchanged:: 0.10.0
        See :doc:`changes` or :doc:`cli` for details about the changes to configuration
        file format.
    """

    def __init__(self, config_file=None, encoding=None, path_resolver=None, config_parser=None):
        """Optionally override the configuration file, its encoding, the file
        resolver, and the configuration parser implementation.

        By default multiple locations are checked for a configuration file and
        the file is assumed to be UTF-8 encoded.

        :param str|unicode config_file: Optional explicit path to a configuration
            file to use.
        :param str|unicode encoding: Encoding to use for reading the configuration
            file. Default is UTF-8.
        :param callable path_resolver: Callable that accepts a single argument (the
            explicit configuration file path to use) and determines what configuration
            file to use. Typically only needed for unit testing.
        :param WarthogConfigParser config_parser: Optional parser for the expected
            INI format of a Warthog configuration file. Typically only needed for
            unit testing.
        """
        self._config_file = config_file
        if encoding is None:
            encoding = DEFAULT_CONFIG_ENCODING
        self._encoding = encoding
        if path_resolver is None:
            path_resolver = WarthogConfigFileResolver(DEFAULT_CONFIG_LOCATIONS)
        self._path_resolver = path_resolver
        if config_parser is None:
            config_parser = WarthogConfigParser(configparser.SafeConfigParser())
        self._parser = config_parser
        # Guards _settings so load/parse/read are safe across threads.
        self._lock = threading.RLock()
        self._settings = None

    def initialize(self):
        """Resolve, load, and parse the INI-style configuration file.

        The parsed values are stored as a :class:`WarthogConfigSettings`
        instance and may afterwards be fetched with :meth:`get_settings`.

        .. versionadded:: 0.6.0

        .. versionchanged:: 0.8.0
            Errors locating or parsing configuration files now result in
            Warthog-specific exceptions
            (:class:`warthog.exceptions.WarthogConfigError`) instead of
            `ValueError`, `IOError`, or `RuntimeError`.

        :return: Fluent interface
        :rtype: WarthogConfigLoader
        :raises warthog.exceptions.WarthogNoConfigFileError: If no explicit
            configuration file was given and none of the default locations had
            one, or the file could not be opened or read.
        :raises warthog.exceptions.WarthogMalformedConfigFileError: If the
            configuration file was malformed, e.g. missing the required
            'warthog' section or any expected value. See :doc:`cli` for the
            expected configuration settings.
        """
        with self._lock:
            resolved_path, locations_checked = self._path_resolver(self._config_file)
            self._settings = self._parser.parse(resolved_path, self._encoding, locations_checked)
            return self

    def get_settings(self):
        """Return the previously loaded and parsed configuration settings.

        .. versionadded:: 0.6.0

        :return: Struct of configuration settings for the Warthog client
        :rtype: WarthogConfigSettings
        :raises RuntimeError: If a configuration file has not already been
            loaded and parsed via :meth:`initialize`.
        """
        with self._lock:
            if self._settings is not None:
                return self._settings
        raise RuntimeError(
            "Configuration file must be loaded and parsed before "
            "settings can be used (via the .initialize() method)")
def parse_ssl_version(version_str, ssl_module=None):
    """Resolve *version_str* to the matching ``PROTOCOL_*`` constant.

    Returns ``None`` for a ``None`` or blank version string, and raises if
    the string names no supported protocol. The :mod:`warthog.ssl` protocol
    constants mirror the standard library :mod:`ssl` module exactly; the
    Warthog copy simply carries every potential version where older Python
    modules did not.

    :param unicode version_str: Version string to resolve to a protocol
    :param module ssl_module: SSL module to get the protocol constant from
    :return: The ssl module protocol constant or ``None``
    :raises ValueError: If the version string did not match any known versions
        of SSL or TLS
    """
    if version_str is None:
        return None

    cleaned = version_str.strip()
    if not cleaned:
        return None

    if ssl_module is None:
        ssl_module = warthog.ssl

    # Every PROTOCOL_* constant on the module, minus the prefix, names a
    # supported SSL/TLS version that the user input is compared against.
    prefix = 'PROTOCOL_'
    supported = {name[len(prefix):] for name in dir(ssl_module) if name.startswith(prefix)}

    if cleaned in supported:
        return getattr(ssl_module, prefix + cleaned)

    raise ValueError(
        "Unsupported SSL/TLS version '" + cleaned + "'. Supported: " + ', '.join(supported))
class WarthogConfigFileResolver(object):
    """Callable that decides which configuration file should be used.

    Calling an instance yields the tuple ``(path, searched)`` where ``path``
    is the configuration file to use (or ``None`` when no suitable file was
    found) and ``searched`` lists every location that was considered.

    An explicitly supplied path always wins and no default location is
    examined. Otherwise the default locations are probed in order and the
    first existing file is chosen; when none exists the result is
    ``(None, searched)``.
    """

    def __init__(self, default_locations, exists_impl=None):
        self._default_locations = default_locations
        # os.path.exists unless a test injects its own existence check.
        self._exists_impl = os.path.exists if exists_impl is None else exists_impl

    def __call__(self, path):
        if path is not None:
            # Explicit file: nothing else is ever checked.
            return path, [path]
        candidates = self._default_locations
        chosen = next((loc for loc in candidates if self._exists_impl(loc)), None)
        return chosen, candidates
class WarthogConfigParser(object):
    """Facade for a standard library INI file parser that parses the expected
    configuration values for configuring a :class:`warthog.client.WarthogClient`
    instance.
    All configuration values are expected to be in the ``warthog`` section of
    the INI file. The ``ssl_version`` and ``verify`` values are not required, all
    others are.
    This class is not thread safe.
    """
    def __init__(self, parser_impl, open_impl=None):
        """Set the underlying standard library INI parser to use for reading
        Warthog configuration settings.
        :param configparser.RawConfigParser parser_impl: INI file parser to use
            for parsing Warthog configuration settings.
        :param callable open_impl: Open method for opening the configuration
            file. The method is expected to have the same signature as
            :func:`codecs.open`. Callers should only need to supply this for unit
            testing purposes.
        """
        self._parser_impl = parser_impl
        self._open_impl = open_impl if open_impl is not None else codecs.open
    def _load_file(self, path, encoding, checked):
        """Open and load the configuration file at the given path.
        :param str|unicode path: Path to the configuration file, or ``None``
            when no file was found.
        :param str|unicode encoding: Encoding to use when opening the file.
        :param list checked: Locations examined while searching for the file,
            included in raised errors for diagnostics.
        :raises warthog.exceptions.WarthogNoConfigFileError: If no path was
            supplied or the file could not be opened or read.
        :raises warthog.exceptions.WarthogMalformedConfigFileError: If the file
            is not valid text in the expected encoding.
        """
        if path is None:
            raise warthog.exceptions.WarthogNoConfigFileError(
                "No configuration file was specified. Please set a "
                "configuration file or ensure that a configuration "
                "file exists in one of the default locations checked",
                locations_checked=checked)
        try:
            with self._open_impl(path, 'r', encoding=encoding) as handle:
                # NOTE: readfp() is a deprecated alias of read_file() on
                # Python 3 but is the only spelling available on Python 2;
                # kept for py2/py3 compatibility (this module uses six).
                self._parser_impl.readfp(handle)
        except IOError as e:
            # Re-raise as a domain exception but keep the original traceback
            # so the real point of failure is preserved.
            six.reraise(
                warthog.exceptions.WarthogNoConfigFileError,
                warthog.exceptions.WarthogNoConfigFileError(
                    "The configuration file does not exist or could not be read. "
                    "Please make sure {0} exists and can be read by the current "
                    "user. Original error message: {1}".format(path, e),
                    locations_checked=checked),
                sys.exc_info()[2])
        except UnicodeError as e:
            six.reraise(
                warthog.exceptions.WarthogMalformedConfigFileError,
                warthog.exceptions.WarthogMalformedConfigFileError(
                    "The configuration file {0} doesn't seem to be correctly encoded "
                    "{1} text. Please ensure that the file is valid text. Original "
                    "error message: {2}".format(path, encoding, e)),
                sys.exc_info()[2])
    def _get_ssl_version(self, section, option):
        """Get the specified TLS version in the config file or None."""
        if self._parser_impl.has_option(section, option):
            return parse_ssl_version(self._parser_impl.get(section, option))
        return None
    def _get_verify(self, section, option):
        """Get the certificate verify option in the config file or None."""
        if self._parser_impl.has_option(section, option):
            return self._parser_impl.getboolean(section, option)
        return None
    def _parse_file(self):
        """Parse the opened configuration file and return the results as a
        :class:`WarthogConfigSettings` namedtuple.
        :raises warthog.exceptions.WarthogMalformedConfigFileError: If the
            ``warthog`` section or any required option is missing.
        """
        try:
            # Required options: any missing one raises NoOptionError below.
            scheme_host = self._parser_impl.get('warthog', 'scheme_host')
            username = self._parser_impl.get('warthog', 'username')
            password = self._parser_impl.get('warthog', 'password')
            # Optional options: these resolve to None when absent.
            verify = self._get_verify('warthog', 'verify')
            ssl_version = self._get_ssl_version('warthog', 'ssl_version')
        except configparser.NoSectionError as e:
            raise warthog.exceptions.WarthogMalformedConfigFileError(
                "The configuration file seems to be missing a '{0}' section. Please "
                "make sure this section exists".format(e.section), missing_section=e.section)
        except configparser.NoOptionError as e:
            raise warthog.exceptions.WarthogMalformedConfigFileError(
                "The configuration file seems to be missing the '{0}' option. Please "
                "make sure this option exists".format(e.option), missing_option=e.option)
        return WarthogConfigSettings(
            scheme_host=scheme_host,
            username=username,
            password=password,
            verify=verify,
            ssl_version=ssl_version)
    def parse(self, path, encoding, checked):
        """Attempt to open and parse the configuration file at the given
        path.
        :param str|unicode path: Path to the configuration file to parse.
        :param str|unicode encoding: Encoding to use when opening the file.
        :param list checked: List of the various locations checked before
            deciding on the configuration file to open.
        :return: The parsed configuration settings to use for creating a client.
        :rtype: WarthogConfigSettings
        """
        self._load_file(path, encoding, checked)
        return self._parse_file()
|
<gh_stars>1-10
# Copyright (c) 2011-2012 <NAME> and <NAME>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Character encoding detection library."""
import os
import sys
import struct
ENCODE_REPLACEMENT_CHARACTER = '\x00'
MODEL_VERSION = '1.3'
def list_models():
    """Return a sorted list of the identifiers of the inbuilt models.

    Models are the ``*.edm`` files shipped in the ``models`` directory next
    to the installed ``chared`` package; the extension is stripped.
    """
    models_dir = os.path.join(
        os.path.dirname(sys.modules['chared'].__file__), 'models')
    return sorted(
        name.rsplit('.', 1)[0]
        for name in os.listdir(models_dir)
        if name.endswith('.edm'))
def get_model_path(model_id):
    """
    Return the full path to the model with the given id, or None if no
    model with that id exists.
    """
    models_dir = os.path.join(
        os.path.dirname(sys.modules['chared'].__file__), 'models')
    candidate = os.path.join(models_dir, model_id + '.edm')
    # Only report paths that exist as regular files.
    return candidate if os.path.isfile(candidate) else None
def scalar_product(vec1, vec2):
    """Return the scalar (dot) product of two sparse vectors.

    Vectors are dicts mapping keys to numeric values; only keys present in
    both vectors contribute to the result.
    """
    result = 0
    for key, value in vec1.items():
        # 'in' replaces the removed-in-Python-3 dict.has_key() and works
        # identically on Python 2.
        if key in vec2:
            result += value * vec2[key]
    return result
def replace_by_zero(error):
    """
    Replaces unknown bytes while encoding/decoding.
    The function has to be registered using codecs.register_error.
    """
    # Decoding failures become the Unicode replacement character.
    if isinstance(error, UnicodeDecodeError):
        return (u'\ufffd', error.end)
    # Encoding failures become the configured replacement character.
    if isinstance(error, UnicodeEncodeError):
        return (unicode(ENCODE_REPLACEMENT_CHARACTER), error.end)
    # Anything else is not ours to handle.
    raise error
class EncodingDetector(object):
    """Character encoding detector based on character n-gram frequency vectors."""
    # Length of the character n-grams used as vector keys.
    VECTOR_TUPLE_LENGTH = 3
    def __init__(self, version=MODEL_VERSION, vectors=None, enc_order=()):
        """Create a detector.

        :param version: model format version string.
        :param vectors: mapping of encoding name -> n-gram frequency vector.
            (Previously defaulted to a shared mutable ``{}``; a ``None``
            sentinel is used so every instance gets its own dict — ``train``
            mutates it.)
        :param enc_order: encodings ordered by importance, most important first.
        """
        self._version = version
        self._vectors = {} if vectors is None else vectors
        self._encodings_order = enc_order
    def get_version(self):
        """Return the model format version string."""
        return self._version
    def save(self, path):
        """
        Saves the model to the specified path.
        File format:
        general row: <verison><TAB><tuple length><TAB><encodings count>
        for each encoding:
            info row: <name><TAB><order><TAB><vector length>
            vector row: <key><packed value>...
        """
        with open(path, 'wb') as fp:
            #basic attributes
            fp.write('%s\t%d\t%d\n' %
                (self._version, self.VECTOR_TUPLE_LENGTH, len(self._vectors)))
            #vectors
            for enc, vector in self._vectors.iteritems():
                #encoding name, encoding order
                vect_len = len(vector)
                enc_order = self.get_encoding_order(enc)
                fp.write('%s\t%d\t%d\n' % (enc, enc_order, vect_len))
                #vector keys & values
                for k, v in vector.iteritems():
                    # values are stored as native-endian unsigned 32-bit ints
                    fp.write('%s%s' % (k, struct.pack('=I', v)))
                fp.write('\n')
    @classmethod
    def load(cls, path):
        """
        Loads the model from the specified path.
        Returns a new instance of EncodingDetector.
        """
        version = ''
        vectors = {}
        enc_order = {}
        with open(path, 'rb') as fp:
            #basic attributes
            version, vect_tuple_length, enc_count = fp.readline().split('\t')
            if MODEL_VERSION != version:
                sys.stderr.write('WARNING: Potentially incompatible model versions!\n')
                sys.stderr.write('\t%s: %s\n\tthis module: %s\n' % (path, version, MODEL_VERSION))
            vect_tuple_length = int(vect_tuple_length)
            #vectors
            for i in range(int(enc_count)):
                #encoding name, encoding order
                enc, order, vect_len = fp.readline().split('\t')
                enc_order[int(order)] = enc
                #vector keys & values
                vectors[enc] = {}
                for j in range(int(vect_len)):
                    key = fp.read(vect_tuple_length)
                    vectors[enc][key] = struct.unpack('=I', fp.read(4))[0]
                fp.read(1)
        # Rebuild the encoding order deterministically by the recorded order
        # index; dict.values() iteration order would scramble the ranking.
        ordered = [enc_order[order] for order in sorted(enc_order)]
        return cls(version, vectors, ordered)
    def vectorize(self, string):
        """
        Transforms the input strings into a frequency vector of n-grams of
        contained characters.
        Omits vector keys containing the encoding replacement character.
        """
        str_len = len(string)
        if self.VECTOR_TUPLE_LENGTH > str_len:
            return {}
        vector = {}
        # Sliding window of VECTOR_TUPLE_LENGTH characters over the input.
        for i in range(str_len - self.VECTOR_TUPLE_LENGTH + 1):
            key = string[i:i + self.VECTOR_TUPLE_LENGTH]
            if ENCODE_REPLACEMENT_CHARACTER not in key:
                vector[key] = vector.get(key, 0) + 1
        return vector
    def train(self, string, encoding):
        "Trains the detector. The input must be a string and its encoding."
        self._vectors[encoding] = self.vectorize(string)
    def set_encodings_order(self, encodings):
        """
        Defines the order (importance / frequency of use) of the encodings
        the classifier has been trained on. The input must be a list or a
        tuple of encodings. The first is the most important and the last is
        the least important.
        """
        if not isinstance(encodings, (tuple, list)):
            raise TypeError
        self._encodings_order = tuple(encodings)
    def get_encoding_order(self, encoding):
        """
        Returns the order of the encoding or sys.maxint if no order is
        defined for it.
        """
        if encoding in self._encodings_order:
            return self._encodings_order.index(encoding)
        return sys.maxint
    def classify(self, string):
        """
        Returns the predicted character encoding(s) for the input string as
        a list. The list may contain more than one element if there are
        multiple equally likely candidates. In this case, the candidates are
        returned in the order of importance (see set_encodings_order). Empty
        list may be returned if there are no valid candidates.
        """
        input_vector = self.vectorize(string)
        classification = []
        for clas, vector in self._vectors.iteritems():
            score = scalar_product(input_vector, vector)
            clas_info = {'clas': clas, 'score': score,
                'order': self.get_encoding_order(clas)}
            classification.append(clas_info)
        if not classification:
            return []
        #order result classes
        # 1.) by vector similarity score (higher score is better)
        # 2.) by the encoding order (lower index is better)
        classification.sort(lambda x, y:
            cmp(y['score'], x['score']) or cmp(x['order'], y['order']))
        #return a list of the top classes
        # the top classes have the same score and order as the first one
        first = classification[0]
        result = []
        for clas in classification:
            if first['score'] == clas['score']:
                result.append(clas['clas'])
        return result
    def reduce_vectors(self):
        """
        Remove the common parts of all vectors. Should be called after all
        training data has been loaded. Provided the training has been performed
        on the same data for all encodings, reducing vectors increases both
        efficiency and accuracy of the classification.
        """
        #get frequencies of (key, value) pairs
        key_value_count = {}
        for vect in self._vectors.values():
            for key, value in vect.iteritems():
                key_value_count[(key, value)] = key_value_count.get(
                    (key, value), 0) + 1
        #remove common parts of vectors (the (key, value) pairs with the
        #frequency equal to the number of vectors)
        encodings_count = len(self._vectors)
        for (key, value), count in key_value_count.iteritems():
            if count >= encodings_count:
                for vect in self._vectors.values():
                    # 'in' replaces Python-2-only dict.has_key()
                    if key in vect:
                        del vect[key]
|
<reponame>samarthdd/cdr-plugin-folder-to-folder
import json
from unittest import TestCase
import dotenv
import pytest
from cdr_plugin_folder_to_folder.configure.Configure_Env import Configure_Env
from osbot_utils.utils.Files import folder_exists, folder_delete_all
from os import environ,path,remove,rename
from unittest.mock import patch,Mock
class test_Configure_Env(TestCase):
    """Unit tests for Configure_Env.

    The class-level fixtures temporarily rename any discovered ``.env`` file
    to ``.env_backup`` so the tests do not pollute (or read) the real
    environment file, and restore it afterwards.
    """
    def setUp(self) -> None:
        # Fresh Configure_Env per test so last_error_message starts clean.
        self.configure = Configure_Env()
    @classmethod
    def setUpClass(cls) -> None:
        # Hide the project's .env (if any) for the duration of this class.
        cls._dotenv_file=dotenv.find_dotenv()
        if cls._dotenv_file :
            rename(cls._dotenv_file ,path.join(path.dirname(cls._dotenv_file),".env_backup"))
    @classmethod
    def tearDownClass(cls) -> None:
        # Restore the original .env that setUpClass moved aside.
        if cls._dotenv_file:
            rename(path.join(path.dirname(cls._dotenv_file), ".env_backup"),cls._dotenv_file)
    def test_configure(self):
        # Happy path: all three folders exist, so configure() succeeds and
        # exports the paths via environment variables.
        hd1_path = "./test_data/scenario-1/hd1"
        hd2_path = "./test_data/scenario-1/hd2"
        hd3_path = "./test_data/scenario-1/hd3"
        response=self.configure.configure(hd1_path=hd1_path,
                                          hd2_path=hd2_path,
                                          hd3_path=hd3_path)
        assert response is not None
        assert self.configure.last_error_message == ""
        self.assertEqual(environ["HD1_LOCATION"] , hd1_path)
        self.assertEqual(environ["HD2_LOCATION"] , hd2_path)
        self.assertEqual(environ["HD3_LOCATION"] , hd3_path)
    def test_invalid_hd1(self):
        # A missing hd1 folder is an error (hd1 is the input folder and is
        # not auto-created), reported via last_error_message.
        hd1_path = "./test_data/scenario-1/hd1xyz"
        hd2_path = "./test_data/scenario-1/hd2"
        hd3_path = "./test_data/scenario-1/hd3"
        response=self.configure.configure(hd1_path=hd1_path,
                                          hd2_path=hd2_path,
                                          hd3_path=hd3_path)
        assert self.configure.last_error_message != ""
        assert response is not None
    def test_invalid_hd2(self):
        # A missing hd2 folder is NOT an error: configure() creates it.
        # The test verifies the folder exists afterwards, then cleans up.
        hd1_path = "./test_data/scenario-1/hd1"
        hd2_path = "./test_data/scenario-1/hd2xyz"
        hd3_path = "./test_data/scenario-1/hd3"
        response=self.configure.configure(hd1_path=hd1_path,
                                          hd2_path=hd2_path,
                                          hd3_path=hd3_path)
        assert self.configure.last_error_message == ""
        assert response is not None
        assert folder_exists(hd2_path)
        folder_delete_all(hd2_path)
    def test_invalid_hd3(self):
        # Same as hd2: a missing hd3 folder is created on demand.
        hd1_path = "./test_data/scenario-1/hd1"
        hd2_path = "./test_data/scenario-1/hd2"
        hd3_path = "./test_data/scenario-1/hd3xyz"
        response=self.configure.configure(hd1_path=hd1_path,
                                          hd2_path=hd2_path,
                                          hd3_path=hd3_path)
        assert self.configure.last_error_message == ""
        assert response is not None
        assert folder_exists(hd3_path)
        folder_delete_all(hd3_path)
    @pytest.mark.skip("this is breaking current .env file (this needs to run on a temp .env file)")
    @patch("cdr_plugin_folder_to_folder.configure.Configure_Env.Configure_Env.get_valid_endpoints")
    def test_configure_multiple_gw_sdk_endpoints(self,mock_get_valid_endpoints):
        # configure_endpoints should keep only the endpoints that
        # get_valid_endpoints reports as reachable.
        endpoint_string = '{"Endpoints":[{"IP":"0.0.0.0", "Port":"8080"},{"IP":"0.0.0.1", "Port":"8080"}]}'
        expected_return_value = '{"Endpoints":[{"IP":"0.0.0.0", "Port":"8080"}]}'
        mock_get_valid_endpoints.return_value = expected_return_value
        response=self.configure.configure_endpoints(endpoint_string=endpoint_string)
        assert response is not None
        self.assertEqual(response , json.loads(expected_return_value))
    @patch("cdr_plugin_folder_to_folder.configure.Configure_Env.Configure_Env.gw_sdk_healthcheck")
    def test_get_valid_endpoints(self,mock_gw_sdk_healthcheck):
        # With the default mock (no status_code set) the endpoint is not
        # considered healthy and no endpoints are returned; once the
        # healthcheck reports 200 the endpoint list is echoed back.
        endpoint_string = '{"Endpoints":[{"IP":"0.0.0.0", "Port":"8080"}]}'
        response = self.configure.get_valid_endpoints(endpoint_string=endpoint_string)
        assert response is None
        mock_gw_sdk_healthcheck.return_value.status_code = 200
        response = self.configure.get_valid_endpoints(endpoint_string=endpoint_string)
        self.assertEqual(json.loads(response) , json.loads(endpoint_string))
    @patch("requests.request")
    def test_gw_sdk_healthcheck(self,mock_request):
        # gw_sdk_healthcheck should pass the HTTP response through untouched,
        # even for error status codes.
        mock_request.return_value.status_code=404
        server_url="http://0.0.0.1:8800"
        response = self.configure.gw_sdk_healthcheck(server_url)
        assert response is not None
        assert response.status_code == 404
|
"""
TODO: Not complete.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from itertools import count, cycle
from typing import Hashable, get_args
from EasyNN._abc import AutoDocumentation
from EasyNN.typing import Array1D, Command
import EasyNN.model.abc
class Optimizer(AutoDocumentation, ABC):
    """
    Abstract Base Class for the Optimizer.
    """
    # Models registered with this optimizer, keyed by an arbitrary hashable id.
    models: dict[Hashable, EasyNN.model.abc.Model]
    # Learning rate assigned to every model during setup().
    lr: float
    def __init__(self: Optimizer, models: dict[Hashable, EasyNN.model.abc.Model] | None = None, lr: float = 1e-2) -> None:
        # None sentinel: a fresh dict per instance (a dict default would be shared).
        self.models = dict() if models is None else models
        self.lr = lr
    def add(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Add a model to the optimizer under the first unused integer key."""
        for i in count(len(self.models)):
            if i not in self.models:
                self.models[i] = model
                break
    def train(self: Optimizer, *models: EasyNN.model.abc.Model) -> tuple[EasyNN.model.abc.Model, ...]:
        """
        Train the models.
        Example:
            >>> optimizer.train(model_1, model_2)
            (model_1, model_2)
        """
        # By default, use the saved optimizer models.
        if len(models) == 0:
            models = tuple(self.models.values())
        # Nothing to train: return the empty tuple instead of falling
        # through cycle(()) and implicitly returning None.
        if not models:
            return models
        # Setup the model commands.
        for model in models:
            self.setup(model)
        # Cycle through each model.
        for model in cycle(models):
            # Run the next command for the model and stop if there are no more.
            if next(model.commands, None) is None:
                return models
    def setup(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Setup a model: assign the learning rate, register the optimizer's
        callbacks, and start the model's command generator."""
        # Assign learning rates.
        model._optimizer_lr = self.lr
        # Add on the optimizer commands ("off" is not a real command).
        for command in set(get_args(Command)) - {"off"}:
            model.callback(command)(getattr(self, command))
        # Begin running model commands.
        model.commands = model.optimizer_commands()
    def get_derivatives(self: Optimizer, model: EasyNN.model.abc.Model) -> Array1D[float]:
        """Computes the derivatives for the optimizer, choosing the sample
        set (training/testing/validation) from the model's current command."""
        # Use the testing/validation samples.
        if model.command.startswith("on_testing"):
            model.sample_derivatives(*model.testing.sample)
        elif model.command.startswith("on_validation"):
            model.sample_derivatives(*model.validation.sample)
        # Use the training sample by default.
        else:
            model.sample_derivatives(*model.training.sample)
        return model.derivatives
    def on_optimization_start(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the start of optimization."""
    def on_optimization_end(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the end of optimization."""
    @abstractmethod
    def on_training_start(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the start of every training iteration. Required for optimizers."""
        raise NotImplementedError
    def on_training_end(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the end of every training iteration."""
    def on_testing_start(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the start of every testing iteration. This is initialized after """
    def on_testing_end(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the end of every testing iteration."""
    def on_validation_start(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the start of every validation iteration. By default, attempts to tune the learning rate."""
        # Probe two candidate learning rates (2x and 0.5x the current one) by
        # running one training step from the same parameters and comparing
        # validation losses, then nudge the learning rate toward the winner.
        parameters = model.parameters.copy()
        model._optimizer_lr *= 2
        self.on_training_start(model)
        loss_1 = model.loss(*model.validation.sample)
        model.parameters = parameters
        model._optimizer_lr *= 0.25
        self.on_training_start(model)
        loss_2 = model.loss(*model.validation.sample)
        model.parameters = parameters
        if loss_1 < loss_2:
            model._optimizer_lr *= 1.0625 / 0.5
        else:
            model._optimizer_lr *= 0.875 / 0.5
        # NOTE(review): debug print left in on purpose? Consider logging instead.
        print(f"    {model._optimizer_lr = }")
    def on_validation_end(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the end of every validation iteration."""
    def on_epoch_start(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the start of every epoch."""
    def on_epoch_end(self: Optimizer, model: EasyNN.model.abc.Model) -> None:
        """Ran at the end of every epoch."""
|
<reponame>domwillcode/home-assistant<filename>homeassistant/components/tplink/light.py
"""Support for TPLink lights."""
from datetime import timedelta
import logging
import time
from typing import Any, Dict, NamedTuple, Tuple, cast
from pyHS100 import SmartBulb, SmartDeviceException
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
import homeassistant.util.dt as dt_util
from . import CONF_LIGHT, DOMAIN as TPLINK_DOMAIN
from .common import async_add_entities_retry
# Home Assistant polling configuration for this platform.
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=5)
# Energy-meter data is refreshed far less often than the light state.
CURRENT_POWER_UPDATE_INTERVAL = timedelta(seconds=60)
HISTORICAL_POWER_UPDATE_INTERVAL = timedelta(minutes=60)
_LOGGER = logging.getLogger(__name__)
# Extra state attribute names exposed for bulbs with an energy meter.
ATTR_CURRENT_POWER_W = "current_power_w"
ATTR_DAILY_ENERGY_KWH = "daily_energy_kwh"
ATTR_MONTHLY_ENERGY_KWH = "monthly_energy_kwh"
# Keys of the light-state dict exchanged with the pyHS100 device API.
LIGHT_STATE_DFT_ON = "dft_on_state"
LIGHT_STATE_ON_OFF = "on_off"
LIGHT_STATE_RELAY_STATE = "relay_state"
LIGHT_STATE_BRIGHTNESS = "brightness"
LIGHT_STATE_COLOR_TEMP = "color_temp"
LIGHT_STATE_HUE = "hue"
LIGHT_STATE_SATURATION = "saturation"
LIGHT_STATE_ERROR_MSG = "err_msg"
# Keys of the device sysinfo dict reported by pyHS100.
LIGHT_SYSINFO_MAC = "mac"
LIGHT_SYSINFO_ALIAS = "alias"
LIGHT_SYSINFO_MODEL = "model"
LIGHT_SYSINFO_IS_DIMMABLE = "is_dimmable"
LIGHT_SYSINFO_IS_VARIABLE_COLOR_TEMP = "is_variable_color_temp"
LIGHT_SYSINFO_IS_COLOR = "is_color"
async def async_setup_entry(hass: HomeAssistantType, config_entry, async_add_entities):
    """Set up switches."""
    # Devices discovered for this platform were stashed under the
    # integration's domain data by the component setup.
    devices = hass.data[TPLINK_DOMAIN][CONF_LIGHT]
    await async_add_entities_retry(hass, async_add_entities, devices, add_entity)
    return True
def add_entity(device: SmartBulb, async_add_entities):
    """Check if device is online and add the entity."""
    # get_sysinfo() raises when the device is unreachable; the exception
    # propagates to async_add_entities_retry, which retries later.
    device.get_sysinfo()
    entity = TPLinkSmartBulb(device)
    async_add_entities([entity], update_before_add=True)
def brightness_to_percentage(byt):
    """Convert brightness from absolute 0..255 to percentage."""
    return round(byt * 100.0 / 255.0)
def brightness_from_percentage(percent):
    """Convert percentage to absolute value 0..255."""
    return round(percent * 255.0 / 100.0)
class LightState(NamedTuple):
    """Light state."""
    state: bool
    brightness: int
    color_temp: float
    hs: Tuple[int, int]
    def to_param(self):
        """Return a version that we can send to the bulb."""
        # The device wants kelvin; 0/None means "no color temperature".
        color_temp = mired_to_kelvin(self.color_temp) if self.color_temp else None
        hue, saturation = self.hs if self.hs else (0, 0)
        return {
            LIGHT_STATE_ON_OFF: 1 if self.state else 0,
            LIGHT_STATE_BRIGHTNESS: brightness_to_percentage(self.brightness),
            LIGHT_STATE_COLOR_TEMP: color_temp,
            LIGHT_STATE_HUE: hue,
            LIGHT_STATE_SATURATION: saturation,
        }
class LightFeatures(NamedTuple):
    """Light features."""
    sysinfo: Dict[str, Any]  # raw sysinfo dict from the device
    mac: str  # MAC address, used as the unique entity id
    alias: str  # user-assigned device name
    model: str  # hardware model string
    supported_features: int  # bitmask of SUPPORT_* flags
    min_mireds: float  # None when color temperature is unsupported
    max_mireds: float  # None when color temperature is unsupported
    has_emeter: bool  # whether the device has an energy meter
class TPLinkSmartBulb(LightEntity):
    """Representation of a TPLink Smart Bulb."""
    def __init__(self, smartbulb: SmartBulb) -> None:
        """Initialize the bulb."""
        self.smartbulb = smartbulb
        # Both caches start as typed None and are populated on first update().
        self._light_features = cast(LightFeatures, None)
        self._light_state = cast(LightState, None)
        self._is_available = True
        # Guard flag: update() is skipped while a state write is in flight.
        self._is_setting_light_state = False
        self._last_current_power_update = None
        self._last_historical_power_update = None
        self._emeter_params = {}
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._light_features.mac
    @property
    def name(self):
        """Return the name of the Smart Bulb."""
        return self._light_features.alias
    @property
    def device_info(self):
        """Return information about the device."""
        return {
            "name": self._light_features.alias,
            "model": self._light_features.model,
            "manufacturer": "TP-Link",
            "connections": {(dr.CONNECTION_NETWORK_MAC, self._light_features.mac)},
            "sw_version": self._light_features.sysinfo["sw_ver"],
        }
    @property
    def available(self) -> bool:
        """Return if bulb is available."""
        return self._is_available
    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._emeter_params
    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        # Brightness: explicit request > last known value > full brightness.
        if ATTR_BRIGHTNESS in kwargs:
            brightness = int(kwargs[ATTR_BRIGHTNESS])
        elif self._light_state.brightness is not None:
            brightness = self._light_state.brightness
        else:
            brightness = 255
        if ATTR_COLOR_TEMP in kwargs:
            color_tmp = int(kwargs[ATTR_COLOR_TEMP])
        else:
            color_tmp = self._light_state.color_temp
        if ATTR_HS_COLOR in kwargs:
            # TP-Link requires integers.
            hue_sat = tuple(int(val) for val in kwargs[ATTR_HS_COLOR])
            # TP-Link cannot have both color temp and hue_sat
            color_tmp = 0
        else:
            hue_sat = self._light_state.hs
        await self._async_set_light_state_retry(
            self._light_state,
            self._light_state._replace(
                state=True, brightness=brightness, color_temp=color_tmp, hs=hue_sat,
            ),
        )
    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        await self._async_set_light_state_retry(
            self._light_state, self._light_state._replace(state=False),
        )
    @property
    def min_mireds(self):
        """Return minimum supported color temperature."""
        return self._light_features.min_mireds
    @property
    def max_mireds(self):
        """Return maximum supported color temperature."""
        return self._light_features.max_mireds
    @property
    def color_temp(self):
        """Return the color temperature of this light in mireds for HA."""
        return self._light_state.color_temp
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._light_state.brightness
    @property
    def hs_color(self):
        """Return the color."""
        return self._light_state.hs
    @property
    def is_on(self):
        """Return True if device is on."""
        return self._light_state.state
    def update(self):
        """Update the TP-Link Bulb's state."""
        # State is currently being set, ignore.
        if self._is_setting_light_state:
            return
        try:
            # Update light features only once.
            if not self._light_features:
                self._light_features = self._get_light_features_retry()
            self._light_state = self._get_light_state_retry()
            self._is_available = True
        except (SmartDeviceException, OSError) as ex:
            # Only log on the transition to unavailable, not on every poll.
            if self._is_available:
                _LOGGER.warning(
                    "Could not read data for %s: %s", self.smartbulb.host, ex
                )
            self._is_available = False
    @property
    def supported_features(self):
        """Flag supported features."""
        return self._light_features.supported_features
    def _get_light_features_retry(self) -> LightFeatures:
        """Retry the retrieval of the supported features."""
        try:
            return self._get_light_features()
        except (SmartDeviceException, OSError):
            pass
        # Second (and last) attempt; a failure here propagates to update().
        _LOGGER.debug("Retrying getting light features")
        return self._get_light_features()
    def _get_light_features(self):
        """Determine all supported features in one go."""
        sysinfo = self.smartbulb.sys_info
        supported_features = 0
        # Calling api here as it reformats
        mac = self.smartbulb.mac
        alias = sysinfo[LIGHT_SYSINFO_ALIAS]
        model = sysinfo[LIGHT_SYSINFO_MODEL]
        min_mireds = None
        max_mireds = None
        has_emeter = self.smartbulb.has_emeter
        if sysinfo.get(LIGHT_SYSINFO_IS_DIMMABLE) or LIGHT_STATE_BRIGHTNESS in sysinfo:
            supported_features += SUPPORT_BRIGHTNESS
        if sysinfo.get(LIGHT_SYSINFO_IS_VARIABLE_COLOR_TEMP):
            supported_features += SUPPORT_COLOR_TEMP
            # Have to make another api request here in
            # order to not re-implement pyHS100 here
            max_range, min_range = self.smartbulb.valid_temperature_range
            min_mireds = kelvin_to_mired(min_range)
            max_mireds = kelvin_to_mired(max_range)
        if sysinfo.get(LIGHT_SYSINFO_IS_COLOR):
            supported_features += SUPPORT_COLOR
        return LightFeatures(
            sysinfo=sysinfo,
            mac=mac,
            alias=alias,
            model=model,
            supported_features=supported_features,
            min_mireds=min_mireds,
            max_mireds=max_mireds,
            has_emeter=has_emeter,
        )
    def _get_light_state_retry(self) -> LightState:
        """Retry the retrieval of getting light states."""
        try:
            return self._get_light_state()
        except (SmartDeviceException, OSError):
            pass
        _LOGGER.debug("Retrying getting light state")
        return self._get_light_state()
    def _light_state_from_params(self, light_state_params) -> LightState:
        """Build a LightState from a raw device state dict, honoring the
        features this device actually supports."""
        brightness = None
        color_temp = None
        hue_saturation = None
        light_features = self._light_features
        state = bool(light_state_params[LIGHT_STATE_ON_OFF])
        # When off, the device reports the state it will restore on power-on
        # under the dft_on_state key.
        if not state and LIGHT_STATE_DFT_ON in light_state_params:
            light_state_params = light_state_params[LIGHT_STATE_DFT_ON]
        if light_features.supported_features & SUPPORT_BRIGHTNESS:
            brightness = brightness_from_percentage(
                light_state_params[LIGHT_STATE_BRIGHTNESS]
            )
        if light_features.supported_features & SUPPORT_COLOR_TEMP:
            # 0 / missing means "no color temperature set".
            if (
                light_state_params.get(LIGHT_STATE_COLOR_TEMP) is not None
                and light_state_params[LIGHT_STATE_COLOR_TEMP] != 0
            ):
                color_temp = kelvin_to_mired(light_state_params[LIGHT_STATE_COLOR_TEMP])
        if light_features.supported_features & SUPPORT_COLOR:
            hue_saturation = (
                light_state_params[LIGHT_STATE_HUE],
                light_state_params[LIGHT_STATE_SATURATION],
            )
        return LightState(
            state=state,
            brightness=brightness,
            color_temp=color_temp,
            hs=hue_saturation,
        )
    def _get_light_state(self) -> LightState:
        """Get the light state."""
        # Piggy-back the (rate-limited) emeter refresh on the state poll.
        self._update_emeter()
        return self._light_state_from_params(self._get_device_state())
    def _update_emeter(self):
        """Refresh cached energy-meter attributes, rate-limited per interval."""
        if not self._light_features.has_emeter:
            return
        now = dt_util.utcnow()
        if (
            not self._last_current_power_update
            or self._last_current_power_update + CURRENT_POWER_UPDATE_INTERVAL < now
        ):
            self._last_current_power_update = now
            self._emeter_params[ATTR_CURRENT_POWER_W] = "{:.1f}".format(
                self.smartbulb.current_consumption()
            )
        if (
            not self._last_historical_power_update
            or self._last_historical_power_update + HISTORICAL_POWER_UPDATE_INTERVAL
            < now
        ):
            self._last_historical_power_update = now
            daily_statistics = self.smartbulb.get_emeter_daily()
            monthly_statistics = self.smartbulb.get_emeter_monthly()
            try:
                # Statistics are keyed by day-of-month / month number.
                self._emeter_params[ATTR_DAILY_ENERGY_KWH] = "{:.3f}".format(
                    daily_statistics[int(time.strftime("%d"))]
                )
                self._emeter_params[ATTR_MONTHLY_ENERGY_KWH] = "{:.3f}".format(
                    monthly_statistics[int(time.strftime("%m"))]
                )
            except KeyError:
                # device returned no daily/monthly history
                pass
    async def _async_set_light_state_retry(
        self, old_light_state: LightState, new_light_state: LightState
    ) -> None:
        """Set the light state with retry."""
        # Tell the device to set the states.
        if not _light_state_diff(old_light_state, new_light_state):
            # Nothing to do, avoid the executor
            return
        self._is_setting_light_state = True
        try:
            light_state_params = await self.hass.async_add_executor_job(
                self._set_light_state, old_light_state, new_light_state
            )
            self._is_available = True
            self._is_setting_light_state = False
            if LIGHT_STATE_ERROR_MSG in light_state_params:
                raise HomeAssistantError(light_state_params[LIGHT_STATE_ERROR_MSG])
            # Cache the state the device reported back instead of re-polling.
            self._light_state = self._light_state_from_params(light_state_params)
            return
        except (SmartDeviceException, OSError):
            pass
        # One retry after a communication failure.
        try:
            _LOGGER.debug("Retrying setting light state")
            light_state_params = await self.hass.async_add_executor_job(
                self._set_light_state, old_light_state, new_light_state
            )
            self._is_available = True
            if LIGHT_STATE_ERROR_MSG in light_state_params:
                raise HomeAssistantError(light_state_params[LIGHT_STATE_ERROR_MSG])
            self._light_state = self._light_state_from_params(light_state_params)
        except (SmartDeviceException, OSError) as ex:
            self._is_available = False
            _LOGGER.warning("Could not set data for %s: %s", self.smartbulb.host, ex)
        # Always clear the guard so update() polling resumes.
        self._is_setting_light_state = False
    def _set_light_state(
        self, old_light_state: LightState, new_light_state: LightState
    ) -> Any:
        """Set the light state.

        Returns the device state params dict after applying the diff, or
        None when there is nothing to change. (Annotation corrected from
        ``-> None``: callers inspect the returned params.)
        """
        diff = _light_state_diff(old_light_state, new_light_state)
        if not diff:
            return
        return self._set_device_state(diff)
    def _get_device_state(self):
        """State of the bulb or smart dimmer switch."""
        if isinstance(self.smartbulb, SmartBulb):
            return self.smartbulb.get_light_state()
        sysinfo = self.smartbulb.sys_info
        # Its not really a bulb, its a dimmable SmartPlug (aka Wall Switch)
        return {
            LIGHT_STATE_ON_OFF: sysinfo[LIGHT_STATE_RELAY_STATE],
            LIGHT_STATE_BRIGHTNESS: sysinfo.get(LIGHT_STATE_BRIGHTNESS, 0),
            LIGHT_STATE_COLOR_TEMP: 0,
            LIGHT_STATE_HUE: 0,
            LIGHT_STATE_SATURATION: 0,
        }
    def _set_device_state(self, state):
        """Set state of the bulb or smart dimmer switch."""
        if isinstance(self.smartbulb, SmartBulb):
            return self.smartbulb.set_light_state(state)
        # Its not really a bulb, its a dimmable SmartPlug (aka Wall Switch)
        if LIGHT_STATE_BRIGHTNESS in state:
            # Brightness of 0 is accepted by the
            # device but the underlying library rejects it
            # so we turn off instead.
            if state[LIGHT_STATE_BRIGHTNESS]:
                self.smartbulb.brightness = state[LIGHT_STATE_BRIGHTNESS]
            else:
                self.smartbulb.state = self.smartbulb.SWITCH_STATE_OFF
        elif LIGHT_STATE_ON_OFF in state:
            if state[LIGHT_STATE_ON_OFF]:
                self.smartbulb.state = self.smartbulb.SWITCH_STATE_ON
            else:
                self.smartbulb.state = self.smartbulb.SWITCH_STATE_OFF
        return self._get_device_state()
def _light_state_diff(old_light_state: LightState, new_light_state: LightState):
    """Return the params of the new state whose values differ from the old."""
    previous = old_light_state.to_param()
    diff = {}
    for key, value in new_light_state.to_param().items():
        if previous.get(key) != value:
            diff[key] = value
    return diff
|
<filename>src/wizard/view/clsAddNewUnitPanel.py
import wx
from src.wizard.controller.frmRequiredValidator \
import RequiredValidator
from src.wizard.controller.frmRequiredComboValidator \
import RequiredComboValidator
class AddNewUnitPanelView ( wx.Panel ):
    """Form panel for entering a new measurement unit.

    Required fields: units name, units type (combo) and abbreviation, each
    backed by a validator; optional field: units link.  OK/Cancel buttons
    sit in a standard dialog button sizer at the bottom.
    """

    def __init__( self, parent ):
        """Build the static layout; combo-box choices are filled in elsewhere."""
        wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 409,257 ), style = wx.TAB_TRAVERSAL )
        self.SetMinSize(wx.Size(420, 257))
        bSizer33 = wx.BoxSizer( wx.VERTICAL )
        # ---- Required fields box ----
        sbSizer7 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Required Fields:" ), wx.VERTICAL )
        # Row: Units Name (required text field)
        bSizer34 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText30 = wx.StaticText( sbSizer7.GetStaticBox(), wx.ID_ANY, u"Units Name", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText30.Wrap( -1 )
        bSizer34.Add( self.m_staticText30, 0, wx.ALL, 5 )
        # NOTE(review): AddSpacer with a (w, h) tuple is the classic-wxPython
        # API; newer wxPython takes a single int -- confirm the wx version.
        bSizer34.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_textCtrl26 = wx.TextCtrl( sbSizer7.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 280,-1 ), validator=RequiredValidator())
        bSizer34.Add( self.m_textCtrl26, 0, wx.ALL, 5 )
        sbSizer7.Add( bSizer34, 1, wx.EXPAND, 5 )
        # Row: Units Type (required, read-only combo box)
        bSizer35 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText31 = wx.StaticText( sbSizer7.GetStaticBox(), wx.ID_ANY, u"Units Type", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText31.Wrap( -1 )
        bSizer35.Add( self.m_staticText31, 0, wx.ALL, 5 )
        bSizer35.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_comboBox13 = wx.ComboBox(sbSizer7.GetStaticBox(), wx.ID_ANY, u"Select Units Type", style=wx.CB_READONLY, validator=RequiredComboValidator())
        self.m_comboBox13.SetMinSize( wx.Size( 280,-1 ) )
        bSizer35.Add( self.m_comboBox13, 0, wx.ALL, 5 )
        sbSizer7.Add( bSizer35, 1, wx.EXPAND, 5 )
        # Row: Units Abbreviation (required text field)
        bSizer341 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText301 = wx.StaticText( sbSizer7.GetStaticBox(), wx.ID_ANY, u"Units Abbreviation", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText301.Wrap( -1 )
        bSizer341.Add( self.m_staticText301, 0, wx.ALL, 5 )
        bSizer341.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_textCtrl261 = wx.TextCtrl( sbSizer7.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 280,-1 ),validator=RequiredValidator() )
        bSizer341.Add( self.m_textCtrl261, 0, wx.ALL, 5 )
        sbSizer7.Add( bSizer341, 1, wx.EXPAND, 5 )
        bSizer33.Add( sbSizer7, 1, wx.EXPAND, 5 )
        # ---- Optional fields box ----
        sbSizer8 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Optional Fields:" ), wx.VERTICAL )
        # Row: Units Link (optional text field, no validator)
        bSizer39 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText34 = wx.StaticText( sbSizer8.GetStaticBox(), wx.ID_ANY, u"Units Link", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText34.Wrap( -1 )
        bSizer39.Add( self.m_staticText34, 0, wx.ALL, 5 )
        bSizer39.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_textCtrl29 = wx.TextCtrl( sbSizer8.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_textCtrl29.SetMinSize( wx.Size( 280,-1 ) )
        bSizer39.Add( self.m_textCtrl29, 0, wx.ALL, 5 )
        sbSizer8.Add( bSizer39, 1, wx.EXPAND, 5 )
        bSizer33.Add( sbSizer8, 1, wx.EXPAND, 5 )
        # ---- OK / Cancel buttons ----
        m_sdbSizer3 = wx.StdDialogButtonSizer()
        self.ok_button = wx.Button(self, wx.ID_OK)
        m_sdbSizer3.AddButton(self.ok_button)
        self.m_sdbSizer3Cancel = wx.Button( self, wx.ID_CANCEL )
        m_sdbSizer3.AddButton( self.m_sdbSizer3Cancel )
        m_sdbSizer3.Realize()
        bSizer33.Add( m_sdbSizer3, 1, wx.EXPAND, 5 )
        self.ok_button.Bind(wx.EVT_BUTTON, self.onOK)
        self.SetSizer( bSizer33 )
        self.Layout()

    def onOK(self, event):
        """OK-button handler; no-op here -- presumably overridden or handled
        by a controller. TODO confirm against callers."""
        pass
|
<reponame>GudniNatan/GSKI-PA6<filename>ui/ui.py
import typing
from dataclasses import asdict, fields
from ui.menu import Menu
from my_dataclasses import Sport, Member, Plays, Group
class UI(object):
    """Class for quick UI shortcuts (console prompts built on Menu)."""

    def get_member(self):
        """Prompt for the fields of a new member and return the Member."""
        print("Enter member info:")
        name = input("Name: ")
        phone = input("Phone: ")
        email = input("Email: ")
        year_of_birth = self.get_int("Year of birth")
        new_member = Member(name, phone, email, year_of_birth)
        print("New member:", new_member)
        return new_member

    def update_member(self, old_member):
        """Prompt for replacement values for *old_member*; return a new Member."""
        print(f"Update member {old_member}:")
        name = input(f"Old name: {old_member.name}\nNew name: ")
        phone = input(f"Old phone: {old_member.phone}\nNew phone: ")
        email = input(f"Old email: {old_member.email}\nNew email: ")
        print(f"Old year of birth: {old_member.year_of_birth}")
        year_of_birth = self.get_int("New year of birth")
        new_member = Member(name, phone, email, year_of_birth)
        print("Updated member:", new_member, "\n")
        return new_member

    def new_sport(self, message="Create new sport:"):
        """Prompt for a sport name and return the new Sport."""
        print(message)
        name = input("Name: ")
        new_sport = Sport(name)
        print("New sport:", new_sport, "\n")
        return new_sport

    def get_int(self, fieldname):
        """Prompt repeatedly until the user enters a valid integer; return it."""
        value = None
        while value is None:
            try:
                value = int(input(fieldname + ": "))
            except ValueError:
                print(fieldname, "should be a whole number.")
        return value

    def new_group(self, sport):
        """Prompt for the fields of a new group for *sport*; return the Group."""
        print("Enter group info:")
        age_from = self.get_int("Age from")
        age_to = self.get_int("Age to")
        max_size = self.get_int("Max size")
        new_group = Group(sport, age_from, age_to, max_size)
        print("New group", new_group, "\n")
        return new_group

    def choose(self, items: typing.Iterable, message: str = None):
        """Let user pick from list, returns picked item."""
        options = {str(item): item for item in items}
        message = "Choose an item:\n" if message is None else message
        menu = Menu(message, options)
        # The displayed string is not needed, only the selected item.
        _, item = menu.get_input()
        return item

    def view_info(self, dataclass_instance):
        """Print detailed info about *dataclass_instance*."""
        print(self.get_info(dataclass_instance))

    def get_info(self, dataclass_instance) -> str:
        """Get a string with detailed info about this dataclass instance."""
        item = asdict(dataclass_instance)
        class_type = type(dataclass_instance)
        text = f"Detailed info for this {class_type.__name__}:\n"
        for key, value in item.items():
            # Humanize field names: "year_of_birth" -> "Year of birth".
            key = str(key).replace("_", " ").capitalize()
            text += "\n" + key + ": " + str(value)
        return text + "\n"

    def search(self, dataclass) -> dict:
        """Start a search for specific item, returns search parameters.

        A field left blank maps to None ("do not filter on this field");
        a value that cannot be converted to the field's type also maps to None.
        """
        print(f"Search {dataclass.__name__} repository")
        print("Leave a field blank to not search with it")
        parameters = dict()
        for field in fields(dataclass):
            raw = input(field.name + ": ")
            if not raw:
                # Fix: previously a blank str field produced "" (which was
                # then searched with); blank now uniformly means "skip".
                parameters[field.name] = None
                continue
            try:
                # assumes field.type is a real type object, not a string
                # annotation -- TODO confirm no `from __future__ import annotations`
                parameters[field.name] = field.type(raw)
            except ValueError:
                parameters[field.name] = None
        print()
        return parameters

    def search_result_choice(self, results, next, back, order_field=None,
                             message=""):
        """Get user choice from search results.

        Returns (item, next) for a picked result, or (None, back) when the
        user picks "Back".  (`next` shadows the builtin but is kept for
        caller compatibility.)
        """
        options = [("Back", back)] + [(str(item), item) for item in results]
        menu_msg = ""
        if order_field == "sports":
            menu_msg = "Ordered based on the first alphabetically ordered "
            menu_msg += "sport users are registered for. \nUsers not "
            menu_msg += "registered for any sports are hidden.\n"
        if message:
            menu_msg += message
        else:
            menu_msg += "Search results:"
        menu = Menu(menu_msg, options)
        string, item = menu.get_input()
        if string == "Back":
            return None, item
        return item, next

    def operation_result(self, result_message, undo_op, continue_op):
        """Show an operation's result and let the user undo or continue."""
        result_menu = Menu("Operation result:\n" + result_message,
                           {"Undo": undo_op, "Continue": continue_op})
        key, operation = result_menu.get_input()
        return operation
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'DatabaseVulnerabilityAssessmentRuleBaselineItemResponse',
'ElasticPoolPerDatabaseSettingsResponse',
'InstanceFailoverGroupReadOnlyEndpointResponse',
'InstanceFailoverGroupReadWriteEndpointResponse',
'ManagedInstancePairInfoResponse',
'PartnerRegionInfoResponse',
'SkuResponse',
'VulnerabilityAssessmentRecurringScansPropertiesResponse',
]
@pulumi.output_type
class DatabaseVulnerabilityAssessmentRuleBaselineItemResponse(dict):
    """
    Properties for an Azure SQL Database Vulnerability Assessment rule baseline's result.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 result: Sequence[str]):
        """
        Properties for an Azure SQL Database Vulnerability Assessment rule baseline's result.
        :param Sequence[str] result: The rule baseline result
        """
        pulumi.set(__self__, "result", result)

    @property
    @pulumi.getter
    def result(self) -> Sequence[str]:
        """
        The rule baseline result
        """
        return pulumi.get(self, "result")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ElasticPoolPerDatabaseSettingsResponse(dict):
    """
    Per database settings of an elastic pool.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 max_capacity: Optional[float] = None,
                 min_capacity: Optional[float] = None):
        """
        Per database settings of an elastic pool.
        :param float max_capacity: The maximum capacity any one database can consume.
        :param float min_capacity: The minimum capacity all databases are guaranteed.
        """
        # Unset optional fields are omitted entirely from the wire dict.
        if max_capacity is not None:
            pulumi.set(__self__, "max_capacity", max_capacity)
        if min_capacity is not None:
            pulumi.set(__self__, "min_capacity", min_capacity)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> Optional[float]:
        """
        The maximum capacity any one database can consume.
        """
        return pulumi.get(self, "max_capacity")

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> Optional[float]:
        """
        The minimum capacity all databases are guaranteed.
        """
        return pulumi.get(self, "min_capacity")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InstanceFailoverGroupReadOnlyEndpointResponse(dict):
    """
    Read-only endpoint of the failover group instance.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 failover_policy: Optional[str] = None):
        """
        Read-only endpoint of the failover group instance.
        :param str failover_policy: Failover policy of the read-only endpoint for the failover group.
        """
        # Unset optional fields are omitted entirely from the wire dict.
        if failover_policy is not None:
            pulumi.set(__self__, "failover_policy", failover_policy)

    @property
    @pulumi.getter(name="failoverPolicy")
    def failover_policy(self) -> Optional[str]:
        """
        Failover policy of the read-only endpoint for the failover group.
        """
        return pulumi.get(self, "failover_policy")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InstanceFailoverGroupReadWriteEndpointResponse(dict):
    """
    Read-write endpoint of the failover group instance.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 failover_policy: str,
                 failover_with_data_loss_grace_period_minutes: Optional[int] = None):
        """
        Read-write endpoint of the failover group instance.
        :param str failover_policy: Failover policy of the read-write endpoint for the failover group. If failoverPolicy is Automatic then failoverWithDataLossGracePeriodMinutes is required.
        :param int failover_with_data_loss_grace_period_minutes: Grace period before failover with data loss is attempted for the read-write endpoint. If failoverPolicy is Automatic then failoverWithDataLossGracePeriodMinutes is required.
        """
        pulumi.set(__self__, "failover_policy", failover_policy)
        # Unset optional fields are omitted entirely from the wire dict.
        if failover_with_data_loss_grace_period_minutes is not None:
            pulumi.set(__self__, "failover_with_data_loss_grace_period_minutes", failover_with_data_loss_grace_period_minutes)

    @property
    @pulumi.getter(name="failoverPolicy")
    def failover_policy(self) -> str:
        """
        Failover policy of the read-write endpoint for the failover group. If failoverPolicy is Automatic then failoverWithDataLossGracePeriodMinutes is required.
        """
        return pulumi.get(self, "failover_policy")

    @property
    @pulumi.getter(name="failoverWithDataLossGracePeriodMinutes")
    def failover_with_data_loss_grace_period_minutes(self) -> Optional[int]:
        """
        Grace period before failover with data loss is attempted for the read-write endpoint. If failoverPolicy is Automatic then failoverWithDataLossGracePeriodMinutes is required.
        """
        return pulumi.get(self, "failover_with_data_loss_grace_period_minutes")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedInstancePairInfoResponse(dict):
    """
    Pairs of Managed Instances in the failover group.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 partner_managed_instance_id: Optional[str] = None,
                 primary_managed_instance_id: Optional[str] = None):
        """
        Pairs of Managed Instances in the failover group.
        :param str partner_managed_instance_id: Id of Partner Managed Instance in pair.
        :param str primary_managed_instance_id: Id of Primary Managed Instance in pair.
        """
        # Unset optional fields are omitted entirely from the wire dict.
        if partner_managed_instance_id is not None:
            pulumi.set(__self__, "partner_managed_instance_id", partner_managed_instance_id)
        if primary_managed_instance_id is not None:
            pulumi.set(__self__, "primary_managed_instance_id", primary_managed_instance_id)

    @property
    @pulumi.getter(name="partnerManagedInstanceId")
    def partner_managed_instance_id(self) -> Optional[str]:
        """
        Id of Partner Managed Instance in pair.
        """
        return pulumi.get(self, "partner_managed_instance_id")

    @property
    @pulumi.getter(name="primaryManagedInstanceId")
    def primary_managed_instance_id(self) -> Optional[str]:
        """
        Id of Primary Managed Instance in pair.
        """
        return pulumi.get(self, "primary_managed_instance_id")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PartnerRegionInfoResponse(dict):
    """
    Partner region information for the failover group.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 replication_role: str,
                 location: Optional[str] = None):
        """
        Partner region information for the failover group.
        :param str replication_role: Replication role of the partner managed instances.
        :param str location: Geo location of the partner managed instances.
        """
        pulumi.set(__self__, "replication_role", replication_role)
        # Unset optional fields are omitted entirely from the wire dict.
        if location is not None:
            pulumi.set(__self__, "location", location)

    @property
    @pulumi.getter(name="replicationRole")
    def replication_role(self) -> str:
        """
        Replication role of the partner managed instances.
        """
        return pulumi.get(self, "replication_role")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Geo location of the partner managed instances.
        """
        return pulumi.get(self, "location")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SkuResponse(dict):
    """
    An ARM Resource SKU.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 name: str,
                 capacity: Optional[int] = None,
                 family: Optional[str] = None,
                 size: Optional[str] = None,
                 tier: Optional[str] = None):
        """
        An ARM Resource SKU.
        :param str name: The name of the SKU, typically, a letter + Number code, e.g. P3.
        :param int capacity: Capacity of the particular SKU.
        :param str family: If the service has different generations of hardware, for the same SKU, then that can be captured here.
        :param str size: Size of the particular SKU
        :param str tier: The tier or edition of the particular SKU, e.g. Basic, Premium.
        """
        pulumi.set(__self__, "name", name)
        # Unset optional fields are omitted entirely from the wire dict.
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if family is not None:
            pulumi.set(__self__, "family", family)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if tier is not None:
            pulumi.set(__self__, "tier", tier)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the SKU, typically, a letter + Number code, e.g. P3.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def capacity(self) -> Optional[int]:
        """
        Capacity of the particular SKU.
        """
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter
    def family(self) -> Optional[str]:
        """
        If the service has different generations of hardware, for the same SKU, then that can be captured here.
        """
        return pulumi.get(self, "family")

    @property
    @pulumi.getter
    def size(self) -> Optional[str]:
        """
        Size of the particular SKU
        """
        return pulumi.get(self, "size")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """
        The tier or edition of the particular SKU, e.g. Basic, Premium.
        """
        return pulumi.get(self, "tier")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VulnerabilityAssessmentRecurringScansPropertiesResponse(dict):
    """
    Properties of a Vulnerability Assessment recurring scans.
    """
    # NOTE: SDK-generated output type -- regenerate rather than hand-edit.
    def __init__(__self__, *,
                 email_subscription_admins: Optional[bool] = None,
                 emails: Optional[Sequence[str]] = None,
                 is_enabled: Optional[bool] = None):
        """
        Properties of a Vulnerability Assessment recurring scans.
        :param bool email_subscription_admins: Specifies that the schedule scan notification is sent to the subscription administrators.
        :param Sequence[str] emails: Specifies an array of e-mail addresses to which the scan notification is sent.
        :param bool is_enabled: Recurring scans state.
        """
        # Unset optional fields are omitted entirely from the wire dict.
        if email_subscription_admins is not None:
            pulumi.set(__self__, "email_subscription_admins", email_subscription_admins)
        if emails is not None:
            pulumi.set(__self__, "emails", emails)
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)

    @property
    @pulumi.getter(name="emailSubscriptionAdmins")
    def email_subscription_admins(self) -> Optional[bool]:
        """
        Specifies that the schedule scan notification is sent to the subscription administrators.
        """
        return pulumi.get(self, "email_subscription_admins")

    @property
    @pulumi.getter
    def emails(self) -> Optional[Sequence[str]]:
        """
        Specifies an array of e-mail addresses to which the scan notification is sent.
        """
        return pulumi.get(self, "emails")

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[bool]:
        """
        Recurring scans state.
        """
        return pulumi.get(self, "is_enabled")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
from __future__ import division
import tensorflow as tf
class SSD_fcLoss:
    """SSD loss with a focal-style classification term (TF1 graph-mode API).

    total = (classification + alpha * localization) / max(1, #positives),
    scaled by the batch size.  All negative boxes with non-zero
    classification loss are kept (no hard-negative mining ratio).
    """

    def __init__(self,
                 alpha=1.0):
        """
        :param alpha: weight of the localization loss relative to the
            classification loss.
        """
        self.alpha = alpha

    def smooth_L1_loss(self, y_true, y_pred):
        """Element-wise smooth-L1 (Huber) loss, summed over the last axis."""
        absolute_loss = tf.abs(y_true - y_pred)
        square_loss = 0.5 * (y_true - y_pred)**2
        l1_loss = tf.where(tf.less(absolute_loss, 1.0), square_loss, absolute_loss - 0.5)
        return tf.reduce_sum(l1_loss, axis=-1)

    def log_loss(self, y_true, y_pred):
        """Focal-style cross-entropy: -sum((1 - p)^2 * y * log(p)) over classes."""
        # Clip predictions away from zero so log() stays finite.
        y_pred = tf.maximum(y_pred, 1e-15)
        log_loss = -tf.reduce_sum(tf.pow(1-y_pred, 2) * y_true * tf.log(y_pred), axis=-1)  # focal loss square term
        return log_loss

    def compute_loss(self, y_true, y_pred):
        """Compute the total SSD loss for a batch.

        The last 12 channels of y_true/y_pred are box data (coordinates in
        [-12:-8]); the remaining leading channels are class scores, with
        channel 0 marking background/negatives.
        """
        # Fix: the original did `self.alpha = tf.constant(self.alpha)`, which
        # mutated the instance and broke any second call to compute_loss
        # (tf.constant of a Tensor).  Use a local constant instead.
        alpha = tf.constant(self.alpha)
        batch_size = tf.shape(y_pred)[0]  # Output dtype: tf.int32
        n_boxes = tf.shape(y_pred)[1]  # Total number of boxes per image (not per cell).

        # 1: Compute the losses for class and box predictions for every box.
        classification_loss = tf.to_float(self.log_loss(y_true[:,:,:-12], y_pred[:,:,:-12]))  # (batch_size, n_boxes)
        localization_loss = tf.to_float(self.smooth_L1_loss(y_true[:,:,-12:-8], y_pred[:,:,-12:-8]))  # (batch_size, n_boxes)

        # 2: Compute the classification losses for the positive and negative targets.
        negatives = y_true[:,:,0]  # (batch_size, n_boxes)
        positives = tf.to_float(tf.reduce_max(y_true[:,:,1:-12], axis=-1))  # (batch_size, n_boxes)
        # Number of positive boxes (classes 1..n) across the whole batch.
        n_positive = tf.reduce_sum(positives)
        pos_class_loss = tf.reduce_sum(classification_loss * positives, axis=-1)  # (batch_size,)

        # Classification loss for the negative default boxes (if any).
        neg_class_loss_all = classification_loss * negatives  # (batch_size, n_boxes)
        n_neg_losses = tf.count_nonzero(neg_class_loss_all, dtype=tf.int32)
        n_negative_keep = n_neg_losses  # keep all negatives with non-zero losses

        # If there are no negative boxes at all, or all negative losses are
        # zero, the negative loss contribution is zero.
        def f1():
            return tf.zeros([batch_size])

        # Otherwise compute the negative loss.
        def f2():
            # Flatten, pick the n_negative_keep highest losses, and build a
            # mask selecting exactly those boxes.
            neg_class_loss_all_1D = tf.reshape(neg_class_loss_all, [-1])
            values, indices = tf.nn.top_k(neg_class_loss_all_1D,
                                          k=n_negative_keep,
                                          sorted=False)  # We don't need them sorted.
            negatives_keep = tf.scatter_nd(indices=tf.expand_dims(indices, axis=1),
                                           updates=tf.ones_like(indices, dtype=tf.int32),
                                           shape=tf.shape(neg_class_loss_all_1D))
            negatives_keep = tf.to_float(tf.reshape(negatives_keep, [batch_size, n_boxes]))
            neg_class_loss = tf.reduce_sum(classification_loss * negatives_keep, axis=-1)  # (batch_size,)
            return neg_class_loss

        neg_class_loss = tf.cond(tf.equal(n_neg_losses, tf.constant(0)), f1, f2)
        class_loss = pos_class_loss + neg_class_loss  # (batch_size,)

        # 3: Localization loss for the positive targets only.
        loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1)  # (batch_size,)

        # 4: Total loss, guarded against n_positive == 0.
        total_loss = (class_loss + alpha * loc_loss) / tf.maximum(1.0, n_positive)
        total_loss = total_loss * tf.to_float(batch_size)
        return total_loss
|
"""Django ORM models for Social Auth"""
import base64
import six
import sys
from django.db import transaction
from django.db.utils import IntegrityError
from social_core.storage import UserMixin, AssociationMixin, NonceMixin, \
CodeMixin, PartialMixin, BaseStorage
from seahub.base.accounts import User
class DjangoUserMixin(UserMixin):
    """Social Auth user association, adapted to seahub's email-keyed users.

    Upstream social_core resolves accounts through ``cls.user_model()``;
    seahub stores accounts keyed by email address, so the lookup/creation
    hooks below go through ``seahub.base.accounts.User`` instead.
    """

    @classmethod
    def changed(cls, user):
        """Persist any changes made to *user*."""
        user.save()

    def set_extra_data(self, extra_data=None):
        """Merge backend extra data; save only when something changed."""
        if super(DjangoUserMixin, self).set_extra_data(extra_data):
            self.save()

    @classmethod
    def allowed_to_disconnect(cls, user, backend_name, association_id=None):
        """Return True when *user* may disconnect this social account.

        Allowed when the user has a usable password, or when at least one
        other association would remain after the disconnect.
        """
        if association_id is not None:
            qs = cls.objects.exclude(id=association_id)
        else:
            qs = cls.objects.exclude(provider=backend_name)
        qs = qs.filter(username=user.username)

        if hasattr(user, 'has_usable_password'):
            valid_password = user.has_usable_password()
        else:
            valid_password = True
        return valid_password or qs.count() > 0

    @classmethod
    def disconnect(cls, entry):
        """Delete the given association record."""
        entry.delete()

    @classmethod
    def username_field(cls):
        # seahub always keys users by 'username' (an email address) rather
        # than the model's USERNAME_FIELD, as upstream would.
        return 'username'

    @classmethod
    def user_exists(cls, *args, **kwargs):
        """
        Return True/False if a User instance exists with the given arguments.
        Arguments are directly passed to filter() manager method.
        """
        if 'username' in kwargs:
            kwargs[cls.username_field()] = kwargs.pop('username')
        assert 'username' in kwargs
        # Look the account up by email instead of the upstream
        # cls.user_model().objects.filter(...) query.
        try:
            User.objects.get(email=kwargs['username'])
            return True
        except User.DoesNotExist:
            return False

    @classmethod
    def get_username(cls, user):
        """Return the username (email) of *user*, or None."""
        return getattr(user, cls.username_field(), None)

    @classmethod
    def create_user(cls, *args, **kwargs):
        """Create and return an active seahub user for the given username.

        NOTE: upstream wraps creation in transaction.atomic() and retries
        the lookup on IntegrityError; seahub's User manager handles
        creation itself, so that machinery is intentionally omitted.
        """
        username_field = cls.username_field()
        if 'username' in kwargs and username_field not in kwargs:
            kwargs[username_field] = kwargs.pop('username')
        assert 'username' in kwargs
        user = User.objects.create_user(email=kwargs['username'],
                                        is_active=True)
        return user

    @classmethod
    def get_user(cls, pk=None, **kwargs):
        """Return the seahub user whose email is *pk*, or None.

        ``**kwargs`` is accepted for API compatibility with social_core
        but is not used for the lookup.  (A dead ``kwargs = {'pk': pk}``
        assignment was removed.)
        """
        try:
            return User.objects.get(email=pk)
        except User.DoesNotExist:
            return None

    @classmethod
    def get_users_by_email(cls, email):
        """Return a queryset of users matching *email* case-insensitively."""
        user_model = cls.user_model()
        email_field = getattr(user_model, 'EMAIL_FIELD', 'email')
        return user_model.objects.filter(**{email_field + '__iexact': email})

    @classmethod
    def get_social_auth(cls, provider, uid):
        """Return the association for (provider, uid), or None."""
        if not isinstance(uid, six.string_types):
            uid = str(uid)
        try:
            return cls.objects.get(provider=provider, uid=uid)
        except cls.DoesNotExist:
            return None

    @classmethod
    def get_social_auth_for_user(cls, user, provider=None, id=None):
        """Return *user*'s associations, optionally narrowed by provider/id."""
        qs = cls.objects.filter(username=user.username)
        if provider:
            qs = qs.filter(provider=provider)
        if id:
            qs = qs.filter(id=id)
        return qs

    @classmethod
    def create_social_auth(cls, user, uid, provider):
        """Create and return a new association for *user*."""
        if not isinstance(uid, six.string_types):
            uid = str(uid)
        if hasattr(transaction, 'atomic'):
            # In Django versions that have an "atomic" transaction decorator /
            # context manager, there's a transaction wrapped around this call.
            # If the create fails below due to an IntegrityError, ensure that
            # the transaction stays undamaged by wrapping the create in an atomic.
            with transaction.atomic():
                social_auth = cls.objects.create(username=user.username, uid=uid, provider=provider)
        else:
            social_auth = cls.objects.create(username=user.username, uid=uid, provider=provider)
        return social_auth
class DjangoNonceMixin(NonceMixin):
    """One-use number (nonce) storage."""

    @classmethod
    def use(cls, server_url, timestamp, salt):
        """Record the nonce; return True only when it was not seen before."""
        # get_or_create returns (instance, created): the nonce is valid
        # for use exactly when it was newly created.
        _, created = cls.objects.get_or_create(server_url=server_url,
                                               timestamp=timestamp,
                                               salt=salt)
        return created
class DjangoAssociationMixin(AssociationMixin):
    """OpenID association storage."""

    @classmethod
    def store(cls, server_url, association):
        """Create or update the stored association for *server_url*."""
        # Don't use get_or_create because issued cannot be null
        try:
            assoc = cls.objects.get(server_url=server_url,
                                    handle=association.handle)
        except cls.DoesNotExist:
            assoc = cls(server_url=server_url,
                        handle=association.handle)
        # Fix: base64.encodestring was removed in Python 3.9; encodebytes
        # is the supported spelling with identical output.
        assoc.secret = base64.encodebytes(association.secret)
        assoc.issued = association.issued
        assoc.lifetime = association.lifetime
        assoc.assoc_type = association.assoc_type
        assoc.save()

    @classmethod
    def get(cls, *args, **kwargs):
        """Return a queryset of matching associations."""
        return cls.objects.filter(*args, **kwargs)

    @classmethod
    def remove(cls, ids_to_delete):
        """Delete all associations with the given primary keys."""
        cls.objects.filter(pk__in=ids_to_delete).delete()
class DjangoCodeMixin(CodeMixin):
    """Verification-code storage."""

    @classmethod
    def get_code(cls, code):
        """Return the stored code instance, or None when unknown."""
        try:
            instance = cls.objects.get(code=code)
        except cls.DoesNotExist:
            instance = None
        return instance
class DjangoPartialMixin(PartialMixin):
    """Partial-pipeline session storage."""

    @classmethod
    def load(cls, token):
        """Return the partial-pipeline record for *token*, or None."""
        try:
            record = cls.objects.get(token=token)
        except cls.DoesNotExist:
            record = None
        return record

    @classmethod
    def destroy(cls, token):
        """Delete the partial-pipeline record for *token*, if present."""
        record = cls.load(token)
        if record:
            record.delete()
class BaseDjangoStorage(BaseStorage):
    """Wire the Django mixins above into social_core's storage interface."""
    # Each attribute names the concrete storage class social_core should
    # use for that kind of record.
    user = DjangoUserMixin
    nonce = DjangoNonceMixin
    association = DjangoAssociationMixin
    code = DjangoCodeMixin
|
# @Time : 2021/08/01
# @Author : <NAME>
# @Email : <EMAIL>
r"""
GPT-2
################################################
Reference:
Radford et al. "Language models are unsupervised multitask".
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from textbox.model.abstract_generator import Seq2SeqGenerator
from torch.nn.utils.rnn import pad_sequence
from transformers import GPT2LMHeadModel, GPT2TokenizerFast, GPT2Config
from math import ceil
class GPT2Seq(Seq2SeqGenerator):
    r"""GPT-2 is an auto-regressive language model with stacked Transformer decoders.

    The source text is suffixed with a short task prompt (e.g. "TL;DR:") and
    the model is trained to continue with the target text.
    """

    def __init__(self, config, dataset):
        super(GPT2Seq, self).__init__(config, dataset)
        self.max_source_length = dataset.max_source_length
        self.max_target_length = dataset.max_target_length

        self.pretrained_model_path = config['pretrained_model_path']
        self.tokenizer = GPT2TokenizerFast.from_pretrained(self.pretrained_model_path, pad_token='[PAD]')
        self.eos_token = self.tokenizer.eos_token
        self.padding_token_idx = self.tokenizer.pad_token_id

        self.configuration = GPT2Config.from_pretrained(
            self.pretrained_model_path,
            pad_token_id=self.padding_token_idx
        )

        self.model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path, config=self.configuration)
        # '[PAD]' was added to the tokenizer, so the embedding table must grow.
        self.model.resize_token_embeddings(len(self.tokenizer))

        # Task-specific prompt appended after the source text.
        task_prompts = {
            "summarization": "TL;DR:",
            "translation": "story:",
            "multi_dialog": "question:",
        }
        if config['task_type'] not in task_prompts:
            # Fix: the old message omitted multi_dialog even though it is handled.
            raise NotImplementedError("Only summarization, translation and multi_dialog tasks are supported.")
        self.task_text = task_prompts[config['task_type']]

        self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')

    def generate(self, batch_data, eval_data):
        """Beam-search decode each source text; return a list of token lists."""
        source_text = batch_data['source_text']
        generate_corpus = []
        for src in source_text:
            input_ids = self.tokenize_text(src, self.task_text, self.max_source_length).unsqueeze(0)
            sample_outputs = self.model.generate(
                input_ids,
                num_beams=4, max_length=input_ids.size(1) + self.max_target_length, early_stopping=True,
            )
            # NOTE(review): slicing from size(1) + 1 drops the first generated
            # token (the prompt occupies [0, size(1)) ) -- confirm intended.
            generated_text = self.tokenizer.decode(sample_outputs[0][input_ids.size(1) + 1:], skip_special_tokens=True)
            generated_text = generated_text.split()
            generate_corpus.append(generated_text)
        return generate_corpus

    def tokenize_text(self, text, suff_text, max_length):
        """Tokenize *text* (a word list) truncated so that *suff_text* fits
        within *max_length*; return the concatenated id tensor."""
        suff_dict = self.tokenizer(' ' + suff_text, return_tensors="pt")
        suff_ids = suff_dict['input_ids'].to(self.device)[0]
        texts = ' '.join(text)
        encoding_dict = self.tokenizer(texts, max_length=max_length-suff_ids.size(0), truncation=True, return_tensors="pt")
        input_ids = encoding_dict['input_ids'].to(self.device)[0]
        input_ids = torch.cat((input_ids, suff_ids)).long()
        return input_ids

    def forward(self, corpus, epoch_idx=-1, nll_test=False):
        """Return the mean per-sequence LM loss over the target portion only."""
        source_text = corpus['source_text']
        target_text = corpus['target_text']
        input_ids = []
        src_length = []
        for src, tgt in zip(source_text, target_text):
            src_ids = self.tokenize_text(src, self.task_text, self.max_source_length)
            tgt_ids = self.tokenize_text(tgt, self.eos_token, self.max_target_length)
            input_id = torch.cat((src_ids, tgt_ids))
            src_length.append(src_ids.size(0))
            input_ids.append(input_id)

        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.padding_token_idx)
        attn_masks = input_ids != self.padding_token_idx
        # Mask the source tokens out of the loss: only target tokens count.
        loss_masks = attn_masks.clone().detach()
        for i, l in enumerate(src_length):
            loss_masks[i][:l] = 0

        # Standard LM shift: predict token t+1 from tokens <= t.
        decoder_input_ids = input_ids[:, :-1].contiguous()
        decoder_target_ids = input_ids[:, 1:].contiguous()
        attn_masks = attn_masks[:, :-1].contiguous()
        loss_masks = loss_masks[:, :-1].contiguous()

        outputs = self.model(decoder_input_ids, attention_mask=attn_masks, use_cache=False)
        token_logits = outputs.logits

        loss = self.loss(token_logits.view(-1, token_logits.size(-1)), decoder_target_ids.view(-1))
        loss = loss.reshape_as(decoder_target_ids) * loss_masks
        # Average over the number of real (non-pad) target tokens per sample.
        length = ((decoder_target_ids != self.padding_token_idx) * loss_masks).sum(dim=1).float()
        loss = loss.sum(dim=1) / length
        return loss.mean()
|
<reponame>kevinkit/polyproto<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:48:41 2020
@author: Kevin
"""
from polyproto.drawFunctions import drawRandomCircle,drawRandomLine,drawRandomEllipse,drawRandomRectangle,drawRandomPolygon
import numpy as np
import tensorflow as tf
# Fix: this version-dependent import block appeared twice back to back;
# the duplicate has been removed.
# NOTE(review): both branches are currently identical -- the TF2 branch was
# presumably meant to import from tensorflow.keras; confirm before changing.
if int(tf.__version__.split(".")[0]) >= 2:
    from keras.utils import to_categorical, Sequence
else:
    from keras.utils import to_categorical, Sequence
class GeometricGenerator(Sequence):
    """Keras Sequence yielding batches of noisy images containing random
    geometric shapes, labeled by the index of the drawing function used."""

    def __init__(self,
                 width=200,
                 height=200,
                 channels=3,
                 forms=3,
                 batch_mul=2,
                 epoch_length=100,
                 max_background_noise=128,
                 list_of_form_creators=(drawRandomCircle,
                                        drawRandomLine,
                                        drawRandomRectangle,
                                        drawRandomEllipse),
                 difficulty=0.8):
        """
        @brief This is a simple generator for creating common geometric shapes
        @param width image width
        @param height image height
        @param channels Image channels, note that only 3-Channels are supported at the moment
        @param forms Amount of different forms to be drawn, this is also the amount of classes produced
        @param batch_mul Multiplicator to create a batch. If you use 3 forms and a batch_mul of 2
        the resulting batch_size will have the size of 6 (batch_mul*forms)
        @param epoch_length Length of one epoch
        @param max_background_noise A random background will be used, this describes the maximum noise value
        @param list_of_form_creators here you can give in a sequence of functions that will be called
        to create a form (default changed from a list literal to a tuple to
        avoid the shared-mutable-default pitfall; behavior is unchanged)
        @param difficulty Will set a percentage of the image to black
        """
        self.image_size = (width, height)
        self.width = width
        self.height = height
        self.channels = channels
        self.image_flat_size = width * height
        # Number of pixels blacked out per image.
        self.blacks = int(difficulty * width * height)
        self.epoch_length = epoch_length
        if forms > len(list_of_form_creators):
            raise ValueError("amount of forms cannot be larger than given functions")
        self.forms = forms
        self.list_of_form_creators = list_of_form_creators
        self.max_background_noise = max_background_noise
        self.batch_mul = batch_mul
        self.difficulty = difficulty

    def __len__(self):
        # One "epoch" is simply the configured number of batches.
        return self.epoch_length

    def __getitem__(self, idx):
        """Return one batch ``(X, Y)``: images scaled to [0, 1] and one-hot labels."""
        X = np.random.randint(0, self.max_background_noise,
                              size=(self.batch_mul * self.forms,
                                    self.width,
                                    self.height,
                                    self.channels))
        Y = np.zeros(shape=(self.batch_mul * self.forms))
        cnt = 0
        for i in range(self.batch_mul):
            for j in range(self.forms):
                tmp_image = self.list_of_form_creators[j](X[cnt])
                # Black out random coordinates to raise the difficulty.
                # Fix: the second coordinate must be sampled over axis 1
                # (height); using shape[0] was wrong for non-square images.
                x_random = np.random.randint(0, tmp_image.shape[0], size=self.blacks)
                y_random = np.random.randint(0, tmp_image.shape[1], size=self.blacks)
                for x, y in zip(x_random, y_random):
                    tmp_image[x, y, :] = 0
                X[cnt] = tmp_image
                Y[cnt] = j
                cnt += 1
        # Shuffle the batch so samples of the same class are not grouped.
        perm = np.arange(0, len(X))
        np.random.shuffle(perm)
        X = X[perm]
        Y = Y[perm]
        return X / 255, to_categorical(Y)
class GeometricNGenerator(Sequence):
    """Keras Sequence yielding batches of noisy images containing random
    polygons; each of the ``forms`` fixed vertex sets defines one class."""

    def __init__(self,
                 width=200,
                 height=200,
                 channels=3,
                 forms=8,
                 batch_mul=2,
                 epoch_length=100,
                 maximum_vertices=20,
                 pts=None,
                 seed=3121991,
                 max_background_noise=128,
                 difficulty=0.8):
        """
        @brief This is a simple generator for creating polygons
        @param width image width
        @param height image height
        @param channels Image channels, note that only 3-Channels are supported at the moment
        @param forms Amount of different forms to be drawn, this is also the amount of classes produced
        @param batch_mul Multiplicator to create a batch. If you use 3 forms and a batch_mul of 2
        the resulting batch_size will have the size of 6 (batch_mul*forms)
        @param epoch_length Length of one epoch
        @param maximum_vertices Maximum vertices for a polygon, used when pts is None
        @param pts vertex sets to create polygons from. Set to None to generate random ones
        @param seed seed for numpy to make reproducible experiments
        @param max_background_noise A random background will be used, this describes the maximum noise value
        @param difficulty Will set a percentage of the image to black
        """
        np.random.seed(seed)
        if pts is None:
            # Generate one fixed random vertex set per class.
            self.random_pts = []
            for _ in range(forms):
                # NOTE(review): the lower bound of 2 allows degenerate
                # two-vertex "polygons" -- confirm this is intended.
                amount_of_vertices = np.random.randint(2, maximum_vertices)
                x_cords = np.random.randint(0, width, size=amount_of_vertices)
                y_cords = np.random.randint(0, height, size=amount_of_vertices)
                vertices = np.array([x_cords, y_cords]).T
                # OpenCV-style (N, 1, 2) point array layout.
                self.random_pts.append(vertices.reshape((-1, 1, 2)))
        else:
            self.random_pts = pts
        self.forms = forms
        self.image_size = (width, height)
        self.width = width
        self.height = height
        self.channels = channels
        self.epoch_length = epoch_length
        self.max_background_noise = max_background_noise
        self.batch_mul = batch_mul
        self.image_flat_size = width * height
        # Number of pixels blacked out per image.
        self.blacks = int(difficulty * width * height)
        # Fix: store difficulty for parity with GeometricGenerator (it was
        # silently dropped before).
        self.difficulty = difficulty

    def __len__(self):
        # One "epoch" is simply the configured number of batches.
        return self.epoch_length

    def __getitem__(self, idx):
        """Return one batch ``(X, Y)``: images scaled to [0, 1] and one-hot labels."""
        X = np.random.randint(0, self.max_background_noise,
                              size=(self.batch_mul * self.forms,
                                    self.width,
                                    self.height,
                                    self.channels))
        Y = np.zeros(shape=(self.batch_mul * self.forms))
        cnt = 0
        for i in range(self.batch_mul):
            for j in range(self.forms):
                tmp_image = drawRandomPolygon(X[cnt], self.random_pts[j])
                # Fix: the second coordinate must be sampled over axis 1
                # (height); using shape[0] was wrong for non-square images.
                x_random = np.random.randint(0, tmp_image.shape[0], size=self.blacks)
                y_random = np.random.randint(0, tmp_image.shape[1], size=self.blacks)
                for x, y in zip(x_random, y_random):
                    tmp_image[x, y, :] = 0
                X[cnt] = tmp_image
                Y[cnt] = j
                cnt += 1
        # Shuffle the batch so samples of the same class are not grouped.
        perm = np.arange(0, len(X))
        np.random.shuffle(perm)
        return X[perm] / 255, to_categorical(Y[perm])
<filename>tojs.py
#!/usr/bin/env python2
# Author: <NAME> <<EMAIL>>
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import xml.etree.ElementTree as ET
import sys
import os
import json
TOJS_VERSION = "1.0"
def toJsString(s):
    """Convert *s* to a JavaScript double-quoted string literal.

    Printable ASCII passes through unchanged, double quotes are
    backslash-escaped, and every other character (including unicode
    entities, see [MTAB-366]) is emitted as a \\uXXXX escape sequence.
    """
    pieces = ['"']
    for ch in s:
        code = ord(ch)
        if ch == '"':
            pieces.append('\\"')
        elif 32 <= code <= 126:
            pieces.append(ch)
        else:
            pieces.append("\\u" + ("%0.4X" % code))
    pieces.append('"')
    return "".join(pieces)
def tagIsHidden(tagName):
    """Return True when *tagName* is special to the generator and must not
    be emitted as a child component."""
    return tagName in ("Script", "Constructor", "Destructor",
                       "Properties", "Declarations")
def attributeIsHidden(attName):
    """Return True when the XML attribute *attName* is consumed by the
    generator itself and therefore does not represent a component property."""
    if attName in ("addMethod", "addContext", "id",
                   "layoutParams", "enclosedIn", "constructorArgs"):
        return True
    # Event-handler declarations (onXxx) are emitted as listeners instead.
    return isEventHandlerDeclaration(attName)
def toSetterName(s):
    """Return the JS setter name for property *s* (e.g. "width" -> "setWidth")."""
    head, tail = s[0], s[1:]
    return "set" + head.upper() + tail
def toAttributeValue(s):
    """Translate an XML attribute value into a JS expression.

    "tr:"/"trc:" prefixes produce translation calls, "json:" and "js:"
    pass the remainder through verbatim; anything else becomes a quoted
    JS string literal.
    """
    if s.startswith("tr:"):
        return "this.tr(%s)" % toJsString(s[3:])
    if s.startswith("trc:"):
        context, text = s[4:].split("|")[0], s[4:].split("|")[1]
        return "this.trc(%s, %s)" % (toJsString(context), toJsString(text))
    if s.startswith("json:"):
        return s[5:]
    if s.startswith("js:"):
        return s[3:]
    return toJsString(s)
def isEventHandlerDeclaration(attName):
    """
    Check if an XML attribute name represents an event handler
    declaration (e.g. "onClick").

    Fix: the third character is read with a slice so that names shorter
    than three characters (e.g. exactly "on") return False instead of
    raising IndexError.
    """
    return attName[0:2] == "on" and attName[2:3].isupper()
class Generator(object):
    """Emits Qooxdoo JavaScript source (a qx.Class or qx.Mixin) from a
    parsed XML component description."""

    def __init__(self, config):
        # Counter used to generate unique child member names (__child1, ...).
        self.__lastChildId = 0
        # Event-handler method names referenced by the XML.
        self.__eventHandlers = []
        self.__config = config

    def __gensym(self):
        """Return a fresh generated member name ("__child1", "__child2", ...)."""
        self.__lastChildId += 1
        return "__child%i" % self.__lastChildId

    def __wr(self, line):
        """Write *line* to the output with CRLF line endings.

        CRLF is used on purpose: it keeps the generated files stable for
        the project's VCS.
        """
        if "\r" not in line:
            line = line.replace("\n", "\r\n")
        self.ostream.write(line + "\r\n")

    def __emitEventHandlers(self, varName, elem):
        """Emit addListener() calls for every onXxx attribute of *elem*."""
        for name in elem.attrib:
            if isEventHandlerDeclaration(name):
                # "onClick" -> event "click", handled by this.<method>.
                eventName = name[2].lower() + name[3:]
                eventMethod = elem.attrib[name]
                if eventMethod not in self.__eventHandlers:
                    self.__eventHandlers.append(eventMethod)
                self.__wr("    %s.addListener(%s, this.%s, this);" %
                    (varName, toJsString(eventName), eventMethod))

    def __emitAttributes(self, varName, elem):
        """Emit setter calls for every property attribute of *elem*."""
        for name in elem.attrib:
            if not attributeIsHidden(name):
                self.__wr("    %s.%s(%s);" % (varName, toSetterName(name), toAttributeValue(elem.attrib[name])) )

    def __emitAdd(self, parent, childElem, childVar):
        """Emit the call that adds the child component to its parent,
        honoring the addContext/addMethod/layoutParams/enclosedIn
        attributes."""
        child = childElem.tag
        addContext = parent
        addMethod = "add"
        if addContext == "": addContext = "this"
        if "addContext" in childElem.attrib:
            addContext = childElem.attrib["addContext"]
        if "addMethod" in childElem.attrib:
            addMethod = childElem.attrib["addMethod"]
        # Collapse the "this.this" produced when the parent is the root.
        if addContext.startswith("this.this"):
            addContext = addContext[5:]
        if "layoutParams" in childElem.attrib:
            addParameters = ", " + childElem.attrib["layoutParams"]
        else:
            addParameters = ""
        if "enclosedIn" in childElem.attrib:
            beforeEnclosure = childElem.attrib["enclosedIn"] + "("
            afterEnclosure = ")"
        else:
            beforeEnclosure = ""
            afterEnclosure = ""
        self.__wr("    %s.%s(%sthis.%s%s%s);" % (addContext, addMethod, beforeEnclosure, childVar, afterEnclosure, addParameters))

    def __emitChildren(self, varName, elem):
        """Recursively emit the code that creates the children of *elem*."""
        for child in elem:
            if not tagIsHidden(child.tag):
                if child.tag == "Select":
                    # Conditional block guarded by a qx environment check.
                    envname = child.attrib["name"].strip()
                    envval = child.attrib["values"].strip()
                    if "getter" in child.attrib:
                        envgetter = child.attrib["getter"].strip()
                    else:
                        envgetter = "qx.core.Environment.get"
                    expr = "qx.lang.Array.contains(%s, %s(%s))" % (envval, envgetter, toJsString(envname))
                    self.__wr("    if(%s) {" % expr)
                    self.__emitChildren(varName, child)
                    self.__wr("    }")
                elif child.tag == "GroupHeader":
                    self.__wr("    this.%s.addGroupHeader(%s);" % (varName, toAttributeValue(child.text)) )
                else:
                    childVar = self.__gensym()
                    if "constructorArgs" in child.attrib:
                        constructorArgs = child.attrib["constructorArgs"]
                    else:
                        constructorArgs = ""
                    self.__wr("    this.%s = new %s(%s);" % (childVar, self.__config.tagToClassName(child.tag), constructorArgs))
                    # An explicit id creates a friendly alias member.
                    if "id" in child.attrib:
                        self.__wr("    this.%s = this.%s;" % (child.attrib["id"], childVar))
                    self.__emitAttributes("this." + childVar, child)
                    self.__emitEventHandlers("this." + childVar, child)
                    self.__wr("")
                    self.__emitChildren(childVar, child)
                    self.__emitAdd("this." + varName, child, childVar)

    def reset(self):
        """
        Reset the generator to the initial state. Only the configuration
        is retained.
        """
        self.__lastChildId = 0
        self.__eventHandlers = []

    def getScript(self, elem):
        """Return the raw <Script> text of *elem* ("" when absent)."""
        s = elem.findtext("./Script")
        # Fix: compare against None with "is", not "==".
        return "" if s is None else s

    def getConstructor(self, elem):
        """Return the raw <Constructor> text of *elem* ("" when absent)."""
        s = elem.findtext("./Constructor")
        return "" if s is None else s

    def getDestructor(self, elem):
        """Return the raw <Destructor> text of *elem* ("" when absent)."""
        s = elem.findtext("./Destructor")
        return "" if s is None else s

    def getProperties(self, elem):
        """Return the raw <Properties> text of *elem* ("" when absent)."""
        s = elem.findtext("./Properties")
        return "" if s is None else s

    def getDeclarations(self, elem):
        """Return the raw <Declarations> text of *elem* ("" when absent)."""
        s = elem.findtext("./Declarations")
        return "" if s is None else s

    def startMixin(self, inputFile, outputFile, className):
        """
        Generate a qx.Mixin named *className*, reading the XML from
        *inputFile* and writing the JS to *outputFile*.
        """
        self.reset()
        # Fix: the input file handle used to be leaked; close it promptly.
        with open(inputFile, "rb") as source:
            elem = ET.parse(source).getroot()
        self.ostream = open(outputFile, "wb")
        # Fix: close the output even when generation raises.
        try:
            self.__wr("/* Code generated by ToJs v. %s */" % TOJS_VERSION)
            self.__wr("qx.Mixin.define(%s, {" % toJsString(className) )
            self.__wr("")
            self.__wr("  members: {")
            self.__wr("    _createComponents: function() {")
            self.__wr("")
            self.__emitAttributes("this", elem)
            self.__emitEventHandlers("this", elem)
            self.__wr("")
            self.__emitChildren("this", elem)
            # Fix: removed the dead "sep" computation that was never used.
            self.__wr("    }")
            self.__wr("  },")
            self.__wr("")
            self.__wr("  destruct: function() {")
            for i in range(self.__lastChildId):
                self.__wr("    this._disposeObjects([%s]);" % toJsString("__child" + str(i + 1)) )
            self.__wr("  }")
            self.__wr("});")
        finally:
            self.ostream.close()

    def startClass(self, inputFile, outputFile, className):
        """
        Generate a qx.Class named *className*, reading the XML from
        *inputFile* and writing the JS to *outputFile*.
        """
        self.reset()
        # Fix: the input file handle used to be leaked; close it promptly.
        with open(inputFile, "rb") as source:
            elem = ET.parse(source).getroot()
        self.ostream = open(outputFile, "wb")
        # Fix: close the output even when generation raises.
        try:
            self.__wr("/* Code generated by ToJs v. %s */" % TOJS_VERSION)
            self.__wr("qx.Class.define(%s, {" % toJsString(className) )
            self.__wr("")
            self.__wr("  extend : %s," % self.__config.tagToClassName(elem.tag))
            self.__wr("")
            decl = self.getDeclarations(elem)
            if len(decl.strip()) > 0:
                self.__wr(decl)
                self.__wr(",")
            constr = self.getConstructor(elem)
            if len(constr.strip()) > 0:
                self.__wr("  construct : function()")
                self.__wr("  {")
                self.__wr("      this.base(arguments);")
                self.__wr(constr)
                self.__wr("  },")
            prop = self.getProperties(elem)
            if len(prop.strip()) > 0:
                self.__wr("  properties : ")
                self.__wr("  {")
                self.__wr(prop.strip())
                self.__wr("  },")
            self.__wr("  members: {")
            self.__wr("    _createComponents: function() {")
            self.__wr("")
            self.__emitAttributes("this", elem)
            self.__emitEventHandlers("this", elem)
            self.__wr("")
            self.__emitChildren("this", elem)
            # Fix: removed the dead "sep" computation that was never used.
            self.__wr("    }")
            script = self.getScript(elem).strip()
            if len(script) > 0:
                self.__wr("    ,")
                # Lines ending with "//#no" are filtered out of the output.
                for row in script.split("\n"):
                    if not row.strip().endswith("//#no"):
                        self.__wr(row)
            self.__wr("  },")
            self.__wr("")
            self.__wr("  destruct: function() {")
            for i in range(self.__lastChildId):
                self.__wr("    this._disposeObjects([%s]);" % toJsString("__child" + str(i + 1)) )
            destr = self.getDestructor(elem)
            if len(destr.strip()) > 0:
                self.__wr(destr)
            self.__wr("  }")
            self.__wr("});")
        finally:
            self.ostream.close()
class GeneratorConfig(object):
    """Holds the generator's configuration: the tag aliases and the default
    package used for unqualified tag names."""

    def __init__(self):
        self.__aliases = {}
        self.__defaultPkg = ""

    def readFile(self, fileName):
        """Load the JSON configuration from *fileName*."""
        with open(fileName, "rb") as handle:
            conf = json.load(handle)
        self.__defaultPkg = conf["defaultPackage"].encode('ascii', 'ignore')
        for alias in conf["aliases"]:
            self.__aliases[alias] = conf["aliases"][alias].encode('ascii', 'ignore')

    def tagToClassName(self, name):
        """Map an XML tag name to a fully qualified JS class name,
        according to the configuration."""
        if "." in name:
            # Already contains its package: use as-is.
            return name
        if name in self.__aliases:
            return self.__aliases[name]
        return self.__defaultPkg + "." + name
def sourceContainsMixIn(fileName):
"""
Check the root tag of the source file to see if
the file contains a mixin or a class
"""
try:
elem = ET.parse(fileName).getroot()
except Exception as e:
print "Errore durante il parsing di ", fileName
sys.exit(1)
return elem.tag=="tojs"
def sourceContainsClass(fileName):
"""
Check the root tag of the source file to see if
the file contains a mixin or a class
"""
try:
elem = ET.parse(fileName).getroot()
except Exception as e:
print "Errore durante il parsing di ", fileName
sys.exit(1)
return elem.tag!="tojs"
def getTargetClassName(classdir, prefix, sourceFileName):
    """Derive the generated class name for *sourceFileName*.

    The path relative to *classdir* (minus the ".xml" suffix) becomes a
    dotted name, a ".gen" segment is inserted after the project namespace,
    and *prefix* is prepended to the class's own name.
    """
    relative = sourceFileName[len(classdir) + 1:-4]
    dotted = relative.replace("\\", ".").replace("/", ".")
    parts = dotted.split(".")
    result = "%s.gen.%s.%s%s" % (parts[0], ".".join(parts[1:-1]), prefix, parts[-1])
    # Collapse the empty middle segment left by top-level classes.
    return result.replace("..", ".")
def getTargetFileName(classdir, className):
    """Map the dotted *className* to its .js output path under *classdir*."""
    relative = "./" + className.replace(".", "/") + ".js"
    return os.path.normpath(os.path.join(classdir, relative))
def generateMixIn(g, classdir, sourceFileName):
className = getTargetClassName(classdir, "M", sourceFileName)
destFileName = getTargetFileName(classdir, className)
if not os.path.exists(os.path.dirname(destFileName)):
os.makedirs(os.path.dirname(destFileName))
print " - Mixin",className
g.startMixin(sourceFileName, destFileName, className)
def generateClass(g, classdir, sourceFileName):
className = getTargetClassName(classdir, "", sourceFileName)
destFileName = getTargetFileName(classdir, className)
if not os.path.exists(os.path.dirname(destFileName)):
os.makedirs(os.path.dirname(destFileName))
print " - Class",className
g.startClass(sourceFileName, destFileName, className)
def main():
if len(sys.argv)<2:
print "Use: %s <applicationDir>" % sys.argv[0]
return
rootdir = os.path.normpath(sys.argv[1])
configFile = os.path.join(rootdir, "tojs_config.json")
if not os.path.exists(configFile):
raise Exception("Can't find config file at %s" % configFile)
print "----------------------------------------------------------------------------"
print " Mixin generation"
print "----------------------------------------------------------------------------"
print
print ">>> Reading configuration"
print " -",configFile
print
print ">>> Generate code"
conf = GeneratorConfig()
conf.readFile(configFile)
configMTime = os.path.getmtime(configFile)
g = Generator(conf)
classdir = os.path.normpath(os.path.join(rootdir, "source/class"))
for (dirname, subdirs, files) in os.walk(classdir):
for f in files:
if f.lower().endswith(".xml"):
sourceFileName = os.path.normpath(os.path.join(dirname, f))
if sourceContainsMixIn(sourceFileName):
className = getTargetClassName(classdir, "M", sourceFileName)
elif sourceContainsClass(sourceFileName):
className = getTargetClassName(classdir, "", sourceFileName)
else:
print "ERROR: Wrong base tag", sourceFileName
destFileName = getTargetFileName(classdir, className)
# Here we need to rigenerate only the necessary classes
# to enable the Qooxdoo checker to read only the
# modified classes
sourceMTime = os.path.getmtime(sourceFileName)
if os.path.exists(destFileName):
destMTime = os.path.getmtime(destFileName)
mustCompile = destMTime < sourceMTime or destMTime < configMTime
else :
mustCompile = True
if mustCompile:
if sourceContainsMixIn(sourceFileName):
generateMixIn(g, classdir, sourceFileName)
elif sourceContainsClass(sourceFileName):
generateClass(g, classdir, sourceFileName)
else:
print "ERROR: Wrong base tag", sourceFileName
if __name__=="__main__": main()
|
r"""Provides tools to parse and convert PEG grammars and expressions
Much of the syntax is pretty intuitive. `|` is for ordered choice, ` ` to
join expressions, `!` for negative lookahead, `&` for positive lookahead, `*`
for zero or more matches, `+` for one or more matches, `?` for an optional
match, `.` for any character, and brackets `()` for grouping.
There are some differences, however. A double-quoted string means all characters
must match in order, whereas a single-quoted string means that one of the
characters is to be matched.
There are a few extra operations to help improve the parse tree. A backticked
string inserts the string in the result. Curly brackets around an expression
wraps the result in a tuple. A `~` prefix fuses the result into a single
string. A `:` prefix ignores the result.
>>> rule = create_rule("'ab'+")
>>> rule
OneOrMore(ExpectChoice('ab'))
>>> print(rule)
'ab'+
>>> grammar = create_grammar("ab <- 'ab'+;")
>>> grammar
Grammar(rules={'ab': OneOrMore(ExpectChoice('ab'))}, start='ab')
>>> print(grammar)
ab <- 'ab'+;
Here is the grammar that this module uses to parse strings. Start rules are
grammar_start and expression_start for grammars and expressions respectively.
grammar_start <- _ grammar _ !.
grammar <- { `grammar` definition (_ definition)* }
definition <- { `definition` name _ :"<-" _ expression (_ :";")? }
expression_start <- _ expression _ !.
expression <- { `choices` joined (_ :"|" _ joined)+ } | joined
joined <- { `joined` prefixed (_ prefixed)+ } | prefixed
prefixed <- error | { `prefixed` (':~&!' _)+ repeated } | repeated
repeated <- { `repeated` atom (_ '*+?')+ } | atom
atom <- expectany | expectall | inject | dot | reference | curly | round
curly <- { `packed` :"{" _ (expression | { `empty` () }) _ :"}" }
round <- :"(" _ (expression | { `empty` () }) _ :")"
error <- { `error` :"^" _ (sstring | dstring) }
expectany <- { `expectany` sstring }
expectall <- { `expectall` dstring }
inject <- { `inject` bstring }
dot <- { `dot` "." }
reference <- { `reference` name !(_ "<-") }
sstring <- ~("'" ("\\" . | !"'" .)+ "'")
dstring <- ~("\"" ("\\" . | !"\"" .)+ "\"")
bstring <- ~("`" ("\\" . | !"`" .)+ "`")
name <- ~'_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'+
_ <- :(' \t\n' | "#" (!("\n" | !.) .)* "\n"?)*
"""
from ast import literal_eval
from .grammar import Grammar
from .rule import *
__all__ = ["create_rule", "create_grammar"]
# This holds the rules to parse PEG grammars. Note that this grammar has no
# error messages. A grammar for PEG that has error messages is in peg.peg.
# The bootstrap PEG grammar, built directly from rule objects. It mirrors the
# textual grammar in the module docstring: "grammar_start"/"expression_start"
# are the entry points, and "_" swallows whitespace and "#" comments.
grammar = Grammar(rules={
    # Whole grammar: whitespace, the grammar, whitespace, end of input.
    "grammar_start": Join(
        Reference("_"),
        Reference("grammar"),
        Reference("_"),
        Negative(Any()),
    ),
    "grammar": Pack(Join(Inject("grammar"),
        Reference("definition"),
        Repeat(Join(
            Reference("_"),
            Reference("definition"),
        )),
    )),
    # name <- expression, with an optional trailing ";".
    "definition": Pack(Join(Inject("definition"),
        Reference("name"),
        Reference("_"),
        Ignore(ExpectJoin("<-")),
        Reference("_"),
        Reference("expression"),
        Optional(Join(
            Reference("_"),
            Ignore(Expect(";")),
        )),
    )),
    # Single expression: whitespace, the expression, whitespace, end of input.
    "expression_start": Join(
        Reference("_"),
        Reference("expression"),
        Reference("_"),
        Negative(Any()),
    ),
    # Ordered choice ("|") binds loosest, then joining, prefixes, repeats.
    "expression": Choice(
        Pack(Join(Inject("choices"),
            Reference("joined"),
            OneOrMore(Join(
                Reference("_"),
                Ignore(Expect("|")),
                Reference("_"),
                Reference("joined"),
            )),
        )),
        Reference("joined"),
    ),
    "joined": Choice(
        Pack(Join(Inject("joined"),
            Reference("prefixed"),
            OneOrMore(Join(
                Reference("_"),
                Reference("prefixed"),
            )),
        )),
        Reference("prefixed"),
    ),
    # Prefix operators: ":" ignore, "~" fuse, "&" positive, "!" negative.
    "prefixed": Choice(
        Reference("error"),
        Pack(Join(Inject("prefixed"),
            OneOrMore(Join(
                ExpectChoice(":~&!"),
                Reference("_"),
            )),
            Reference("repeated"),
        )),
        Reference("repeated"),
    ),
    # Suffix operators: "*" zero-or-more, "+" one-or-more, "?" optional.
    "repeated": Choice(
        Pack(Join(Inject("repeated"),
            Reference("atom"),
            OneOrMore(Join(
                Reference("_"),
                ExpectChoice("*+?"),
            )),
        )),
        Reference("atom"),
    ),
    "atom": Choice(
        Reference("expectany"),
        Reference("expectall"),
        Reference("inject"),
        Reference("dot"),
        Reference("reference"),
        Reference("curly"),
        Reference("round"),
    ),
    # "{ ... }" packs the result into a tuple; "( ... )" only groups.
    "curly": Pack(Join(Inject("packed"),
        Ignore(Expect("{")),
        Reference("_"),
        Choice(
            Reference("expression"),
            Pack(Join(Inject("empty"), Empty())),
        ),
        Reference("_"),
        Ignore(Expect("}")),
    )),
    "round": Join(
        Ignore(Expect("(")),
        Reference("_"),
        Choice(
            Reference("expression"),
            Pack(Join(Inject("empty"), Empty())),
        ),
        Reference("_"),
        Ignore(Expect(")")),
    ),
    "error": Pack(Join(Inject("error"),
        Ignore(Expect("^")),
        Reference("_"),
        Choice(
            Reference("sstring"),
            Reference("dstring"),
        ),
    )),
    # Single-quoted: any one character; double-quoted: all in order.
    "expectany": Pack(Join(Inject("expectany"), Reference("sstring"))),
    "expectall": Pack(Join(Inject("expectall"), Reference("dstring"))),
    "inject": Pack(Join(Inject("inject"), Reference("bstring"))),
    "dot": Pack(Join(Inject("dot"), Expect("."))),
    # A rule reference: a name not followed by "<-" (which would start a
    # new definition instead).
    "reference": Pack(Join(Inject("reference"),
        Reference("name"),
        Negative(Join(
            Reference("_"),
            ExpectJoin("<-"),
        )),
    )),
    "sstring": Fuse(Join(
        Expect("'"),
        OneOrMore(Choice(
            Join(Expect("\\"), Any()),
            Join(Negative(Expect("'")), Any()),
        )),
        Expect("'"),
    )),
    "dstring": Fuse(Join(
        Expect('"'),
        OneOrMore(Choice(
            Join(Expect("\\"), Any()),
            Join(Negative(Expect('"')), Any()),
        )),
        Expect('"'),
    )),
    "bstring": Fuse(Join(
        Expect("`"),
        OneOrMore(Choice(
            Join(Expect("\\"), Any()),
            Join(Negative(Expect("`")), Any()),
        )),
        Expect("`"),
    )),
    "name": Fuse(OneOrMore(ExpectChoice(
        "_abcdefghijklmnopqrstuvwxyz"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    ))),
    # Whitespace and "#" line comments, discarded from the parse result.
    "_": Ignore(Repeat(Choice(
        ExpectChoice(" \t\n"),
        Join(
            Expect("#"),
            Repeat(Join(
                Negative(Choice(
                    Expect("\n"),
                    Negative(Any()),
                )),
                Any(),
            )),
            Optional(Expect("\n")),
        ),
    ))),
})
def walk(node):
    """Recursively convert a parse-tree tuple into a rule object.

    *node* is a ``(tag, *children)`` tuple produced by parsing with the
    module-level ``grammar``. Most tags yield a rule; "definition" nodes
    yield a ``(name, rule)`` pair and "grammar" nodes yield a ``Grammar``
    whose start rule is the first definition.

    Raises ValueError for unknown tags, repeat suffixes, or prefixes.
    """
    type_, *rest = node
    if type_ == "empty":
        return Empty()
    if type_ == "expectany":
        # Single-quoted literal: match any one of its characters.
        string = literal_eval(rest[0])
        return ExpectChoice(string)
    if type_ == "expectall":
        # Double-quoted literal: match all of its characters in order.
        string = literal_eval(rest[0])
        return ExpectJoin(string)
    if type_ == "inject":
        # Backticked literal: re-quote it so literal_eval decodes escapes.
        string = literal_eval('"' + rest[0][1:-1].replace('"', '\\"') + '"')
        return Inject(string)
    if type_ == "dot":
        return Any()
    if type_ == "reference":
        name = rest[0]
        return Reference(name)
    if type_ == "packed":
        return Pack(walk(rest[0]))
    if type_ == "error":
        string = literal_eval(rest[0])
        return Error(string)
    if type_ == "repeated":
        # Apply suffixes left-to-right around the atom: "x*?" -> Optional(ZeroOrMore(x)).
        rule = walk(rest[0])
        for repeat in rest[1:]:
            if repeat == "*":
                rule = ZeroOrMore(rule)
                continue
            if repeat == "+":
                rule = OneOrMore(rule)
                continue
            if repeat == "?":
                rule = Optional(rule)
                continue
            raise ValueError(f"unknown repeat: {repeat!r}")
        return rule
    if type_ == "prefixed":
        # Apply prefixes right-to-left, innermost first: ":!x" -> Ignore(Negative(x)).
        rule = walk(rest[-1])
        for prefix in reversed(rest[:-1]):
            if prefix == ":":
                rule = Ignore(rule)
                continue
            if prefix == "~":
                rule = Fuse(rule)
                continue
            if prefix == "!":
                rule = Negative(rule)
                continue
            if prefix == "&":
                rule = Positive(rule)
                continue
            raise ValueError(f"unknown prefix: {prefix!r}")
        return rule
    if type_ == "joined":
        return Join(*map(walk, rest))
    if type_ == "choices":
        return Choice(*map(walk, rest))
    if type_ == "definition":
        return (rest[0], walk(rest[1]))
    if type_ == "grammar":
        grammar = Grammar()
        for definition in rest:
            name, rule = walk(definition)
            # The first definition becomes the grammar's start rule.
            if grammar.start is None:
                grammar.start = name
            grammar.rules[name] = rule
        return grammar
    raise ValueError(f"unknown node: {node!r}")
def create_rule(string):
    """Create a rule object by parsing *string* as a PEG expression."""
    tree = grammar.parse(string, start="expression_start")
    return walk(tree[0])
def create_grammar(string):
    """Create a Grammar object by parsing *string* as a PEG grammar."""
    tree = grammar.parse(string, start="grammar_start")
    return walk(tree[0])
def test_parse_grammar():
    """Round-trip the module grammar through its own parser twice and
    check the result is a fixed point."""
    once = create_grammar(str(grammar))
    twice = create_grammar(str(once))
    assert str(once) == str(twice)
    assert once == twice
def test_parse_rule():
    """Round-trip every individual rule through the expression parser and
    check each is a fixed point."""
    for rule in grammar.rules.values():
        once = create_rule(str(rule))
        twice = create_rule(str(once))
        assert str(once) == str(twice)
        assert once == twice
|
import os
import json
import logging
import jsonschema
from functools import wraps
from flask import current_app, jsonify, request, json
from werkzeug.exceptions import BadRequest, InternalServerError
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
_DEFAULT_LOG_LEVEL = 'WARNING'
_DEFAULT_SCHEMA_DIR = './schemas'
_LOG_FORMAT = ('[%(asctime)s] [%(process)d] [%(levelname)s] [%(name)s] '
'%(message)s')
def jerror_handler(e):
    """Render the exception *e* as a JSON API error response
    (http://jsonapi.org/format/#errors).

    Exceptions without a ``name`` attribute (i.e. not werkzeug
    HTTPExceptions or Jerror) are reported as a generic 500.
    """
    if not hasattr(e, 'name'):
        e.name = 'Internal Server Error'
        e.code = '500'
        e.description = '{}: {}'.format(e.__class__.__name__, str(e))
    payload = {'errors': [{"status": e.name,
                           "code": e.code,
                           "detail": e.description}]}
    return jsonify(payload), e.code
class Jerror(Exception):
    """Application error carrying the ``code``/``name``/``description``
    fields that ``jerror_handler`` turns into a JSON API error response."""

    def __init__(self, code, status, description):
        super().__init__()
        self.code = code
        self.name = status
        self.description = description
class Jerify(object):
    """Flask extension that validates request/response JSON against
    Draft-4 JSON schemas loaded from the JERIFY_SCHEMAS directory."""

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
        self.logger = self._get_logger()
        self.schemas = self._get_schemas()

    def init_app(self, app):
        """Register default config values and the teardown hook on *app*."""
        app.config.setdefault('JERIFY_SCHEMAS', _DEFAULT_SCHEMA_DIR)
        app.config.setdefault('JERIFY_LOG', _DEFAULT_LOG_LEVEL)
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)
        else:
            app.teardown_request(self.teardown)

    def teardown(self, e):
        """Teardown hook; the extension keeps no per-context state, so there
        is nothing to release."""
        # Fix: removed the dead ``ctx = stack.top`` read (unused local).

    def _get_logger(self):
        """Create the extension's console logger at the configured level."""
        logger = logging.getLogger(__name__)
        with self.app.app_context():
            loglevel = current_app.config['JERIFY_LOG']
            logger.setLevel(logging.getLevelName(loglevel))
        console = logging.StreamHandler()
        formatter = logging.Formatter(_LOG_FORMAT, '%Y-%m-%d %H:%M:%S +0000')
        console.setFormatter(formatter)
        logger.addHandler(console)
        return logger

    def _get_schemas(self):
        """Load every ``*.schema.json`` file under JERIFY_SCHEMAS.

        Returns a dict mapping the schema name (file name minus the
        ``.schema.json`` suffix) to ``(absolute_schema_dir, schema_dict)``;
        the directory is kept so relative ``$ref``s resolve later. Files
        that fail to decode or validate are logged and skipped.
        """
        schemas = {}
        with self.app.app_context():
            schemas_dir = current_app.config['JERIFY_SCHEMAS']
            if not os.path.isdir(schemas_dir):
                return schemas
            for root, dirs, files in os.walk(schemas_dir):
                for file in files:
                    if not file.endswith('.schema.json'):
                        continue
                    with open(os.path.join(root, file), 'r') as file_handler:
                        try:
                            schema = json.load(file_handler)
                            jsonschema.Draft4Validator.check_schema(schema)
                            schema_name = file.replace('.schema.json', '')
                            schemas[schema_name] = (
                                os.path.join(os.getcwd(), root),
                                schema
                            )
                        except json.decoder.JSONDecodeError as e:
                            self.logger.warning('Decoding failed: {}'.format(file))
                            self.logger.debug(e.msg)
                        except (jsonschema.SchemaError,
                                jsonschema.ValidationError) as e:
                            # Fix: check_schema raises SchemaError (not
                            # ValidationError), so invalid schemas fell into
                            # the generic handler below; also these errors
                            # have no ``msg`` attribute.
                            self.logger.warning('Invalid schema: {}'.format(file))
                            self.logger.debug(str(e))
                            continue
                        except Exception as e:
                            # Fix: generic exceptions have no ``msg``
                            # attribute; ``e.msg`` raised AttributeError here.
                            self.logger.warning('Failed to load: {}'.format(file))
                            self.logger.debug(str(e))
        return schemas

    def _resolver_for(self, schema_name):
        """Return ``(schema, RefResolver)`` for *schema_name*; the resolver's
        ``file://`` base URI lets ``$ref``s to sibling files resolve."""
        root, schema = self.schemas[schema_name]
        resolver = jsonschema.RefResolver(
            base_uri=f'file://{root}/',
            referrer=schema
        )
        return schema, resolver

    def _check_request_schema(self, schema_name):
        """Validate the current request's JSON body against *schema_name*.

        Raises BadRequest when validation fails and InternalServerError
        when the schema is unknown.
        """
        if schema_name not in self.schemas:
            log = 'Unknown schema: {}'.format(schema_name)
            self.logger.error(log)
            raise InternalServerError()
        schema, resolver = self._resolver_for(schema_name)
        try:
            jsonschema.validate(
                request.get_json(),
                schema,
                resolver=resolver
            )
        except jsonschema.ValidationError as e:
            # Fix: added the missing space before the quoted schema name so
            # this message matches the one in ``validate``.
            log = ('JSON failed validation against schema \'{}\': '
                   '{}'.format(schema_name, request.get_json()))
            self.logger.info(log)
            raise BadRequest(e.message)

    def request(self, schema=None):
        """Decorator: reject non-JSON requests with 400 and, when *schema*
        is given, validate the body against it before calling the view."""
        def decorator(f):
            @wraps(f)
            def wrapper(*args, **kwargs):
                if not request.get_json(silent=True):
                    log = 'Received invalid JSON: {}'.format(request.data)
                    self.logger.info(log)
                    raise BadRequest('Invalid JSON')
                if schema:
                    self._check_request_schema(schema)
                return f(*args, **kwargs)
            return wrapper
        return decorator

    def response(self, dict_, schema):
        """Validate *dict_* against *schema* and return it jsonified."""
        self.validate(dict_, schema)
        return jsonify(dict_)

    def validate(self, dict_, schema_name):
        """Validate *dict_* against *schema_name*, raising
        InternalServerError on failure or when the schema is unknown."""
        if schema_name not in self.schemas:
            log = 'Unknown schema: {}'.format(schema_name)
            self.logger.error(log)
            raise InternalServerError()
        schema, resolver = self._resolver_for(schema_name)
        try:
            jsonschema.validate(
                dict_,
                schema,
                resolver=resolver
            )
        except jsonschema.ValidationError as e:
            self.logger.error(e)
            log = ('JSON failed validation against schema \'{}\': '
                   '{}'.format(schema_name, dict_))
            self.logger.error(log)
            raise InternalServerError(log)
|
from colorama import Fore
import re
from copy import deepcopy
from itertools import product
from ChessDRF.logic.board_and_controller import Controller
from ChessDRF.logic.figure import Figure
class Checker:
    """Move-legality checker for a chess game.

    ``status`` strings are produced by ``Figure.move_check`` and encode the
    move kind ('M' move, 'A' attack, 'MA' move-or-attack, 'QM'/'QMA'
    trajectory queries, 'AE' en passant, castling, 'I' invalid) plus, for
    query moves, the intermediate squares that must be empty.
    """

    def __init__(self, game, board, test: bool):
        # ``test`` silences Controller messages; it is set on deep copies of
        # the game used to evaluate hypothetical moves.
        self.game, self.board, self.test = game, board, test

    def check(self, figure, end: tuple, with_king_danger_check=True):
        """Return the status string when moving ``figure`` to ``end`` is
        legal; return None (implicitly) otherwise."""
        status = self._check_correct_figure_select(figure, end)
        if status:
            if self._check_trajectory(end, status):
                if with_king_danger_check:
                    if not self._is_act_danger_for_king(figure, end, status):
                        if not self.test:
                            Controller.message('Good!')
                        return status
                    elif not self.test:
                        Controller.message('King under attack!', Fore.RED)
                else:
                    if not self.test:
                        Controller.message('Good!')
                    return status

    def _check_correct_figure_select(self, figure: Figure, pos: tuple):
        """Return the figure's move status for ``pos`` when a figure of the
        side to move is selected; None otherwise."""
        if not figure:
            if not self.test:
                Controller.message('No figure selected', Fore.RED)
        else:
            if figure.is_white != self.board.white_turn:
                if not self.test:
                    Controller.message('It`s not your figure', Fore.RED)
            else:
                return figure.move_check(pos)

    def _check_trajectory(self, pos: tuple, status: str):
        """Dispatch to the validator matching the status code.

        Order matters: longer codes are tested before their substrings
        (e.g. 'QMA' before 'QM' before 'M').
        """
        # NOTE(review): 'QС' below contains Cyrillic 'С' (U+0421); it must
        # match whatever Figure.move_check emits — confirm it is not a typo
        # for ASCII 'C'.
        functions = (
            ('I' in status, lambda p, s: False),
            ('AE' in status, self._check_en_passant),
            ('QС' in status, self._check_castling),
            ('QMA' in status, self._check_move_and_attack_query),
            ('QM' in status, self._check_move_query),
            ('MA' in status, self._check_move_and_attack),
            ('M' in status, self._check_move),
            ('A' in status, self._check_attack),
        )
        for statement, func in functions:
            if statement:
                return func(pos, status)

    def _check_castling(self, _, status):
        """Castling: the king's path must be empty and unattacked, and the
        rook must be unmoved (status 'S')."""
        str_query = re.findall(r' (.+) R', status)[0]
        query = self._get_query(str_query)
        for pos in query:
            if (not self._check_move(pos, status)) or self.is_tile_under_attack(pos):
                return False
        str_rook_pos = re.findall(r'R \((\d), (\d)\)', status)
        rook_pos = tuple(map(int, str_rook_pos[0]))
        rook = self.board[rook_pos]
        if rook.kind == 'rook' and rook.status == 'S':
            return True

    def _check_en_passant(self, end, status):
        """En passant: an enemy pawn with status 'E' sits on the encoded
        square and the destination is empty."""
        pawn_pos = (int(status[6]), int(status[9]))
        if pawn := self.board.get(pawn_pos):
            if pawn.is_white != self.board.white_turn and pawn.status == 'E':
                return not bool(self.board.get(end))

    def _check_move_and_attack_query(self, end, status):
        """Sliding move/attack: every intermediate square must be empty and
        the final square must be a legal move or capture."""
        str_query = re.findall(r' (.+)', status)[0]
        query = self._get_query(str_query)
        for pos in query[:-1]:
            if not self._check_move(pos, status):
                return False
        if self._check_move_and_attack(end, status):
            return True

    def _check_move_query(self, _, status):
        """Sliding move: every square on the path must be empty."""
        str_query = re.findall(r' (.+)', status)[0]
        query = self._get_query(str_query)
        for pos in query:
            if not self._check_move(pos, status):
                return False
        return True

    def _check_move_and_attack(self, end, status):
        return True if self._check_move(end, status) else self._check_attack(end, status)

    def _check_move(self, end, _):
        # A plain move requires an empty destination square.
        return not self.board.get(end)

    def _check_attack(self, end, _):
        # A capture requires an enemy figure on the destination square.
        if enemy := self.board.get(end):
            if enemy.is_white != self.board.white_turn:
                return True

    @staticmethod
    def _get_query(query):
        """Parse '((a, b), (c, d), ...)' into a list of int tuples."""
        return [(int(i[0]), int(i[-1])) for i in query[2:-2].split('), (')]

    def is_tile_under_attack(self, pos):
        """Return True when any enemy figure can reach ``pos``.

        Works on a deep copy with the turn flipped so enemy moves validate.
        """
        copy = deepcopy(self.game)
        copy.test = True
        copy.board.white_turn = not copy.board.white_turn
        for fig_pos, figure in self.board.items():
            if figure.is_white != self.board.white_turn:
                if copy.checker.check(figure, pos, False):
                    return True

    def _is_act_danger_for_king(self, figure, end, status):
        """Play the move on a copy and report whether it leaves own king in check."""
        copy = deepcopy(self.game)
        copy_figure = copy.board[figure.pos]
        copy.act(copy_figure, end, status)
        return copy.checker.check_check()

    def check_check(self):
        """Return True when the king of the side to move is attacked."""
        king = self.board.get_king(self.board.white_turn)
        return self.is_tile_under_attack(king.pos)

    def check_end_of_game(self):
        """Return 'M' (mate), 'SM' (stalemate), 'IM' (insufficient material)
        or None when the game continues."""
        if self._check_mate():
            return 'M' if self.check_check() else 'SM'
        if self._check_insufficient():
            return 'IM'

    def _check_mate(self):
        """Return True when the side to move has no legal move.

        Bug fix: the square iterator was previously built once with
        ``product(range(8), range(8))`` outside the figure loop; being an
        iterator, it was exhausted after the first figure, so all remaining
        figures were silently skipped. It is now rebuilt per figure.
        """
        for figure in filter(
                lambda f: f.is_white == self.board.white_turn, self.board.values()):
            for pos in product(range(8), range(8)):
                if (status := self.check(figure, pos)) and status != 'I':
                    return
        return True

    def _check_insufficient(self):
        """Insufficient material: no queen/rook/pawn on the board and at most
        one minor piece (knight/bishop) per side."""
        white = black = 0
        for figure in self.board.values():
            if figure.kind in ('queen', 'rook', 'pawn'):
                return
            elif figure.kind in ('knight', 'bishop'):
                if figure.is_white:
                    white += 1
                else:
                    black += 1
        if max(white, black) < 2:
            return True
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: <NAME>
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## <EMAIL>
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Models for meta-learning. """
import tensorflow as tf
from tensorflow.python.platform import flags
from utils.misc import mse, softmaxloss, xent, resnet_conv_block, resnet_nob_conv_block
FLAGS = flags.FLAGS
def MakeMetaModel():
    """Build and return the meta-learning model.

    Selects a ResNet backbone from FLAGS.backbone_arch, then defines and
    returns a MetaModel instance wrapping it.

    Returns:
      A MetaModel instance (subclass of the backbone-specific Models class).
    """
    # NOTE: tf.placeholder / tf.contrib / tf.to_float imply TensorFlow 1.x.
    if FLAGS.backbone_arch=='resnet12':
        try:#python2
            from resnet12 import Models
        except ImportError:#python3
            from models.resnet12 import Models
    elif FLAGS.backbone_arch=='resnet18':
        try:#python2
            from resnet18 import Models
        except ImportError:#python3
            from models.resnet18 import Models
    else:
        # NOTE(review): when no branch matches, ``Models`` stays unbound and
        # the class statement below raises NameError — confirm this
        # print-then-crash behavior is the intended fail-fast.
        print('Please set the correct backbone')
    class MetaModel(Models):
        """The class for the meta models. This class is inheritance from Models, so some variables are in the Models class."""
        def construct_model(self):
            """The function to construct meta-train model."""
            # Set the placeholder for the input episode
            self.inputa = tf.placeholder(tf.float32) # episode train images
            self.inputb = tf.placeholder(tf.float32) # episode test images
            self.labela = tf.placeholder(tf.float32) # episode train labels
            self.labelb = tf.placeholder(tf.float32) # episode test labels
            with tf.variable_scope('meta-model', reuse=None) as training_scope:
                # construct the model weights
                self.ss_weights = ss_weights = self.construct_resnet_ss_weights()
                self.weights = weights = self.construct_resnet_weights()
                self.fc_weights = fc_weights = self.construct_fc_weights()
                # Load base epoch number from FLAGS
                num_updates = FLAGS.train_base_epoch_num
                def task_metalearn(inp, reuse=True):
                    """The function to process one episode in a meta-batch.
                    Args:
                      inp: the input episode.
                      reuse: whether reuse the variables for the normalization.
                    Returns:
                      A list of outputs: final test loss, per-step test/train
                      loss lists, and final test accuracy.
                    """
                    # Seperate inp to different variables
                    inputa, inputb, labela, labelb = inp
                    # Generate empty list to record losses
                    lossa_list = [] # Base train loss list
                    lossb_list = [] # Base test loss list
                    # Embed the input images to embeddings with ss weights
                    emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse) # Embed episode train
                    emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True) # Embed episode test
                    # Run the first epoch of the base learning
                    # Forward fc layer for episode train
                    outputa = self.forward_fc(emb_outputa, fc_weights)
                    # Calculate base train loss
                    lossa = self.loss_func(outputa, labela)
                    # Record base train loss
                    lossa_list.append(lossa)
                    # Forward fc layer for episode test
                    outputb = self.forward_fc(emb_outputb, fc_weights)
                    # Calculate base test loss
                    lossb = self.loss_func(outputb, labelb)
                    # Record base test loss
                    lossb_list.append(lossb)
                    # Calculate the gradients for the fc layer
                    grads = tf.gradients(lossa, list(fc_weights.values()))
                    gradients = dict(zip(fc_weights.keys(), grads))
                    # Use graient descent to update the fc layer
                    fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \
                        self.update_lr*gradients[key] for key in fc_weights.keys()]))
                    for j in range(num_updates - 1):
                        # Run the following base epochs, these are similar to the first base epoch
                        lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)
                        lossa_list.append(lossa)
                        lossb = self.loss_func(self.forward_fc(emb_outputb, fast_fc_weights), labelb)
                        lossb_list.append(lossb)
                        grads = tf.gradients(lossa, list(fast_fc_weights.values()))
                        gradients = dict(zip(fast_fc_weights.keys(), grads))
                        fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \
                            self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))
                    # Calculate final episode test predictions
                    outputb = self.forward_fc(emb_outputb, fast_fc_weights)
                    # Calculate the final episode test loss, it is the loss for the episode on meta-train
                    final_lossb = self.loss_func(outputb, labelb)
                    # Calculate the final episode test accuarcy
                    accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))
                    # Reorganize all the outputs to a list
                    task_output = [final_lossb, lossb_list, lossa_list, accb]
                    return task_output
                # Initial the batch normalization weights
                if FLAGS.norm is not None:
                    unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)
                # Set the dtype of the outputs
                out_dtype = [tf.float32, [tf.float32]*num_updates, [tf.float32]*num_updates, tf.float32]
                # Run two episodes for a meta batch using parallel setting
                result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \
                    dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)
                # Seperate the outputs to different variables
                lossb, lossesb, lossesa, accsb = result
            # Set the variables to output from the tensorflow graph
            self.total_loss = total_loss = tf.reduce_sum(lossb) / tf.to_float(FLAGS.meta_batch_size)
            self.total_accuracy = total_accuracy = tf.reduce_sum(accsb) / tf.to_float(FLAGS.meta_batch_size)
            self.total_lossa = total_lossa = [tf.reduce_sum(lossesa[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            self.total_lossb = total_lossb = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            # Set the meta-train optimizer
            optimizer = tf.train.AdamOptimizer(self.meta_lr)
            # Only the SS weights and fc weights are meta-trained; the
            # backbone weights stay frozen.
            self.metatrain_op = optimizer.minimize(total_loss, var_list=list(ss_weights.values()) + list(fc_weights.values()))
            # Set the tensorboard
            self.training_summaries = []
            self.training_summaries.append(tf.summary.scalar('Meta Train Loss', (total_loss / tf.to_float(FLAGS.metatrain_epite_sample_num))))
            self.training_summaries.append(tf.summary.scalar('Meta Train Accuracy', total_accuracy))
            for j in range(num_updates):
                self.training_summaries.append(tf.summary.scalar('Base Train Loss Step' + str(j+1), total_lossa[j]))
            for j in range(num_updates):
                self.training_summaries.append(tf.summary.scalar('Base Val Loss Step' + str(j+1), total_lossb[j]))
            self.training_summ_op = tf.summary.merge(self.training_summaries)
            self.input_val_loss = tf.placeholder(tf.float32)
            self.input_val_acc = tf.placeholder(tf.float32)
            self.val_summaries = []
            self.val_summaries.append(tf.summary.scalar('Meta Val Loss', self.input_val_loss))
            self.val_summaries.append(tf.summary.scalar('Meta Val Accuracy', self.input_val_acc))
            self.val_summ_op = tf.summary.merge(self.val_summaries)
        def construct_test_model(self):
            """The function to construct meta-test model."""
            # Set the placeholder for the input episode
            self.inputa = tf.placeholder(tf.float32)
            self.inputb = tf.placeholder(tf.float32)
            self.labela = tf.placeholder(tf.float32)
            self.labelb = tf.placeholder(tf.float32)
            with tf.variable_scope('meta-test-model', reuse=None) as training_scope:
                # construct the model weights
                self.ss_weights = ss_weights = self.construct_resnet_ss_weights()
                self.weights = weights = self.construct_resnet_weights()
                self.fc_weights = fc_weights = self.construct_fc_weights()
                # Load test base epoch number from FLAGS
                num_updates = FLAGS.test_base_epoch_num
                def task_metalearn(inp, reuse=True):
                    """The function to process one episode in a meta-batch.
                    Args:
                      inp: the input episode.
                      reuse: whether reuse the variables for the normalization.
                    Returns:
                      Final test loss, final test accuracy, and the per-step
                      accuracy list.
                    """
                    # Seperate inp to different variables
                    inputa, inputb, labela, labelb = inp
                    # Generate empty list to record accuracies
                    accb_list = []
                    # Embed the input images to embeddings with ss weights
                    emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse)
                    emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True)
                    # This part is similar to the meta-train function, you may refer to the comments above
                    outputa = self.forward_fc(emb_outputa, fc_weights)
                    lossa = self.loss_func(outputa, labela)
                    grads = tf.gradients(lossa, list(fc_weights.values()))
                    gradients = dict(zip(fc_weights.keys(), grads))
                    fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \
                        self.update_lr*gradients[key] for key in fc_weights.keys()]))
                    outputb = self.forward_fc(emb_outputb, fast_fc_weights)
                    accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))
                    accb_list.append(accb)
                    for j in range(num_updates - 1):
                        lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)
                        grads = tf.gradients(lossa, list(fast_fc_weights.values()))
                        gradients = dict(zip(fast_fc_weights.keys(), grads))
                        fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \
                            self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))
                        outputb = self.forward_fc(emb_outputb, fast_fc_weights)
                        accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))
                        accb_list.append(accb)
                    lossb = self.loss_func(outputb, labelb)
                    task_output = [lossb, accb, accb_list]
                    return task_output
                if FLAGS.norm is not None:
                    unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)
                out_dtype = [tf.float32, tf.float32, [tf.float32]*num_updates]
                result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \
                    dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)
                lossesb, accsb, accsb_list = result
            # NOTE(review): unlike construct_model, these sums are not divided
            # by meta_batch_size — presumably normalized by the caller; verify.
            self.metaval_total_loss = total_loss = tf.reduce_sum(lossesb)
            self.metaval_total_accuracy = total_accuracy = tf.reduce_sum(accsb)
            self.metaval_total_accuracies = total_accuracies =[tf.reduce_sum(accsb_list[j]) for j in range(num_updates)]
    return MetaModel()
|
<reponame>wsutc/SEAS-purchase-system<gh_stars>0
from asyncio.windows_events import NULL
from django.db import models
from django.utils import timezone
from phonenumber_field.modelfields import PhoneNumberField
from pyexpat import model
###------------------------------- Item Setup -----------------------------------
class Manufacturer(models.Model):
    """A product manufacturer (distinct from the Vendor that sells the product)."""
    name = models.CharField("Name of Manufacturer",max_length=50)
    website = models.URLField("URL of Manufacturer",blank=True)
    wsu_discount = models.BooleanField("Does WSU get a discount?",default=False)
    # discount_percentage: presumably 0-100 percent — TODO confirm units
    discount_percentage = models.FloatField(default=0)
    # mfg_logo = models.ImageField("Manufacturer Logo (optional)",blank=True)
    created_date = models.DateTimeField("Date manufacturer added",auto_now_add=True)
    phone = PhoneNumberField("Manufacturer Phone Number (optional)",blank=True)
    def __str__(self):
        """Display manufacturers by name in admin/choice lists."""
        return self.name
class Vendor(models.Model):
    """A company products are purchased from, with contact address details."""
    name = models.CharField("Name of Vendor",max_length=50)
    wsu_discount = models.BooleanField("Does WSU get a discount?",default=False)
    # discount_percentage: presumably 0-100 percent — TODO confirm units
    discount_percentage = models.FloatField(default=0)
    website = models.URLField("URL/Link to Vendor Website")
    # vendor_logo = models.ImageField("Vendor Logo (optional)",blank=True)
    phone = PhoneNumberField("Vendor Phone Number",null=False,blank=True)
    street1 = models.CharField("Address 1",max_length=50,blank=True)
    street2 = models.CharField("Address 2 (optional)",max_length=50,blank=True)
    city = models.CharField("City",max_length=50,blank=True)
    state = models.CharField("State",max_length=50,blank=True)
    zip = models.CharField("ZIP Code",max_length=10,blank=True)
    def __str__(self):
        """Display vendors by name in admin/choice lists."""
        return self.name
class Product(models.Model):
    """A purchasable item, linked to its manufacturer and approved vendor(s)."""
    # Substitution-policy choice constants; stored values must stay stable
    # because they are persisted in the database.
    FREE = 'buyers_choice'
    STRICT = 'strict'
    ASK = 'ask_before'
    SUBSTITUTIONS = (
        (FREE, 'Substitute with like product'),
        (STRICT, 'No substitutions allowed'),
        (ASK, 'Ask before substituting'),
    )
    name = models.CharField("Name of Product",max_length=50)
    description = models.TextField("Description of product")
    created_date = models.DateTimeField("Date Product Created",auto_now_add=True)
    original_manufacturer = models.ForeignKey(Manufacturer,on_delete=models.PROTECT)
    specification = models.TextField("Detailed Specifications (required if no specification sheet)")
    spec_sheet = models.FileField("Specifications",upload_to='products',blank=True)
    # picture = models.ImageField("Product Image (options)",upload_to='products',blank=True)
    substitution = models.CharField(
        "Product Replacement",
        choices=SUBSTITUTIONS,
        default='buyers_choice',
        max_length=150
    )
    # NOTE(review): both FKs below model what look like many-valued
    # relationships ("approved substitutes"/"approved vendors") as a single
    # FK — confirm whether ManyToManyField was intended.
    approved_substitutes = models.ForeignKey('self',null=True,on_delete=models.PROTECT,blank=True)
    approved_vendors = models.ForeignKey(Vendor,on_delete=models.CASCADE,null=True)
    last_price = models.DecimalField("Last Price",decimal_places=2,max_digits=10)
    link = models.URLField("Direct Link",blank=True)
    identifier = models.CharField("Unique Identifier (ASIN/UPC/PN/etc.)",max_length=50,blank=True)
    def __str__(self):
        """Display products by name in admin/choice lists."""
        return self.name
###--------------------------------------- Imported Data -------------------------------------
class Accounts(models.Model):
    """A financial account imported from an external (Workday) source."""
    account = models.CharField("Account",max_length=10)
    budget_code = models.CharField("Budget Code",max_length=5)
    fund = models.CharField("Fund",max_length=5)
    program_workday = models.CharField("Program Workday",max_length=10)
    account_title = models.CharField("Account Title",max_length=200)
    def __str__(self):
        """Display accounts by their title."""
        return "Title: %s" % (self.account_title)
###--------------------------------------- Request Setup -------------------------------------
class Department(models.Model):
    """An organizational department, identified by a short code."""
    code = models.CharField("Code/Abbreviation",max_length=10)
    name = models.CharField("Full Department Name",max_length=150)
    def __str__(self):
        """Show the full name with its abbreviation, e.g. 'Engineering → ENG'."""
        return "%s → %s" % (self.name,self.code)
class Requisitioner(models.Model):
    """The person requesting a purchase, with contact info and department."""
    first_name = models.CharField("First Name",max_length=50,blank=False)
    last_name = models.CharField("Last Name",max_length=50,blank=False)
    phone = models.CharField("Phone Number",max_length=10,blank=False)
    email = models.EmailField("Email",max_length=50,blank=False)
    department = models.ForeignKey(Department,on_delete=models.PROTECT)
    def __str__(self):
        """Display requisitioners by full name."""
        return "Name: %s %s" % (self.first_name,self.last_name)
class PurchaseRequest(models.Model):
    """A request to purchase products, numbered 'PR1####' after first save."""
    id = models.AutoField(primary_key=True,editable=False)
    requisitioner = models.ForeignKey(Requisitioner,on_delete=models.PROTECT)
    number = models.CharField(max_length=10,unique=True)
    products = models.ManyToManyField(Product)
    created_date = models.DateTimeField("Created Date",auto_now_add=True)
    # NOTE(review): blank=True without null=True on a DateField means a
    # blank form value would try to store NULL into a NOT NULL column —
    # confirm null=True was intended.
    need_by_date = models.DateField("Date Required (optional)",blank=True)
    tax_exempt = models.BooleanField("Tax Exempt?",default=False)
    accounts = models.ManyToManyField(Accounts)
    subtotal = models.DecimalField("Subtotal",decimal_places=2,max_digits=10)
    shipping = models.DecimalField("Shipping ($)",decimal_places=2,max_digits=10)
    sales_tax = models.DecimalField("Sales Tax ($)",decimal_places=2,max_digits=10)
    grand_total = models.DecimalField("Grand Total ($)",decimal_places=2,max_digits=10)
    justification = models.TextField("Justification",blank=False)
    instruction = models.TextField(
        "Special Instructions",
        default='Because grand total amount does not include shipping/handling and tax costs, Dr. Mo approves if total costs exceeds grand total amount.',
    )
    # Purchase-type choice constants; stored values must stay stable.
    PO = 'po'
    PCARD = 'pcard'
    IRI = 'iri'
    INV_VOUCHER = 'invoice voucher'
    CONTRACT = 'contract'
    PURCHASE_TYPE = (
        (PO, 'PURCHASE ORDER'),
        (PCARD, 'PCARD'),
        (IRI, 'IRI'),
        (INV_VOUCHER, 'INVOICE VOUCHER'),
        (CONTRACT, 'CONTRACT')
    )
    purchase_type = models.CharField(
        "Choose One",
        choices=PURCHASE_TYPE,
        default='pcard',
        max_length=150
    )
    def save(self, *args, **kwargs):
        """Save normally, then derive the PR number (needs the auto id)."""
        super().save(*args, **kwargs)
        self.set_number()
    def set_number(self):
        """Assign 'PR' + zero-offset id (id + 10^4 gives 5 digits) once.

        NOTE(review): this re-fetches and re-saves the row (a second save per
        create) and leaves ``self.number`` unset on the in-memory instance —
        confirm callers re-read the object.
        """
        if not self.number:
            number = "PR" + str(self.id + (10 ** 4)) # Creates a number starting with 'PR' and ending with a 5 character (10^4) unique ID
            request = PurchaseRequest.objects.get(id=self.id)
            request.number = number
            request.save()
    def __str__(self):
        """Display purchase requests by their PR number."""
        return self.number
class PurchaseOrder(models.Model):
    """An order placed with a vendor, numbered 'PO1####' after first save."""
    id = models.AutoField(primary_key=True,editable=False)
    number = models.CharField(max_length=10,unique=True)
    # source_PR = models.ManyToManyField("Source Purchase Request(s)",PurchaseRequest)
    # Bug fix: the second positional argument of ForeignKey is ``on_delete``;
    # the original passed the Vendor class itself, which is not a deletion
    # handler and would crash when a Vendor is deleted. PROTECT matches the
    # file's other supplier-side FKs (Product.original_manufacturer, etc.).
    vendor = models.ForeignKey("Vendor",on_delete=models.PROTECT)
    # products = models.ManyToManyField(Product)
    created_date = models.DateTimeField("Created Date",auto_now_add=True)
    tax_exempt = models.BooleanField("Tax Exempt?",default=False)
    # accounts = models.ManyToManyField(Accounts)
    subtotal = models.DecimalField("Subtotal",decimal_places=2,max_digits=10)
    shipping = models.DecimalField("Shipping ($)",decimal_places=2,max_digits=10)
    sales_tax = models.DecimalField("Sales Tax ($)",decimal_places=2,max_digits=10)
    grand_total = models.DecimalField("Grand Total ($)",decimal_places=2,max_digits=10)
    def save(self, *args, **kwargs):
        """Save normally, then derive the PO number (needs the auto id)."""
        super().save(*args, **kwargs)
        self.set_number()
    def set_number(self):
        """Assign 'PO' + zero-offset id (id + 10^4 gives 5 digits) once.

        NOTE(review): re-fetches and re-saves the row and leaves
        ``self.number`` unset on the in-memory instance — same pattern as
        PurchaseRequest.set_number; confirm callers re-read the object.
        """
        if not self.number:
            number = "PO" + str(self.id + (10 ** 4)) # Creates a number starting with 'PO' and ending with a 5 character (10^4) unique ID
            request = PurchaseOrder.objects.get(id=self.id)
            request.number = number
            request.save()
    def __str__(self):
        """Display purchase orders by their PO number."""
        return self.number
|
<filename>gym_unblockme/envs/unblockme_class.py<gh_stars>1-10
import numpy as np
# Unblock Me class
def get_example():
    """Return a sample (board, target) pair for the Unblock Me puzzle.

    Cell codes: 0 empty, 1 red block, 2 horizontal block, 3 vertical block.
    The target is the [row, col] of the exit cell.
    """
    board = np.array([
        [0, 2, 2, 0],
        [1, 1, 0, 3],
        [0, 0, 0, 3],
        [2, 2, 0, 0],
    ])
    return board, [1, 3]
class unblock_me:
    """State and rules for an Unblock Me board.

    The internal state is a (H, W, 4) array of 0/1 layers:
      layer 0: target (exit) cell      layer 1: red block
      layer 2: horizontal blocks       layer 3: vertical blocks
    Simplifications: every block has length 2 and the red block is horizontal.
    """

    def __init__(self, matrix, goal):
        """Build the layered state from a coded matrix and the goal cell.

        Args:
            matrix: 2-D int array with cell codes 0 empty, 1 red,
                2 horizontal, 3 vertical.
            goal: (row, col) of the exit cell.
        """
        self.goal = goal
        goal_x, goal_y = self.goal
        self.shape = matrix.shape + (4,)
        self.internal_state = np.zeros(self.shape)
        self.internal_state[goal_x,goal_y,0] = 1 #Target Position
        for i in range(1,4):
            self.internal_state[:,:,i] = (matrix == i).astype(int)
        # Every block covers two cells, hence the division by 2.
        self.num_blocks = np.sum((self.internal_state[:,:,1:] != 0).astype(int))/2

    def __is_valid (self, x,y):
        """Return True when (x, y) lies inside the board."""
        return x >= 0 and x < self.shape[0] and \
               y >= 0 and y < self.shape[1]

    def get_block_listed(self):
        """Return a flat int array of all occupied-cell coordinates, layer by
        layer (target, red, horizontal, vertical)."""
        r = np.nonzero(self.internal_state[:,:,0])
        b = [i for i in zip(r[0],r[1])]
        state = b
        for i in range(1,3):
            r = np.nonzero(self.internal_state[:, :, i])
            b = [i for i in zip(r[0],r[1])]
            state += (b)
        # for the vertical blocks we transpose to have the coordinates in the right order
        r = np.nonzero(np.transpose(self.internal_state[:, :, 3]))
        b = [i for i in zip(r[1], r[0])]
        state += (b)
        r = np.array(state).flatten()
        return r

    def is_solved(self):
        """Return True when the red block occupies the target cell."""
        goal_x, goal_y = self.goal
        goal_cell = self.internal_state[goal_x, goal_y,:]
        return (goal_cell == [1,1,0,0]).all()

    def print(self):
        """Print the board with codes 1 target, 2 red, 3 horizontal, 4 vertical."""
        matrix = np.copy(self.internal_state[:,:,0])
        for i in range(1,4):
            matrix += (self.internal_state[:,:,i] == 1).astype(int)*(i+1)
        print(matrix)

    # Valid only if (x,y) targets a block (Horizontal, Vertical or Red)
    # Valid only if the direction is empty
    #
    # (X,Y) Coordinates where (0,0) is the top left corner
    #
    # DIR (Depends on the type of block)
    # 0: up, left
    # 1: down, right
    #
    # Return: True if the move is valid, False instead
    def __act(self, x, y, dir):
        """Try to slide the block covering (x, y) one cell in ``dir``."""
        cell = self.internal_state[x,y,1:]
        # No block selected
        if (cell == 0).all():
            return False
        # Check if in the selected cell there is only 1 block
        # If not then the board is in an inconsistent state and we raise an expection
        selected_block = np.where(cell == 1)[0]
        if len(selected_block) > 1:
            print("Coordinates: " + str((x,y)))
            # Bug fix: str + ndarray raised TypeError; wrap the array in str().
            print("Cell content: " + str(self.internal_state[x,y,:]))
            raise Exception("Inconsistent state of the board")
        selected_block = selected_block[0]
        # Target Direction to move
        shift_x = 0; shift_y = 0
        # If is the Red or Horizontal Block than try to move (left, right)
        # (because of Semplification2)
        if selected_block in [0,1]:
            if dir == 0: # move left
                shift_x = 0; shift_y = -1
            else:        # move right
                shift_x = 0; shift_y = +1
        # Vertical Block
        else:
            if dir == 0: # move up
                shift_x = -1; shift_y = 0
            else:        # move down
                shift_x = +1; shift_y = 0
        if not self.__is_valid(x + shift_x, y + shift_y):
            return False
        # if the target cell is occupied, it could mean that we
        # are selecting the other part of the block
        if (self.internal_state[x + shift_x, y + shift_y,1:] == 1).any():
            # if it is a different block, the move is Invalid
            if not (self.internal_state[x + shift_x, y + shift_y,1:] == cell).all():
                return False
            # if also the second cell is occupied the move is invalid
            if not self.__is_valid(x + 2*shift_x, y + 2*shift_y):
                return False
            if (self.internal_state[x + 2*shift_x, y + 2*shift_y, 1:] == 1).any():
                return False
            # Move the tile and free the space
            self.internal_state[x + 2*shift_x, y + 2*shift_y,1:] = cell
            self.internal_state[x          , y          ,1:] = [0,0,0]
        else:
            # Move the tile and free the space
            self.internal_state[x + shift_x, y + shift_y,1:] = cell
            self.internal_state[x - shift_x, y - shift_y,1:] = [0,0,0]
        # if we reached this point the move as been succesful
        return True

    def act(self, action):
        """Validate and apply an (x, y, dir) action; return move success."""
        if len(action) != 3:
            raise Exception("Non valid action")
        if not (action[:2] < self.shape[:2]).all():
            raise Exception("Non valid action")
        if action[2] not in [0,1]:
            raise Exception("Non valid action")
        return self.__act(action[0], action[1], action[2])
if __name__ == "__main__":
    # Demo: build the example puzzle and show the board plus derived state.
    board, target = get_example()
    demo = unblock_me(board, target)
    demo.print()
    print(demo.num_blocks)
    print(demo.get_block_listed())
<reponame>nitz14/hackaton
import multiprocessing
import time
from enum import Enum
import cv2
import keyboard
import numpy as np
import tensorflow as tf
class Key(Enum):
    """Arrow-key identifiers; values match the `keyboard` library key names."""
    UP = "up"
    DOWN = "down"
    LEFT = "left"
    RIGHT = "right"
class DetectorAPI:
    """Object detector backed by a TensorFlow 1.x frozen inference graph.

    Loads the graph once, keeps a persistent session, and exposes
    processFrame() for per-image detection.
    """
    def __init__(self, path_to_ckpt):
        """Load the frozen graph at *path_to_ckpt* and open a session."""
        self.path_to_ckpt = path_to_ckpt
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, "rb") as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name="")
        self.default_graph = self.detection_graph.as_default()
        self.sess = tf.Session(graph=self.detection_graph)
        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name("image_tensor:0")
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name(
            "detection_boxes:0"
        )
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name(
            "detection_scores:0"
        )
        self.detection_classes = self.detection_graph.get_tensor_by_name(
            "detection_classes:0"
        )
        self.num_detections = self.detection_graph.get_tensor_by_name(
            "num_detections:0"
        )
    def processFrame(self, image):
        """Run detection on one BGR/RGB frame.

        Returns (boxes, scores, classes, num): boxes as (top, left, bottom,
        right) pixel tuples, confidence scores, integer class ids, and the
        detection count.
        """
        # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image, axis=0)
        # Actual detection.
        # start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [
                self.detection_boxes,
                self.detection_scores,
                self.detection_classes,
                self.num_detections,
            ],
            feed_dict={self.image_tensor: image_np_expanded},
        )
        # end_time = time.time()
        # print("Elapsed Time:", end_time - start_time)
        im_height, im_width, _ = image.shape
        # Convert normalized [0, 1] box coordinates to pixel coordinates.
        boxes_list = [None for i in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (
                int(boxes[0, i, 0] * im_height),
                int(boxes[0, i, 1] * im_width),
                int(boxes[0, i, 2] * im_height),
                int(boxes[0, i, 3] * im_width),
            )
        return (
            boxes_list,
            scores[0].tolist(),
            [int(x) for x in classes[0].tolist()],
            int(num[0]),
        )
    def close(self):
        """Release the TensorFlow session and graph context."""
        self.sess.close()
        self.default_graph.close()
def threaded_function(kolejka, do_rozpoznania):
    """Detection worker: read frames from *kolejka*, find people, and push
    the single player's box to *do_rozpoznania*.

    Exactly one person may play; more than one triggers a warning. Detected
    people are outlined in a preview window; 'q' closes the loop.
    """
    model_path = "frozen_inference_graph.pb"
    odapi = DetectorAPI(path_to_ckpt=model_path)
    threshold = 0.9
    while True:
        frame = kolejka.get()
        boxes, scores, classes, num = odapi.processFrame(frame)
        # Class 1 represents human; keep only confident person detections.
        ludzie = [
            box
            for box, score, cls in zip(boxes, scores, classes)
            if cls == 1 and score > threshold
        ]
        if len(ludzie) > 1:
            print("BLAD! TYLKO JEDNA OSOBA MA GRAC!")
        elif len(ludzie) == 1:
            do_rozpoznania.put_nowait(ludzie[0])
        for box in ludzie:
            cv2.rectangle(frame, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)
        cv2.imshow("preview", frame)
        key = cv2.waitKey(1)
        if key & 0xFF == ord("q"):
            break
def rozpoznaj(do_rozpoznania, do_sterowania):
    """Gesture worker: compare consecutive person boxes from
    *do_rozpoznania* and forward the derived key to *do_sterowania*."""
    previous = do_rozpoznania.get()
    while True:
        current = do_rozpoznania.get()
        if previous and current:
            pressed = get_key(previous, current)
            if pressed:
                do_sterowania.put_nowait(pressed)
        previous = current
def get_key(p_box, c_box):
    """Derive a movement key from the previous and current person boxes.

    Boxes are (top, left, bottom, right). The box growing past both side
    edges reads as UP (jump); past only the left or right edge as
    LEFT/RIGHT; the top edge dropping beyond the vertical tolerance as
    DOWN (crouch). Returns None when movement is within tolerance.
    """
    tolerancja_bledu = 0.2
    prev_height = abs(p_box[0] - p_box[2])
    prev_width = abs(p_box[1] - p_box[3])
    dx = prev_width * tolerancja_bledu
    dy = prev_height * tolerancja_bledu
    print("parsuje")
    moved_left = c_box[1] < p_box[1] - dx
    moved_right = c_box[3] > p_box[3] + dx
    if moved_left and moved_right:
        return Key.UP
    if moved_left:
        return Key.LEFT
    if moved_right:
        return Key.RIGHT
    if c_box[0] > p_box[0] + dy:
        return Key.DOWN
    return None
def steruj(do_sterowania):
    """Input worker: consume keys from *do_sterowania* and inject key events.

    NOTE(review): only Key.DOWN results in actual key presses (ten of them,
    matching the original's ten duplicated calls); every other key is merely
    printed — confirm whether that is intentional.
    """
    while True:
        key = do_sterowania.get()
        if key == Key.DOWN:
            for _ in range(10):
                keyboard.press_and_release(key.value)
        print(key)
if __name__ == "__main__":
cap = cv2.VideoCapture(0)
kolejka = multiprocessing.Queue(maxsize=1)
do_rozpoznania = multiprocessing.Queue()
do_sterowania = multiprocessing.Queue()
thread = multiprocessing.Process(
target=threaded_function, args=(kolejka, do_rozpoznania)
)
thread.start()
thread_rozpoznania = multiprocessing.Process(
target=rozpoznaj, args=(do_rozpoznania, do_sterowania)
)
thread_rozpoznania.start()
thread_sterowania = multiprocessing.Process(
target=steruj, args=(do_sterowania,), daemon=True
)
thread_sterowania.start()
while True:
r, img = cap.read()
img = cv2.resize(img, (1280, 720))
img = cv2.flip(img, 1)
try:
kolejka.get_nowait()
except multiprocessing.queues.Empty:
kolejka.put_nowait(img.copy())
|
"""Test LinearActuator state plotting functionality."""
# Standard imports
import argparse
import asyncio
import logging
# Local package imports
from lhrhost.dashboard.linear_actuator.plots import LinearActuatorPlotter as Plotter
from lhrhost.messaging import (
MessagingStack,
add_argparser_transport_selector, parse_argparser_transport_selector
)
from lhrhost.protocol.linear_actuator import Protocol
from lhrhost.tests.messaging.transport.batch import (
BatchExecutionManager, LOGGING_CONFIG
)
from lhrhost.util import batch
# Logging
logging.config.dictConfig(LOGGING_CONFIG)
class Batch:
    """Actor-based batch execution.

    Builds a messaging stack and a linear-actuator protocol for one axis,
    registers the scripted test routine, and plots actuator state live.
    """
    def __init__(self, transport_loop, axis):
        """Initialize member variables.

        Args:
            transport_loop: transport event loop selected from CLI args.
            axis: one of 'p', 'z', 'y', 'x' — the actuator axis to drive.
        """
        self.messaging_stack = MessagingStack(transport_loop)
        self.protocol = Protocol('{}-Axis'.format(axis.upper()), axis)
        self.protocol_plotter = Plotter(self.protocol)
        # Protocol both receives responses and sends commands on the stack.
        self.messaging_stack.register_response_receivers(self.protocol)
        self.messaging_stack.register_command_senders(self.protocol)
        self.batch_execution_manager = BatchExecutionManager(
            self.messaging_stack.arbiter, self.messaging_stack.command_sender,
            self.test_routine, header=batch.OUTPUT_HEADER,
            # Delay the routine until the transport connection is up.
            ready_waiter=self.messaging_stack.connection_synchronizer.wait_connected
        )
        self.messaging_stack.register_execution_manager(self.batch_execution_manager)
        print('Showing plot...')
        self.protocol_plotter.show()
    async def test_routine(self):
        """Run the batch execution test routine."""
        print('Running test routine...')
        await self.protocol.initialized.wait()
        # Colors for plotting feedback-controller completion codes.
        self.colors = {
            0: 'gray',  # braking
            -1: 'orange',  # stalled
            -2: 'green',  # converged
            -3: 'red',  # timer
        }
        print('Motor position feedback control with position and motor duty notification')
        # Enable periodic (every 20 ticks) position and motor notifications.
        await self.protocol.position.notify.change_only.request(0)
        await self.protocol.position.notify.interval.request(20)
        await self.protocol.motor.notify.change_only.request(0)
        await self.protocol.motor.notify.interval.request(20)
        await self.protocol.position.notify.request(2)
        await self.protocol.motor.notify.request(2)
        # Bounce between two positions to exercise the feedback controller.
        for i in range(10):
            await self.go_to_position(100)
            await asyncio.sleep(0.5)
            await self.go_to_position(700)
            await asyncio.sleep(0.5)
        # Disable notifications before idling.
        await self.protocol.position.notify.request(0)
        await self.protocol.motor.notify.request(0)
        print(batch.OUTPUT_FOOTER)
        print('Idling...')
        self.protocol_plotter.server.run_until_shutdown()
    async def go_to_position(self, position):
        """Send the actuator to the specified position."""
        self.protocol_plotter.position_plotter.add_arrow(position, slope=2)
        self.protocol_plotter.duty_plotter.start_state_region()
        await self.protocol.feedback_controller.request_complete(position)
        # Shade the plot region with the controller's completion status color.
        self.protocol_plotter.duty_plotter.add_state_region(
            self.colors[self.protocol.last_response_payload]
        )
def main():
    """Test batch scripted control of a linear actuator."""
    arg_parser = argparse.ArgumentParser(
        description='Test batch scripted control of a linear actuator.'
    )
    # Transport options are contributed by the shared selector helper.
    add_argparser_transport_selector(arg_parser)
    arg_parser.add_argument(
        'axis', choices=['p', 'z', 'y', 'x'],
        help='Linear actuator axis.'
    )
    parsed = arg_parser.parse_args()
    # Build the batch runner on the selected transport loop and run it.
    runner = Batch(parse_argparser_transport_selector(parsed), parsed.axis)
    runner.messaging_stack.run()


if __name__ == '__main__':
    main()
|
<gh_stars>1-10
from datetime import timedelta
from prefect import task, Flow, unmapped
from prefect.engine.executors import DaskExecutor
import os
# the async db client pool used for the API is not serializable
from prefect.tasks.postgres import PostgresExecute, PostgresFetch
from db.schemas import observations, annotations
from psycopg2.errors import UniqueViolation
# todo: these prefect core tasks should be extended to support connection.executemany()
# to run whole chunks in a single transaction
pg_ex = PostgresExecute("reyearn_dev", user=None, password=None, host="localhost",)
pg_fetch = PostgresFetch("reyearn_dev", user=None, password=None, host="localhost",)
@task
def get_existing_classes():
    """Fetch all known classes from the database.

    Returns a two-element list: the raw (type, label) row tuples and the
    flat list of labels extracted from them.
    """
    rows = list(
        # pg_fetch returns 2d tuples
        pg_fetch.run(
            fetch="all",
            query="""
            --sql
            select type, label from reyearn.classes;
            """,
        )
    )
    labels = [row[1] for row in rows]
    return [rows, labels]
@task(max_retries=3, retry_delay=timedelta(seconds=1))
def build_file_lists():
    """Walk ./data/import and return [labelled_chunks, unlabelled_chunks].

    A file is "labelled" when its parent directory name contains a dot
    (label roots are identical to the class type); README files and
    dotfiles are skipped. Each returned element is a list of chunks of
    at most ``chunk_size`` paths, to be mapped over by the flow.
    """
    chunk_size = 100
    labelled = []
    unlabelled = []
    for dirpath, _dirs, files in os.walk("./data/import", followlinks=True):
        # the deepest directory name doubles as the class label
        label = dirpath.split("/")[-1]
        for name in files:
            if "README.md" in name or name.startswith("."):
                continue
            # labelled data has a label path with len > 1 level
            if "." in label:
                labelled.append(f"{dirpath}/{name}")
            else:
                unlabelled.append(f"{dirpath}/{name}")

    def chunk(paths):
        # split into consecutive chunks of at most chunk_size entries
        return [paths[i:i + chunk_size] for i in range(0, len(paths), chunk_size)]

    return [chunk(labelled), chunk(unlabelled)]
@task
def load_chunked_files(chunked_files, existing_labels):
    """Ingest one chunk of files.

    For each file: register its class if unseen, insert the file body as
    an observation (deduplicated by MD5 hash), and collect a
    (label, hash) pair for later insertion by load_annotations.

    Args:
        chunked_files: list of file paths (one chunk).
        existing_labels: labels already present in reyearn.classes.

    Returns:
        list of (class_label, observation_hash) tuples.
    """
    # FIX: renamed from `annotations`, which shadowed the module-level
    # `db.schemas.annotations` import.
    chunk_annotations = []
    for file in chunked_files:
        # toss weird characters, they won't make it past the tokenizer anyway
        # full content type parsing is of course the holy grail
        with open(file, "r", encoding="latin-1", errors="ignore") as filehandle:
            filecontent = filehandle.read()
        file_label = file.split("/")[-2]
        if file_label not in existing_labels:
            label_root = file_label.split(".")[0]
            # todo: fix minor seq inflation with extra select
            # when migrating to chunk-level transactions
            pg_ex.run(
                query="""
                --sql
                insert into reyearn.classes (type, label) values (%s, %s)
                on conflict do nothing;
                """,
                data=(label_root, file_label),
            )
        try:
            # FIX: dropped a pointless f-prefix on this placeholder-free query.
            pg_ex.run(
                query="""
                --sql
                insert into reyearn_tenant_0.observations (text, hash)
                values (%s, MD5(%s)) on conflict do nothing;
                """,
                data=(filecontent, filecontent),
            )
        except UniqueViolation:
            # duplicate observation: already ingested, safe to ignore
            pass
        # todo: remove once client supports inserts with returning,
        # for chunk-level transactions, and revisit other hash functions
        file_hash = pg_fetch.run(
            fetch="one", query="select md5(%s);", data=(filecontent,)
        )
        chunk_annotations.append((file_label, file_hash[0]))
    return chunk_annotations
@task
def load_annotations(annotations):
    """Insert confirmed (label, hash) annotation pairs, ignoring duplicates."""
    insert_sql = """
    --sql
    insert into reyearn.annotations (class_label, observation_hash, status)
    values (%s, %s, %s) on conflict do nothing;
    """
    for class_label, observation_hash in annotations:
        pg_ex.run(
            query=insert_sql,
            data=(class_label, observation_hash, "confirmed"),
        )
def main():
    """Assemble the importer flow and run it on a local Dask executor."""
    with Flow("importer", schedule=None) as flow:
        # tasks
        existing_classes = get_existing_classes()
        chunked_files = build_file_lists()
        # partition and map over chunks to flatten the load curve
        # labelled data
        annotations = load_chunked_files.map(
            chunked_files[0], unmapped(existing_classes[1]),
        )
        # unlabelled data
        load_chunked_files.map(
            chunked_files[1], unmapped(existing_classes[1]),
        )
        load_annotations.map(annotations)
    # register with dashboard; best-effort so the flow still runs when no
    # Prefect server is available.
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    try:
        flow.register(project_name="Reyearn")
    except Exception:
        pass
    # agent can be run externally with `prefect agent start`
    # flow.run_agent(show_flow_logs=True)
    flow_state = flow.run(executor=DaskExecutor())
    # outputs a pdf visualization of this flow
    flow.visualize(flow_state=flow_state, filename="dags/importer_latest")


if __name__ == "__main__":
    main()
|
<filename>tests/conftest.py<gh_stars>1-10
import pytest
@pytest.fixture(autouse=True)
def setup(fn_isolation):
    """
    Isolation setup fixture.

    This ensures that each test runs against the same base environment.
    NOTE(review): `fn_isolation` is provided by the brownie test plugin
    and presumably snapshots/reverts chain state around each test —
    confirm against the project's brownie configuration.
    """
    pass
@pytest.fixture(scope="module")
def aave_lending_pool_v1(Contract):
    """
    Yield a `Contract` object for the Aave lending pool address provider
    (V1, mainnet address).
    """
    yield Contract("0x24a42fD28C976A61Df5D00D0599C34c4f90748c8")


@pytest.fixture(scope="module")
def flashloan_v1(FlashloanV1, aave_lending_pool_v1, accounts):
    """
    Deploy a `FlashloanV1` contract from `accounts[0]` and yield the
    generated object.
    """
    yield FlashloanV1.deploy(aave_lending_pool_v1, {"from": accounts[0]})


@pytest.fixture(scope="module")
def aave_lending_pool_v2(Contract):
    """
    Yield a `Contract` object for the Aave lending pool address provider
    (V2, mainnet address).
    """
    yield Contract("0xB53C1a33016B2DC2fF3653530bfF1848a515c8c5")


@pytest.fixture(scope="module")
def flashloan_v2(FlashloanV2, aave_lending_pool_v2, accounts):
    """
    Deploy a `FlashloanV2` contract from `accounts[0]` and yield the
    generated object.
    """
    yield FlashloanV2.deploy(aave_lending_pool_v2, {"from": accounts[0]})
# Mainnet reserve token fixtures - addresses are taken from
# https://docs.aave.com/developers/v/1.0/deployed-contracts/deployed-contract-instances#reserves-assets
@pytest.fixture(scope="module")
def WETH(Contract):
    yield Contract("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2")


@pytest.fixture(scope="module")
def ETH():
    # Aave's pseudo-address convention for native ether (not a contract).
    yield "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"


@pytest.fixture(scope="module")
def DAI(Contract):
    yield Contract("0x6B175474E89094C44Da98b954EedeAC495271d0F")


@pytest.fixture(scope="module")
def USDC(Contract):
    yield Contract("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48")


@pytest.fixture(scope="module")
def SUSD(Contract):
    yield Contract("0x57Ab1ec28D129707052df4dF418D58a2D46d5f51")


@pytest.fixture(scope="module")
def TUSD(Contract):
    yield Contract("0x0000000000085d4780B73119b644AE5ecd22b376")


@pytest.fixture(scope="module")
def USDT(Contract):
    yield Contract("0xdAC17F958D2ee523a2206206994597C13D831ec7")


@pytest.fixture(scope="module")
def BUSD(Contract):
    yield Contract("0x4Fabb145d64652a948d72533023f6E7A623C7C53")


@pytest.fixture(scope="module")
def BAT(Contract):
    yield Contract("0x0D8775F648430679A709E98d2b0Cb6250d2887EF")


@pytest.fixture(scope="module")
def KNC(Contract):
    yield Contract("0xdd974D5C2e2928deA5F71b9825b8b646686BD200")


@pytest.fixture(scope="module")
def LEND(Contract):
    yield Contract("0x80fB784B7eD66730e8b1DBd9820aFD29931aab03")


@pytest.fixture(scope="module")
def LINK(Contract):
    yield Contract("0x514910771AF9Ca656af840dff83E8264EcF986CA")


@pytest.fixture(scope="module")
def MANA(Contract):
    yield Contract("0x0F5D2fB29fb7d3CFeE444a200298f468908cC942")


@pytest.fixture(scope="module")
def MKR(Contract):
    yield Contract("0x9f8F72aA9304c8B593d555F12eF6589cC3A579A2")


@pytest.fixture(scope="module")
def REP(Contract):
    yield Contract("0x1985365e9f78359a9B6AD760e32412f4a445E862")


@pytest.fixture(scope="module")
def SNX(Contract):
    yield Contract("0xC011a73ee8576Fb46F5E1c5751cA3B9Fe0af2a6F")


@pytest.fixture(scope="module")
def WBTC(Contract):
    yield Contract("0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599")


@pytest.fixture(scope="module")
def ZRX(Contract):
    yield Contract("0xE41d2489571d322189246DaFA5ebDe1F4699F498")
|
<filename>symposion/sponsorship/views.py
from zipfile import ZipFile, ZIP_DEFLATED
import StringIO #as StringIO
import os
import json
from django.http import Http404, HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from symposion.sponsorship.forms import SponsorApplicationForm, SponsorDetailsForm, SponsorBenefitsFormSet
from symposion.sponsorship.models import Sponsor, SponsorBenefit
@login_required
def sponsor_apply(request):
    """Let the logged-in user submit a sponsorship application."""
    if request.method == "POST":
        form = SponsorApplicationForm(request.POST, user=request.user)
        if form.is_valid():
            new_sponsor = form.save()
            return redirect("sponsor_detail", pk=new_sponsor.pk)
    else:
        form = SponsorApplicationForm(user=request.user)
    # invalid POST falls through and re-renders with form errors
    return render_to_response("sponsorship/apply.html", {
        "form": form,
    }, context_instance=RequestContext(request))
@login_required
def sponsor_add(request):
    """Staff-only view for adding a sponsor directly."""
    if not request.user.is_staff:
        raise Http404()
    if request.method == "POST":
        form = SponsorApplicationForm(request.POST, user=request.user)
        if form.is_valid():
            new_sponsor = form.save(commit=False)
            new_sponsor.save()
            return redirect("sponsor_detail", pk=new_sponsor.pk)
    else:
        form = SponsorApplicationForm(user=request.user)
    # invalid POST falls through and re-renders with form errors
    return render_to_response("sponsorship/add.html", {
        "form": form,
    }, context_instance=RequestContext(request))
@login_required
def sponsor_detail(request, pk):
    """Show and edit a sponsor's details plus its active benefits formset.

    Staff may view any sponsor; a non-staff user is redirected to the
    sponsor list unless they are the applicant.
    """
    sponsor = get_object_or_404(Sponsor, pk=pk)
    if not request.user.is_staff:
        if sponsor.applicant != request.user:
            return redirect("sponsor_list")
    # only active benefits are exposed for editing
    formset_kwargs = {
        "instance": sponsor,
        "queryset": SponsorBenefit.objects.filter(active=True)
    }
    if request.method == "POST":
        form = SponsorDetailsForm(request.POST, user=request.user, instance=sponsor)
        formset = SponsorBenefitsFormSet(request.POST, request.FILES, **formset_kwargs)
        # both the details form and the benefits formset must validate
        if form.is_valid() and formset.is_valid():
            form.save()
            formset.save()
            messages.success(request, "Sponsorship details have been updated")
            return redirect("dashboard")
    else:
        form = SponsorDetailsForm(user=request.user, instance=sponsor)
        formset = SponsorBenefitsFormSet(**formset_kwargs)
    return render_to_response("sponsorship/detail.html", {
        "sponsor": sponsor,
        "form": form,
        "formset": formset,
    }, context_instance=RequestContext(request))
# with print logos and json reformat
@login_required
def export_sponsors(request):
    """Staff-only export: a zip of sponsor data (JSON) plus each
    sponsor's web and print logo assets.
    """
    if not request.user.is_staff:
        raise Http404()
    # use StringIO to make zip in memory, rather than on disk
    f = StringIO.StringIO()
    z = ZipFile(f, "w", ZIP_DEFLATED)
    data = []
    # collect the data and write web and print logo assets for each sponsor
    for sponsor in Sponsor.objects.all():
        # FIX: removed a stray trailing comma after this call that built
        # a throwaway one-element tuple each iteration.
        data.append({
            "name": sponsor.name,
            "website": sponsor.external_url,
            "description": sponsor.listing_text,
            "contact name": sponsor.contact_name,
            "contact email": sponsor.contact_email,
            "level": str(sponsor.level),
        })
        # a sponsor may have uploaded neither, one, or both logos;
        # missing logos raise AttributeError and are skipped
        try:
            logo = sponsor.website_logo
            path = logo.path
            z.write(path, str(sponsor.name) + "_weblogo" + os.path.splitext(path)[1])
        except AttributeError:
            pass
        try:
            print_logo = sponsor.print_logo
            path = print_logo.path
            z.write(path, str(sponsor.name) + "_printlogo" + os.path.splitext(path)[1])
        except AttributeError:
            pass
    # FIX: write the sponsor data straight into the zip; the previous code
    # wrote a sponsor_data.txt temp file into the working directory and
    # never removed it.
    z.writestr("sponsor_data.txt", json.dumps(data, encoding="utf-8", indent=4))
    z.close()
    response = HttpResponse(mimetype="application/zip")
    response["Content-Disposition"] = "attachment; filename=sponsor_file.zip"
    response.write(f.getvalue())
    f.close()
    return response
|
import os
import numpy as np
import cv2
import torch
from PIL import Image
import torchvision
#from torchvision.transforms import ToTensor, ToPILImage
import random
import torch.nn as nn
import torch.nn.functional as F
import multiprocessing
import torch.optim as optim
import math
from functools import reduce
class res_tagger(nn.Module):
    """Image tagger: pretrained ResNet backbone + two-layer linear head.

    Args:
        out_classes: number of output tag classes.
        base_model: torchvision hub model name for the backbone.
        res_out_features: feature width produced by the backbone's
            global-average-pool layer (2048 for resnet50).
    """

    def __init__(self,
                 out_classes = 6000,
                 base_model = 'resnet50',
                 res_out_features = 2048):
        super(res_tagger, self).__init__()
        self.out_classes = out_classes
        self.res_out_features = res_out_features
        # Instantiate the pretrained backbone (downloads weights on first
        # use) and drop its final fully-connected layer.
        n = torch.hub.load('pytorch/vision:v0.6.0', base_model, pretrained=True)
        self.res_net = nn.Sequential(*list(n.children())[:-1])
        self.out_1 = nn.Linear(res_out_features, res_out_features)
        self.out_2 = nn.Linear(res_out_features, out_classes)

    def forward(self, ins):
        """Score a batch of images.

        Returns a dict with key "out": raw class scores clamped to
        [-10, 10]; any softmax/log-softmax is left to the caller.
        """
        # load images from batch
        v = ins
        # store batch size
        batch_size = len(v)
        # project resnet outputs to model dimension; [:, :, 0, 0] drops
        # the 1x1 spatial dims left by the backbone's global avgpool
        v = F.leaky_relu(self.res_net(v))[:,:,0,0]
        v = F.leaky_relu(self.out_1(v))
        v = self.out_2(v)
        v = v.reshape([batch_size, self.out_classes])
        # clamp logits for numerical stability downstream
        v = torch.clamp(v, -10, 10)
        out_v = v  # F.log_softmax(v, dim=1)
        return {"out": out_v}
class res_tag_2(nn.Module):
    """Variant of res_tagger that moves its input to CUDA inside forward().

    Note: `model_features` (stored as self.features) and `input_size` are
    accepted for constructor compatibility but are not used by the network.
    """

    def __init__(self,
                 model_features = 1000,
                 out_classes = 6000,
                 input_size = 256,
                 base_model = 'resnet50',
                 res_out_features = 2048):
        super(res_tag_2, self).__init__()
        self.features = model_features
        self.out_classes = out_classes
        self.res_out_features = res_out_features
        # pretrained backbone without its final fully-connected layer
        n = torch.hub.load('pytorch/vision:v0.6.0', base_model, pretrained=True)
        self.res_net = nn.Sequential(*list(n.children())[:-1])
        self.out_1 = nn.Linear(res_out_features, res_out_features)
        self.out_2 = nn.Linear(res_out_features, out_classes)

    def forward(self, ins):
        """Score a batch; returns {"out": clamped raw class scores}."""
        # load images from batch
        v = ins  # ['imgs']
        # initialize cuda for images (requires a CUDA-capable device)
        v = v.cuda()
        # store batch size
        batch_size = len(v)
        # project resnet outputs to model dimension; [:, :, 0, 0] drops
        # the 1x1 spatial dims left by the backbone's global avgpool
        v = F.leaky_relu(self.res_net(v))[:,:,0,0]
        v = F.leaky_relu(self.out_1(v))
        v = self.out_2(v)
        v = v.reshape([batch_size, self.out_classes])
        # clamp logits for numerical stability downstream
        v = torch.clamp(v, -10, 10)
        out_v = v  # F.log_softmax(v, dim=1)
        return {"out": out_v}
class res_tag_3(nn.Module):
    """Tagger with a mixture-of-softmaxes head.

    The head emits `softmax_mixture_n` independent softmax distributions
    over the classes plus one weight logit per component; the output is
    the weight-softmax-blended average of the component distributions.
    """

    def __init__(self,
                 model_features = 1000,
                 out_classes = 6000,
                 input_size = 256,
                 base_model = 'resnet50',
                 res_out_features = 2048,
                 softmax_mixture_n = 8):
        super(res_tag_3, self).__init__()
        self.features = model_features
        self.out_classes = out_classes
        self.res_out_features = res_out_features
        self.softmax_mixture_n = softmax_mixture_n
        n = torch.hub.load('pytorch/vision:v0.6.0', base_model, pretrained=True)
        self.res_net = nn.Sequential(*list(n.children())[:-1])
        self.out_1 = nn.Linear(res_out_features, res_out_features)
        # per-component class scores plus one mixture-weight logit each
        self.out_2 = nn.Linear(res_out_features, out_classes * softmax_mixture_n + softmax_mixture_n)

    def forward(self, ins):
        """Score a batch; returns {"out": mixture-averaged class probabilities}."""
        # load images from batch
        v = ins  # ['imgs']
        # initialize cuda for images (requires a CUDA-capable device)
        v = v.cuda()
        # store batch size
        batch_size = len(v)
        # project resnet outputs to model dimension
        v = F.leaky_relu(self.res_net(v))[:,:,0,0]
        v = F.leaky_relu(self.out_1(v))
        v = self.out_2(v)
        # first softmax_mixture_n outputs are the mixture weights
        head_weights = v[:,:self.softmax_mixture_n].unsqueeze(2)
        head_weights = F.softmax(head_weights, dim = 1)
        # remaining outputs: one class-score block per mixture component
        v = v[:,self.softmax_mixture_n:].reshape([batch_size, self.softmax_mixture_n, self.out_classes]).contiguous()
        v = torch.clamp(v, -10, 10)
        v = F.softmax(v, dim = 2)
        # weighted average of the component softmaxes
        v = head_weights * v
        v = v.sum(dim = 1)
        v = v.reshape([batch_size, self.out_classes])
        out_v = v  # F.log_softmax(v, dim=1)
        return {"out": out_v}
|
"""
field_parser.py
parser for field data
"""
import os
import numpy as np
from collections import OrderedDict
from .parser import foam_comment, parseFoamDict, printdict
class foamField:
    """Openfoam dict class that support read/write openfoam dict file
    especially for the file in the "0" folder

    Attributes:
        fname: path the object was read from (or None).
        header, unit, data: cached file-section strings, filled lazily by
            the _header_str/_unit_str/_data_str helpers when writing.
        foamData: the parsed nested-dict representation of the file.
    """
    def __init__(self, fname=None):
        self.fname = fname
        # openfoam file string sections
        self.header = ""
        self.unit = ""
        self.data = ""
        # Openfoam nested dict data
        self.foamData = {}
        if fname:
            self.read(fname)

    def __repr__(self):
        # NOTE(review): prints the dict as a side effect and returns "",
        # so repr(obj) itself is empty — unconventional but preserved.
        printdict(self.foamData)
        return ""

    def __getitem__(self, key):
        return self.foamData[key]

    def read(self, fname):
        """Read openfoam file"""
        with open(fname, "r") as f:
            self.foamData = parseFoamDict(f.read())
        return self.foamData

    def _header_str(self):
        # Write header string: the FoamFile banner block.
        info = ""
        for k, p in self.foamData['FoamFile'].items():
            # "location" values are quoted in OpenFOAM headers
            if(k=="location"): info+="\t" + f"{k:<10}\t\"{p}\";\n"
            else: info+="\t" + f"{k:<10}\t{p};\n"
        self.header = foam_comment+"FoamFile\n{\n%s}\n" %(info) + "\n"
        return self.header

    def _unit_str(self):
        # Write unit string, e.g. "dimensions [0 1 -1 0 0 0 0];"
        self.unit = "dimensions [%s];\n\n" %(" ".join([str(int(i)) for i in self.foamData['dimensions']]) )
        self.unit += "\n"
        return self.unit

    def _data_str(self):
        # Write internal and boundary field string
        string = ""
        # internal field data
        name = 'internalField'
        string += field2foam({name:self.foamData[name]}, level=0)
        string += "\n"
        # boundary field data
        name = 'boundaryField'
        string += field2foam({name:self.foamData[name]}, level=0)
        string += "\n"
        self.data = string
        return self.data

    def write(self, fname):
        """Write dict fields into openfoam file"""
        file_str = self._header_str()+self._unit_str()+self._data_str()
        with open(fname, "w",newline='\n') as outf:
            outf.write(file_str)
def countDictLevels(d):
    """Count how many nesting levels are in a dict (a flat dict is 1).

    FIX: the original raised ValueError for an empty dict because max()
    received an empty sequence; an empty dict now counts as one level.
    """
    return max(
        (countDictLevels(v) if isinstance(v, dict) else 0 for v in d.values()),
        default=0,
    ) + 1
def field2foam(foam_dict, level=0):
    """Convert parsed foam dict data into an OpenFOAM source string.

    FIX: the parameter was named ``dict``, shadowing the builtin; renamed
    to ``foam_dict`` (all in-file callers pass it positionally).
    """
    lines = _field2foam(foam_dict, level=level)
    return "\n".join(lines) + "\n"
def _field2foam(foam_object, level=0, maxlength=50):
    """Recursively convert nested foam data into a list of indented
    OpenFOAM source lines.

    Modified from https://github.com/napyk/foamfile

    Parameters:
        foam_object: dict / list / scalar parsed foam data.
        level: current indentation depth, in tabs.
        maxlength: unused in this implementation; kept for compatibility.
    """
    lines = []
    if type(foam_object) in (list, tuple):
        for list_entry in foam_object:
            if type(list_entry) in (list, tuple):
                # nested list -> single-line parenthesised vector
                lines.append("\t" * level + "(" + " ".join(_field2foam(list_entry, 0)) + ")")
            elif type(list_entry) in (dict, OrderedDict):
                # dict inside a list -> anonymous { ... } block
                lines.append("\t" * level + "{")
                lines += _field2foam(list_entry, level + 1)
                lines.append("\t" * level + "}")
            else:
                # scalar entry
                lines.append("\t" * level + str(list_entry))
    elif type(foam_object) in (dict, OrderedDict):
        if len(foam_object) > 0:
            # pad keys to a common column so values line up.
            # NOTE(review): raises ValueError when no key is a str —
            # presumably foam keys are always strings; confirm in parser.
            tab_expander = max([len(i) for i in foam_object if type(i) is str]) + 1
            for key, value in foam_object.items():
                if type(value) in (dict, OrderedDict):
                    # sub-dictionary -> "<key>" followed by a { ... } block
                    lines += ["\t" * level + f"{key}", "\t" * level + "{"]
                    lines += _field2foam(value, level + 1)
                    lines.append("\t" * level + "}")
                elif type(value) in (list, tuple):
                    if (value[0] in ["uniform","nonuniform"]): #Special for field writer
                        lines += ["\t" * level + f"{key}"]
                        if(value[0] == "nonuniform"):
                            # "<key> nonuniform <type> <count>" then a ( ... ); block.
                            # NOTE(review): the value[:-2] / value[2] / value[-1]
                            # indexing assumes the parser's (kind, type, count,
                            # payload) layout — confirm against parseFoamDict.
                            lines[-1] += " "+" ".join(_field2foam(value[:-2], 0)) + f" {int(value[2])}"
                            lines += ["\t" * level + "("]
                            lines += _field2foam(value[-1], level)
                            lines.append("\t" * level + ");")
                        if(value[0] == "uniform"):
                            # "<key> uniform <value>;" on one line
                            lines[-1] += " " + " ".join(_field2foam(value, 0)) + ";"
                    else:
                        # plain list value -> "<key>" then a ( ... ); block
                        lines += ["\t" * level + f"{key}", "\t" * level + "("]
                        lines += _field2foam(value, level + 1)
                        lines.append("\t" * level + ");")
                else:
                    if key in ["#include", "#includeIfPresent", "#includeEtc", "#includeFunc", "#remove"]:
                        # directives take no trailing semicolon
                        lines.append("\t" * level + str(key).ljust(tab_expander) + str(value))
                    else:
                        lines.append("\t" * level + str(key).ljust(tab_expander) + str(value) + ";")
    return lines
from __future__ import print_function
import tensorflow as tf
import argparse
import os
from six.moves import cPickle
from model import LineModel,ReversedModel,PostModel
from six import text_type
def main():
    """Parse command-line options and run the sampler."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--forward_dir', type=str, default='save',
                            help='model directory to store forward checkpointed models')
    arg_parser.add_argument('--reversed_dir', type=str, default='reversed',
                            help='model directory to store reversed checkpointed models')
    arg_parser.add_argument('--post_dir', type=str, default='reversed',
                            help='model directory to store post checkpointed models')
    arg_parser.add_argument('-n', type=int, default=500,
                            help='number of characters to sample')
    arg_parser.add_argument('--prime', type=text_type, default=u' ',
                            help='prime text')
    arg_parser.add_argument('--sample', type=int, default=1,
                            help='0 to use max at each timestep, 1 to sample at '
                                 'each timestep, 2 to sample on spaces')
    sample(arg_parser.parse_args())
def sample(args):
    """Sample text from the forward model, then from the reversed model.

    Loads the saved training configs and char/vocab mappings for the
    forward, reversed and post models, restores each checkpoint in its
    own graph, and prints the generated text.
    """
    forward_args, reversed_args, post_args = None, None, None
    forward_chars, forward_vocab = None, None
    reversed_chars, reversed_vocab = None, None
    post_chars, post_vocab = None, None
    # Load arguments
    with open(os.path.join(args.forward_dir, 'config.pkl'), 'rb') as f:
        forward_args = cPickle.load(f)
    with open(os.path.join(args.reversed_dir, 'config.pkl'), 'rb') as f:
        reversed_args = cPickle.load(f)
    with open(os.path.join(args.post_dir, 'config.pkl'), 'rb') as f:
        post_args = cPickle.load(f)
    # Load vocabularies
    with open(os.path.join(args.forward_dir, 'chars_vocab.pkl'), 'rb') as f:
        forward_chars, forward_vocab = cPickle.load(f)
    with open(os.path.join(args.reversed_dir, 'chars_vocab.pkl'), 'rb') as f:
        reversed_chars, reversed_vocab = cPickle.load(f)
    # BUGFIX: this third load previously re-read the *forward* vocabulary
    # into forward_chars/forward_vocab again (copy-paste), leaving
    # post_chars/post_vocab forever None.
    with open(os.path.join(args.post_dir, 'chars_vocab.pkl'), 'rb') as f:
        post_chars, post_vocab = cPickle.load(f)
    model = LineModel(forward_args, training=False)
    print("Hit")
    forwardPass = None
    backwardsPass = None
    # postModel = PostModel(post_args, training=False)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(args.forward_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            forwardPass = model.sample(sess, forward_chars, forward_vocab, args.n, args.prime,
                                       args.sample)
            print(forwardPass)
    # start a fresh graph for the reversed model
    tf.reset_default_graph()
    reversedModel = ReversedModel(reversed_args, training=False)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(args.reversed_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            backwardsPass = reversedModel.sample(sess, reversed_chars, reversed_vocab, args.n, "\nnwa",
                                                 args.sample)
            print()
            print(backwardsPass)


if __name__ == '__main__':
    main()
|
<filename>util/mx_tools.py
import numpy as np
from util.misc import assert_shape
def project_points(calib, points3d):
    """
    Projects 3D points using a calibration matrix.

    Parameters:
        calib: 3x3 pinhole calibration matrix (no distortion).
        points3d: ndarray of shape (nPoints, 3)

    Returns:
        ndarray of shape (nPoints, 2) with pixel coordinates.
    """
    assert points3d.ndim == 2 and points3d.shape[1] == 3
    focal = np.array([calib[0, 0], calib[1, 1]])
    center = np.array([calib[0, 2], calib[1, 2]])
    # perspective divide by z, then scale by focal length and shift by
    # the principal point
    return points3d[:, :2] / points3d[:, [2]] * focal + center
def calibration_matrix(points2d, points3d):
    """
    Calculates camera calibration matrix (no distortion) from 3D points and their projection.
    Only works if all points are away from the camera, eg all z coordinates>0.

    Returns:
        calib, reprojection error, x residuals, y residuals, x singular values, y singular values
    """
    assert points2d.ndim == 2 and points2d.shape[1] == 2
    assert points3d.ndim == 2 and points3d.shape[1] == 3

    def fit_axis(ratios, targets):
        # least-squares fit of targets ~ focal * ratios + center
        design = np.column_stack([ratios, np.ones(len(ratios))])
        solution, residuals, _rank, singular = np.linalg.lstsq(design, targets, rcond=None)
        return solution, residuals, singular

    (fx, cx), resx, sx = fit_axis(points3d[:, 0] / points3d[:, 2], points2d[:, 0])
    (fy, cy), resy, sy = fit_axis(points3d[:, 1] / points3d[:, 2], points2d[:, 1])
    calib = np.eye(3)
    calib[0, 0], calib[0, 2] = fx, cx
    calib[1, 1], calib[1, 2] = fy, cy
    # mean absolute reprojection error over both pixel coordinates
    reproj = np.mean(np.abs(points2d - project_points(calib, points3d)))
    return calib, reproj, resx, resy, sx, sy
def procrustes_depth(coords2d, coords3d, focal_length, verbose=False, approximate=False):
    """
    Absolute depth prediction based on Mehta et al. (https://arxiv.org/pdf/1611.09813.pdf) .

    Parameters:
        coords2d: ndarray(nJoints, 2[x,y]), the 2D coordinates, relative to the centerpoint of the camera
        coords3d: ndarray(nJoints, 3[x,y,z]), the relative 3D coordinates
        focal_length: scalar, focus distance
        verbose: if True, prints the numerator/denominator of the scale.
        approximate: if True, uses the formula in https://arxiv.org/pdf/1611.09813.pdf, otherwise uses the solution without
                     any approximation. The latter gives better results.

    Returns:
        ndarray(3,), the optimal translation vector
    """
    assert len(coords2d) == len(coords3d)
    assert coords2d.ndim == 2
    assert coords3d.ndim == 2
    assert coords3d.shape[1] == 3
    # only x/y of the 3D pose take part in the fit
    coords3d = coords3d[:, :2]
    mean2d = np.mean(coords2d, axis=0, keepdims=True)
    mean3d = np.mean(coords3d, axis=0, keepdims=True)
    assert_shape(mean2d, (1, 2))
    assert_shape(mean3d, (1, 2))
    # orig method using an approximation (does not provide any visible speedup)
    if approximate:
        numer = np.sqrt(np.sum(np.square(coords3d - mean3d)))
        denom = np.sqrt(np.sum(np.square(coords2d - mean2d)))
    else:
        # no cos approximation
        numer = np.sum(np.square(coords3d - mean3d))
        denom = np.trace(np.dot((coords2d - mean2d), (coords3d - mean3d).T))
    if verbose:
        # BUGFIX: was a Python 2 print *statement*, a SyntaxError on
        # Python 3 (which the rest of this module targets, e.g. rcond=None).
        print("proc: %f / %f" % (numer, denom))
    return numer / denom * np.array([mean2d[0, 0], mean2d[0, 1], focal_length]) - np.array([mean3d[0, 0], mean3d[0, 1], 0])
def procrustes_translations(pose3d, pose2d, focus, cx, cy):
    """
    Calculates the translations for a set of poses using optimal reprojection loss.

    Parameters:
        pose3d: ndarray(nPoses, nJoints, 3), the relative 3D coordinates
        pose2d: ndarray(nPoses, nJoints, 3[x,y,score]), the 2D coordinates and OpenPose score.
        focus: scalar, focus distance
        cx,cy: scalars, camera principal points

    Returns:
        ndarray(nPoses, 3), the optimal translation vectors
    """
    assert pose3d.shape == pose2d.shape
    assert pose2d.ndim == 3, pose2d.ndim
    # work on copies: the 2D coordinates are shifted in place below
    pose2d = pose2d.copy()
    pose3d = pose3d.copy()
    translations = np.zeros((len(pose3d), 3))
    for idx, (p3, p2) in enumerate(zip(pose3d, pose2d)):
        # only use joints OpenPose is reasonably confident about
        visible = p2[:, 2] > 0.2
        # recenter pixel coordinates onto the principal point
        p2[:, 0] -= cx
        p2[:, 1] -= cy
        translations[idx] = procrustes_depth(
            p2[visible, :2], p3[visible, :], focus, approximate=False
        )
    return translations
def normalize_arr(data, mean, std):
    """Standardize `data` with the given per-feature mean and std."""
    assert mean.shape == std.shape
    assert data.shape[-1:] == mean.shape
    centered = data - mean
    return centered / std
def insert_zero_joint(data, ind=14):
    """Add back an all-zero hip joint at index `ind` of a 16-joint
    hip-relative MuPo-TS pose, yielding 17 joints."""
    assert data.shape[-2] == 16 and data.ndim >= 2
    # np.insert adds a zero row along the joint axis, preserving dtype
    return np.insert(data, ind, 0, axis=-2)
def remove_root(data, root_ind):
    """
    Removes a joint from the dataset by moving poses to its origin and
    dropping it from the array.

    :param data: (nPoses, nJoints, 3) array
    :param root_ind: index of the joint to be removed
    :return: (nPoses, nJoints-1, 3) array
    """
    assert data.ndim >= 3 and data.shape[-1] in (2, 3)
    # subtract the root joint from every joint (fancy index keeps the axis),
    # then drop the now all-zero root row
    centered = data - data[..., [root_ind], :]
    return np.delete(centered, root_ind, axis=-2)
def remove_openpose_root(data, root_ind):
    """
    Removes a joint from an openpose dataset by moving x/y to its origin and
    dropping it from the array. The confidence score column is untouched.

    NOTE: mutates `data` in place (x/y columns are shifted) before deleting.

    :param data: (nPoses, nJoints, 3) array
    :param root_ind: index of the joint to be removed
    :return: (nPoses, nJoints-1, 3) array
    """
    assert data.ndim >= 3 and data.shape[-1] == 3
    # fancy indexing with [root_ind] yields a copy shaped (..., 1, 2)
    root_xy = data[..., [root_ind], :2]
    data[..., :2] -= root_xy
    return np.delete(data, root_ind, axis=-2)
# Normalize 2D keypoints against the image dimensions.
def img_normalize(data, width, height, scale_only=False):
    """
    Normalizes a set of points by scaling them by the image size, after
    first moving the coordinate origin to the image center (the
    recentering step is skipped when ``scale_only`` is True). The score
    column is left untouched. Mutates and returns `data`.

    :param data: (nFrames, nPoses, [x,y,score])
    :return: ndarray(nFrames, nPoses, [x,y,score])
    """
    assert data.shape[2] == 3 and data.ndim == 3
    shift_x = 0.0 if scale_only else width / 2
    shift_y = 0.0 if scale_only else height / 2
    data[:, :, 0] = (data[:, :, 0] - shift_x) / width * 2
    data[:, :, 1] = (data[:, :, 1] - shift_y) / height * 2
    return data
def relative_pose_to_absolute(data3d, std3d, mean3d):
    """3D result postprocess: denormalize flattened 16-joint relative
    poses and add back an all-zero hip at index 14."""
    assert data3d.ndim == 2 and data3d.shape[1] == 48
    denorm = data3d.copy() * std3d + mean3d
    poses = denorm.reshape((-1, 16, 3))
    return insert_zero_joint(poses, ind=14)
def combine_pose_and_trans(data3d, std3d, mean3d):
    """3D result postprocess: the first 16*3 values are relative poses,
    the last three are the hip (x, y, log-depth).

    Denormalizes, exponentiates the hip depth, shifts every relative
    joint by the absolute hip, and reassembles a 17-joint pose with the
    hip re-inserted at index 14.
    """
    assert data3d.ndim == 2 and data3d.shape[1] == 51
    denorm = data3d * std3d + mean3d
    hip = denorm[:, -3:].copy()
    # the hip depth is stored in log space
    hip[:, 2] = np.exp(hip[:, 2])
    # shift all relative joints by the absolute hip position
    abs_pose = denorm[:, :-3].reshape((-1, 16, 3)) + hip[:, np.newaxis, :]
    root_ind = 14
    result = np.zeros((len(denorm), 17, 3), dtype='float32')
    result[:, :root_ind] = abs_pose[:, :root_ind]
    result[:, root_ind] = hip
    result[:, root_ind + 1:] = abs_pose[:, root_ind:]
    return result
|
# -*- coding: utf-8 -*-
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_hub as hub
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras import layers
from tensorflow.keras.layers import Input
import argparse
import os
from tensorflow.keras.applications import ResNet50V2
# --- CLI arguments -----------------------------------------------------------
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="path to input dataset")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
                help="path to output loss/accuracy plot")
ap.add_argument("-m", "--model", type=str,
                default="mask_detector.model",
                help="path to output face mask detector model")
args = vars(ap.parse_args())

# --- Load and preprocess the image dataset -----------------------------------
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
data = []
labels = []
IMG_SIZE = 224  # ResNet50V2 input resolution
CHANNELS = 3
N_LABELS = 2    # two classes (one output column each after to_categorical)
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the filename (the parent directory name)
    label = imagePath.split(os.path.sep)[-2]
    # load the input image (224x224) and preprocess it
    image = load_img(imagePath, target_size=(IMG_SIZE, IMG_SIZE))
    image = img_to_array(image)
    # scale pixel values to [0, 1]
    image = image/255
    # image = preprocess_input(image)
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
data = np.array(data, dtype="float32")
labels = np.array(labels)
# binarize labels, then one-hot encode them (LabelBinarizer yields a
# single column for two classes; to_categorical expands it to two)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# stratified 80/20 train/test split with a fixed seed for reproducibility
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.20, stratify=labels, random_state=42)
# --- Augmentation and model definition ---------------------------------------
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
# frozen ImageNet-pretrained backbone: only the new head below is trained
feature_extractor_layer = ResNet50V2(weights="imagenet", include_top=False,
                                     input_tensor=Input(shape=(IMG_SIZE,IMG_SIZE,CHANNELS)))
feature_extractor_layer.trainable = False
model = tf.keras.Sequential([
    feature_extractor_layer,
    layers.Flatten(name="flatten"),
    layers.Dense(1024, activation='relu', name='hidden_layer'),
    layers.Dropout(0.5),
    layers.Dense(N_LABELS, activation='sigmoid', name='output')
])
model.summary()
LR = 1e-5  # Keep it small when transfer learning
EPOCHS = 20
BS = 256
# NOTE(review): sigmoid + binary_crossentropy over two one-hot columns
# treats the outputs as independent binary heads; softmax +
# categorical_crossentropy would be the conventional choice for a
# two-class problem — confirm intent before changing.
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=LR),
    loss="binary_crossentropy",
    metrics=["accuracy"])
# --- Train, evaluate, save, and plot -----------------------------------------
import time  # NOTE(review): mid-script import kept in place; move to the top imports
start = time.time()
history = model.fit(aug.flow(trainX, trainY, batch_size=BS),
                    steps_per_epoch=len(trainX) // BS,
                    validation_data=(testX, testY),
                    epochs=EPOCHS)
print('\nTraining took {}'.format((time.time()-start)))
print("[INFO] evaluating network...")
# class prediction = argmax over the two output columns
predIdxs = model.predict(testX, batch_size=BS)
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs,target_names=lb.classes_))
print("[INFO] saving mask detector model...")
model.save(args["model"], save_format="h5")
# plot training/validation loss and accuracy per epoch
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), history.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), history.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), history.history["accuracy"], label="accuracy")
plt.plot(np.arange(0, N), history.history["val_accuracy"], label="val_accuracy")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.