text stringlengths 38 1.54M |
|---|
import time
dang = time.strftime('%H:%M:%S')
try:
zong = input('Ur FILE => ')
fileName = zong
except:
print('You didn\'t supply a valid filename.')
exit()
with open(fileName, 'r') as f:
file = f.readlines()
wordList = []
badList = []
for line in file:
if line in wordList:
badList.append(line)
else:
wordList.append(line)
file = open(fileName, 'w')
for line in wordList:
file.write(line)
file.close()
print('[{0}]: {1} duplicate lines removed from {2}.'.format(dang, len(badList), fileName))
|
from pathlib import Path
import divsufsort as sa
import ctypes
SIMILARITY_THRESHOLD = 20
def get_tweet(data, index):
    """Full text around *index*: header + ending bytes, decoded as UTF-8."""
    header = get_tweet_header_bytes(data, index)
    ending = get_tweet_ending_bytes(data, index)
    return (header + ending).decode("UTF-8")
def get_tweet_ending(data, index):
    """UTF-8 decoded text from *index* up to the next 0x01 separator."""
    raw = get_tweet_ending_bytes(data, index)
    return raw.decode("UTF-8")
def get_tweet_header_bytes(data, index):
    """Bytes from just after the previous 0x01 separator up to *index*.

    If no separator precedes *index*, rfind returns -1 and the slice
    starts at the beginning of *data*.
    """
    start = data.rfind(0x01, 0, index) + 1
    return data[start:index]
def get_tweet_ending_bytes(data, index):
    """Bytes from *index* up to (not including) the next 0x01 separator.

    NOTE(review): when no separator follows, find returns -1 and the slice
    data[index:-1] drops the final byte — preserved from the original.
    """
    end = data.find(0x01, index, len(data))
    return data[index:end]
def process_data(data, res):
    """Scan the suffix array `res` for runs of mutually similar suffixes.

    Walks suffixes in suffix-array order, accumulating prefix-similarity
    scores while consecutive suffixes share a long prefix with the current
    template. When a run ends with a large enough accumulated score, the
    tweet containing the last suffix of the run is recorded with that score
    (keeping the maximum score if the tweet is seen again).

    Returns a dict mapping tweet text -> accumulated similarity score.
    """
    tweets = {}
    tweet_template = get_tweet_ending(data, res[0])
    tweet_similarity = 0
    for i in range(1, len(data)):
        try:
            sim = similarity(tweet_template, get_tweet_ending(data, res[i]))
            if sim >= SIMILARITY_THRESHOLD:
                tweet_similarity += sim
            else:
                # Run ended: record it if its accumulated score is significant.
                if tweet_similarity >= SIMILARITY_THRESHOLD * 100:
                    tweet = get_tweet(data, res[i-1])
                    tweets[tweet] = max(tweets[tweet], tweet_similarity) if tweet in tweets else tweet_similarity
                tweet_template = get_tweet_ending(data, res[i])
                tweet_similarity = 0
        except Exception:
            # Narrowed from a bare except: still skips undecodable suffixes
            # (UnicodeDecodeError etc.) without swallowing KeyboardInterrupt.
            continue
    return tweets
def similarity(str1, str2):
    """Return the length of the common prefix of *str1* and *str2*."""
    common = 0
    for a, b in zip(str1, str2):
        if a != b:
            break
        common += 1
    return common
def display_results(tweets):
    """Write the scored tweets to a 'results' file, highest score first."""
    ranked = sorted(tweets.items(), key=lambda item: item[1], reverse=True)
    with open('results', 'w') as out:
        for text, score in ranked:
            out.write("--------------------------------\n")
            out.write("Similarity rank: %d\n\n" % score)
            out.write("%s\n\n" % text)
# Read the corpus, build its suffix array with libdivsufsort, and write the
# ranked near-duplicate report to the 'results' file.
data = Path("data").read_text().encode('UTF-8')
# divsufsort fills one 32-bit suffix index per input byte.
res = (ctypes.c_int * len(data))()
sa.divsufsort(data, res)
display_results(process_data(data, res))
|
import sys
import os
import time
sys.path.append("/Users/dantongzhu/Documents/Spring 2019/Machine Learning/project 2/ABAGAIL/ABAGAIL.jar")
import java.io.FileReader as FileReader
import java.io.File as File
import java.lang.String as String
import java.lang.StringBuffer as StringBuffer
import java.lang.Boolean as Boolean
import java.util.Random as Random
import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import dist.Distribution as Distribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.EvaluationFunction as EvaluationFunction
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.HillClimbingProblem as HillClimbingProblem
import opt.NeighborFunction as NeighborFunction
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.example.FourPeaksEvaluationFunction as FourPeaksEvaluationFunction
import opt.ga.CrossoverFunction as CrossoverFunction
import opt.ga.SingleCrossOver as SingleCrossOver
import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.GeneticAlgorithmProblem as GeneticAlgorithmProblem
import opt.ga.MutationFunction as MutationFunction
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.ga.UniformCrossOver as UniformCrossOver
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer
from array import array
"""
Commandline parameter(s):
none
"""
# Problem setup: N-bit Four Peaks with threshold T = N/5 (Jython/ABAGAIL).
N=200
T=N/5
fill = [2] * N
ranges = array('i', fill)
# Shared evaluation function, distributions and operators for all algorithms.
ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
# Problem wrappers: hill climbing (RHC/SA), genetic algorithm, MIMIC.
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
# Part 1: compare the 4 algorithms. For each iteration budget, run each
# algorithm n times and report the mean fitness and mean wall time (ms).
print "Part 1: compare 4 algorithms"
iterations = [50, 100, 200, 300, 500, 700, 1000, 3000, 4500, 6000, 7500, 9000, 10000]
n = 5
for iters in iterations:
    print "iters = " + str(iters)
    tot_eval_rhc = 0
    tot_eval_sa = 0
    tot_eval_ga = 0
    tot_eval_mimic = 0
    tot_time_rhc = 0
    tot_time_sa = 0
    tot_time_ga = 0
    tot_time_mimic = 0
    for i in range(1, n+1):
        # RHC: randomized hill climbing
        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iters)
        fit.train()
        tot_eval_rhc = tot_eval_rhc + ef.value(rhc.getOptimal())
        end = time.time()
        tot_time_rhc = tot_time_rhc + end - start
        # SA: simulated annealing (initial temp 1E11, cooling .95)
        start = time.time()
        sa = SimulatedAnnealing(1E11, .95, hcp)
        fit = FixedIterationTrainer(sa, iters)
        fit.train()
        tot_eval_sa = tot_eval_sa + ef.value(sa.getOptimal())
        end = time.time()
        tot_time_sa = tot_time_sa + end - start
        # GA: population 200, 100 mated, 10 mutated per generation
        start = time.time()
        ga = StandardGeneticAlgorithm(200, 100, 10, gap)
        fit = FixedIterationTrainer(ga, iters)
        fit.train()
        tot_eval_ga = tot_eval_ga + ef.value(ga.getOptimal())
        end = time.time()
        tot_time_ga = tot_time_ga + end - start
        # MIMIC: 200 samples, keep best 20
        start = time.time()
        mimic = MIMIC(200, 20, pop)
        fit = FixedIterationTrainer(mimic, iters)
        fit.train()
        tot_eval_mimic = tot_eval_mimic + ef.value(mimic.getOptimal())
        end = time.time()
        tot_time_mimic = tot_time_mimic + end - start
    # Averages over the n repetitions; times converted to milliseconds.
    eval_rhc = tot_eval_rhc/n
    eval_sa = tot_eval_sa/n
    eval_ga = tot_eval_ga/n
    eval_mimic = tot_eval_mimic/n
    time_rhc = 1000*tot_time_rhc/n
    time_sa = 1000*tot_time_sa/n
    time_ga = 1000*tot_time_ga/n
    time_mimic = 1000*tot_time_mimic/n
    print "evaluation:"
    print str(eval_rhc)
    print str(eval_sa)
    print str(eval_ga)
    print str(eval_mimic)
    print "time:"
    print str(time_rhc)
    print str(time_sa)
    print str(time_ga)
    print str(time_mimic)
    print ""
# #Part 2: More iterations for MIMIC
# print "Part 2: More iterations for MIMIC"
# iterations = [20000, 50000, 80000, 120000]
# for iters in iterations:
#     mimic = MIMIC(200, 20, pop)
#     fit = FixedIterationTrainer(mimic, iters)
#     fit.train()
#     print "iters = " + str(iters) + ", " + str(ef.value(mimic.getOptimal()))
#Result:
# iters = 20000, 200.0
# iters = 50000, 200.0
# iters = 80000, 200.0
# iters = 120000, 200.0
|
import re
from bruh import command, r
from plugins.userlist import userlist
from drivers.walnut import Walnut
# Last speaker per channel; used to resolve a bare "thanks" to a target.
last_sender = {}


@command('karma')
@command('k')
def karma(irc):
    """Return the channel's karma scores, highest first."""
    scores = list(r.hscan_iter(irc.key + ':karma'))
    # hscan_iter yields (field, value) byte pairs. Sort by the numeric karma
    # value, not by raw bytes — lexicographic byte order would rank b'9'
    # above b'10'.
    scores.sort(key=lambda pair: int(pair[1]), reverse=True)
    return 'Top Karma: ' + ', '.join(map(lambda v: b': '.join(v).decode('UTF-8'), scores))
@Walnut.hook('PRIVMSG')
def match_karma(message):
    """Watch PRIVMSGs for 'nick++' / 'nick--' and passive thanks; update karma.

    Returns a PRIVMSG reply for explicit karma changes, None after a thanks,
    and falls through (implicit None) after recording the last sender.
    """
    db_key = '{}:{}'.format(message.parent.frm, message.args[0])
    nick = message.prefix.split('!')[0]
    network = message.parent.frm
    channel = message.args[0]
    match = re.match(r'([\w\[\]\\`_\^\{\}\|-]+)(\+\+|--)', message.args[-1])
    # Increment Karma through karma whoring means. Restricting this to every 30
    # minutes doesn't seem to stop people whoring, but It's here anyway.
    if match and match.group(1) in userlist[network][channel]:
        rate_key = db_key + ':karma:{}'.format(nick)
        success = r.setnx(rate_key, '')
        if success:
            # BUG FIX: the expiry must go on the key setnx just created (keyed
            # by the sender). The original expired a key named after the
            # recipient, so the sender's rate-limit key never expired and each
            # user could only ever give karma once.
            r.expire(rate_key, 1800)
            r.hincrby(db_key + ':karma', match.group(1), 1)
            output = '{0} gained karma. {0} now has {1}'.format(
                match.group(1),
                r.hget(db_key + ':karma', match.group(1)).decode('UTF-8')
            )
        else:
            output = 'You manipulated the waves too recently to affect {}\'s karma.'.format(match.group(1))
        return 'PRIVMSG {} :{}'.format(
            channel,
            output
        )
    # Catch passive thanks and increment karma from it.
    # NOTE(review): '(:?\syou)?' was probably meant as '(?:\syou)?'; left
    # untouched because "fixing" it would renumber match.group(2).
    match = re.match(r'^thanks?(:?\syou)?(\s.+)?$', message.args[-1], re.I)
    if match:
        # Strip once and use the same name everywhere — the original stripped
        # only in the setnx key, so expire/hincrby operated on ' name' with a
        # leading space and the karma went to a different hash field.
        target = (match.group(2) if match.group(2) else last_sender.get(channel, 'DekuNut')).strip()
        success = r.setnx(db_key + ':thank:{}'.format(target), '')
        if success:
            r.expire(db_key + ':thank:{}'.format(target), 60)
            r.hincrby(db_key + ':karma', target, 1)
        return None
    # Store the last sender if no karma-whoring was done. This is so when users
    # thank without specifying a name, we can just grant the thanks to who we
    # are assuming the thankee is.
    last_sender[channel] = nick
|
# coding:utf-8
from components.buttons.circle_button import CircleButton
from components.slider import Slider, HollowHandleStyle
from PyQt5.QtCore import QPoint, Qt, pyqtSignal
from PyQt5.QtWidgets import QLabel, QWidget
from View.play_bar import MoreActionsMenu
from .play_bar_buttons import (FullScreenButton, LoopModeButton, PlayButton,
PullUpArrow, RandomPlayButton, VolumeButton)
from .volume_slider_widget import VolumeSliderWidget
class PlayBar(QWidget):
    """ Play bar shown on the playing interface. """
    # Emitted when the mouse enters / leaves the bar.
    enterSignal = pyqtSignal()
    leaveSignal = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        # Create child widgets
        self.__createWidget()
        # Initialize layout, signals and aliases
        self.__initWidget()

    def __createWidget(self):
        """ Create all child widgets of the play bar. """
        self.moreActionsMenu = MoreActionsMenu(self, 0)
        self.playButton = PlayButton(self)
        self.volumeButton = VolumeButton(self)
        # Parented to the window so it can float above the bar.
        self.volumeSliderWidget = VolumeSliderWidget(self.window())
        self.FullScreenButton = FullScreenButton(self)
        self.playProgressBar = PlayProgressBar("3:10", parent=self)
        self.pullUpArrowButton = PullUpArrow(
            ":/images/playing_interface/ChevronUp.png", self)
        self.lastSongButton = CircleButton(
            ":/images/playing_interface/Previous.png", self)
        self.nextSongButton = CircleButton(
            ":/images/playing_interface/Next.png", self)
        self.randomPlayButton = RandomPlayButton(
            [":/images/playing_interface/randomPlay_47_47.png"], self)
        self.loopModeButton = LoopModeButton(
            [
                ":/images/playing_interface/RepeatAll.png",
                ":/images/playing_interface/RepeatOne.png",
            ],
            self,
        )
        self.moreActionsButton = CircleButton(
            ":/images/playing_interface/More.png", self)
        self.showPlaylistButton = CircleButton(
            ":/images/playing_interface/Playlist_47_47.png", self)
        self.smallPlayModeButton = CircleButton(
            ":/images/playing_interface/SmallestPlayMode.png", self)
        # Widgets whose clicks should dismiss the floating volume slider.
        self.__widget_list = [
            self.playButton,
            self.FullScreenButton,
            self.playProgressBar.progressSlider,
            self.pullUpArrowButton,
            self.lastSongButton,
            self.nextSongButton,
            self.randomPlayButton,
            self.loopModeButton,
            self.moreActionsButton,
            self.showPlaylistButton,
            self.smallPlayModeButton,
        ]

    def __initWidget(self):
        """ Initialize widget geometry and wiring. """
        self.setFixedHeight(193)
        self.setAttribute(Qt.WA_TranslucentBackground)
        # Fixed-position buttons on the left side of the bar.
        self.lastSongButton.move(17, 85)
        self.playButton.move(77, 85)
        self.nextSongButton.move(137, 85)
        self.randomPlayButton.move(197, 85)
        self.loopModeButton.move(257, 85)
        self.volumeButton.move(317, 85)
        self.moreActionsButton.move(387, 85)
        self.volumeSliderWidget.hide()
        self.playProgressBar.move(0, 45)
        # Right-aligned buttons depend on the current width.
        self.__moveButtons()
        # Connect signals to slots
        self.__connectSignalToSlot()
        # Expose child widgets and their methods as attributes of the bar
        self.__referenceWidget()

    def __showVolumeSlider(self):
        """ Toggle the floating volume slider. """
        # Show it centered under the volume button; hide it if already visible.
        if not self.volumeSliderWidget.isVisible():
            pos = self.mapToGlobal(self.volumeButton.pos())
            x = pos.x() + int(
                self.volumeButton.width() / 2 - self.volumeSliderWidget.width() / 2
            )
            y = self.y() + 15
            self.volumeSliderWidget.move(x, y)
            self.volumeSliderWidget.show()
        else:
            # Hide the volume slider
            self.volumeSliderWidget.hide()

    def __moveButtons(self):
        """ Reposition the width-dependent buttons. """
        self.pullUpArrowButton.move(
            self.width()//2 - self.pullUpArrowButton.width()//2, 165)
        self.FullScreenButton.move(self.width() - 64, 85)
        self.smallPlayModeButton.move(self.width() - 124, 85)
        self.showPlaylistButton.move(self.width() - 184, 85)

    def resizeEvent(self, e):
        """ Stretch the progress bar and move the buttons on resize. """
        super().resizeEvent(e)
        self.playProgressBar.resize(self.width(), 38)
        self.__moveButtons()

    def enterEvent(self, e):
        """ Emit enterSignal when the mouse enters the bar. """
        self.enterSignal.emit()

    def leaveEvent(self, e):
        """ Emit leaveSignal when the mouse leaves the bar. """
        self.leaveSignal.emit()

    def __referenceWidget(self):
        """ Alias child widgets and their methods on the bar itself. """
        self.progressSlider = self.playProgressBar.progressSlider
        self.setCurrentTime = self.playProgressBar.setCurrentTime
        self.setTotalTime = self.playProgressBar.setTotalTime

    def __showMoreActionsMenu(self):
        """ Pop up the 'more actions' menu next to its button. """
        globalPos = self.mapToGlobal(self.moreActionsButton.pos())
        x = globalPos.x() + self.moreActionsButton.width() + 10
        y = globalPos.y() + self.moreActionsButton.height()//2 - 114/2
        self.moreActionsMenu.exec(QPoint(x, y))

    def __connectSignalToSlot(self):
        """ Connect signals to slots. """
        self.moreActionsButton.clicked.connect(self.__showMoreActionsMenu)
        self.volumeButton.clicked.connect(self.__showVolumeSlider)
        self.volumeSliderWidget.muteStateChanged.connect(
            self.volumeButton.setMute)
        self.volumeSliderWidget.volumeLevelChanged.connect(
            self.volumeButton.updateIcon)
        # Any other control click dismisses the floating volume slider.
        for widget in self.__widget_list:
            widget.clicked.connect(self.volumeSliderWidget.hide)
class PlayProgressBar(QWidget):
    """ Song playback progress bar: a slider flanked by two time labels. """

    def __init__(self, duration: str = "0:00", parent=None):
        super().__init__(parent)
        # Two labels and one slider.
        self.progressSlider = Slider(Qt.Horizontal, self)
        self.currentTimeLabel = QLabel("0:00", self)
        self.totalTimeLabel = QLabel(duration, self)
        self.__initWidget()

    def __initWidget(self):
        """ Configure geometry and the hollow-handle slider style. """
        self.setFixedHeight(24)
        self.progressSlider.move(73, 0)
        handleStyle = HollowHandleStyle({
            "handle.ring-width": 3,
            "handle.hollow-radius": 9,
            "handle.margin": 0
        })
        self.progressSlider.setStyle(handleStyle)
        self.progressSlider.setFixedHeight(24)
        for label in (self.currentTimeLabel, self.totalTimeLabel):
            label.setObjectName("timeLabel")

    def setCurrentTime(self, currentTime: int):
        """ Update the current-time label.

        Parameters
        ----------
        currentTime: int
            elapsed time in milliseconds """
        seconds, minutes = self.getSecondMinute(currentTime)
        self.currentTimeLabel.setText(f'{minutes}:{str(seconds).rjust(2,"0")}')
        # Shift left as the total-time label grows beyond 4 characters.
        self.currentTimeLabel.move(
            33 - 9 * (len(self.totalTimeLabel.text()) - 4), 1)

    def setTotalTime(self, totalTime):
        """ Update the total-duration label; *totalTime* is in milliseconds. """
        seconds, minutes = self.getSecondMinute(totalTime)
        self.totalTimeLabel.setText(f'{minutes}:{str(seconds).rjust(2,"0")}')

    def getSecondMinute(self, time):
        """ Convert milliseconds to (seconds, minutes). """
        totalSeconds = int(time / 1000)
        minutes, seconds = divmod(totalSeconds, 60)
        return seconds, minutes

    def resizeEvent(self, e):
        """ Stretch the slider and re-anchor the labels on resize. """
        self.progressSlider.resize(self.width() - 146, 24)
        self.totalTimeLabel.move(self.width() - 57, 1)
        self.currentTimeLabel.move(33, 1)
        super().resizeEvent(e)
|
from apps.card.models import *
import xadmin
class CardAdmin(object):
    """ xadmin options for the Card model (defaults only). """
    pass
class UserCardAdmin(object):
    """ xadmin options for the UserCard model (defaults only). """
    pass
# Register the card models with xadmin's default admin site.
xadmin.site.register(Card, CardAdmin)
xadmin.site.register(UserCard, UserCardAdmin)
|
def solution(N, A):
    """Codility MaxCounters: apply operations in A to N counters.

    Values 1..N increment that counter; values > N set every counter to the
    current maximum. The "set all" is applied lazily via a floor value so the
    whole run is O(N + len(A)).
    """
    counters = [0] * N
    running_max = 0
    floor = 0  # lazy "max all counters" level
    for op in A:
        if op > N:
            floor = running_max
            continue
        idx = op - 1
        if counters[idx] < floor:
            counters[idx] = floor
        counters[idx] += 1
        if counters[idx] > running_max:
            running_max = counters[idx]
    # Lift any counter still below the last "set all" level.
    return [value if value >= floor else floor for value in counters]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# 2015 Techdealer
##############BIBLIOTECAS A IMPORTAR E DEFINICOES####################
import sys
import urllib,urllib2,re,os,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time
import json
# Shared HTML entity un-escaper for the scrapers below.
h = HTMLParser.HTMLParser()
addon_id = 'plugin.video.footazo'
selfAddon = xbmcaddon.Addon(id=addon_id)
addonfolder = selfAddon.getAddonInfo('path')
# Relative folder holding the addon's artwork.
artfolder = '/resources/img/'
addonFanart = os.path.join(addonfolder,'fanart.jpg')
# Base URL of the scraped site.
footazo_url = 'http://www.footazo.com/'
##################################################
#MENUS############################################
def MENU():
    # Root menu: recent videos, search, then one directory entry per category.
    entries = [
        ('+ Recentes', footazo_url, 1),
        ('Pesquisar', footazo_url, 2),
        ('Humor', footazo_url+'videos/humor/', 1),
        ('Bizarro', footazo_url+'videos/bizarro/', 1),
        ('Quase!', footazo_url+'videos/quase/', 1),
        ('Wow!', footazo_url+'videos/wow/', 1),
        ('Ouch!', footazo_url+'videos/ouch/', 1),
        ('Entrevistas', footazo_url+'videos/entrevistas/', 1),
        ('Belas', footazo_url+'videos/belas/', 1),
        ('Outros', footazo_url+'videos/outros/', 1),
    ]
    for title, link, mode in entries:
        addDir(title, link, mode, '')
###################################################################################
#FUNCOES
def Listar_Videos(url):
    # List the posts of an index page: each match yields
    # (link, title, date, thumbnail). Mode 4 plays, mode 1 pages forward.
    codigo_fonte = abrir_url(url)
    match = re.compile('<div class="indexpost">.*?<div class="posttitle"><a.*?href="(.+?)".*?>(.+?)</a></div>.*?<div class="datalineleft">(.+?) \|.*?<div class="postimage">.*?<img.*?src="(.+?)".*?>',re.DOTALL).findall(codigo_fonte)
    for link, name, data, iconimage in match:
        addLink('[COLOR yellow]'+data+'[/COLOR] - '+name,link,4,iconimage)
    # "Next page" link, if present.
    match = re.search("<span class='current'>.*?</span><a href='(.+?)' class='page larger'>(.+?)</a>",codigo_fonte)
    if match:
        addDir('[COLOR blue]Página '+match.group(2)+' >>[/COLOR]',match.group(1),1,'')
def Pesquisar(url):
    # Search mode. On the first call (url is the site root) prompt the user
    # with the on-screen keyboard; pagination re-enters with the full query URL.
    if url==footazo_url:
        addDir('[B]Pesquisar novamente...[/B]',footazo_url,2,'')
        keyb = xbmc.Keyboard('', 'Pesquisar por...')
        keyb.doModal()
        if (keyb.isConfirmed()):
            search = keyb.getText()
            if search=='':
                sys.exit(0)
            search=urllib.quote(search)
            url = url + '?s=' + search
        else:
            sys.exit(0)
    # Scrape the result page exactly like Listar_Videos, but keep mode=2 for paging.
    codigo_fonte = abrir_url(url)
    match = re.compile('<div class="indexpost">.*?<div class="posttitle"><a.*?href="(.+?)".*?>(.+?)</a></div>.*?<div class="datalineleft">(.+?) \|.*?<div class="postimage">.*?<img.*?src="(.+?)".*?>',re.DOTALL).findall(codigo_fonte)
    for link, name, data, iconimage in match:
        addLink('[COLOR yellow]'+data+'[/COLOR] - '+name,link,4,iconimage)
    match = re.search("<span class='current'>.*?</span><a href='(.+?)' class='page larger'>(.+?)</a>",codigo_fonte)
    if match:
        addDir('[COLOR blue]Página '+match.group(2)+' >>[/COLOR]',match.group(1),2,'')
def Descricao(url,name):
    # Show the post description plus its Facebook comments in a text viewer.
    progress = xbmcgui.DialogProgress()
    progress.create('Footazo', 'Carregando a descrição e comentários...')
    progress.update(0)
    codigo_fonte = abrir_url(url)
    post_content = re.search('<div class="postcontentsingle">(.+?)<div style="float:left;">', codigo_fonte, re.DOTALL)
    if post_content:
        post_content = cleanhtml(post_content.group(1))
    else:
        post_content = 'Post sem conteúdo'
    comments = ''
    # Public Facebook Graph API lookup of the comment thread for this URL.
    codigo_fonte = abrir_url('https://graph.facebook.com/comments/?ids='+url)
    decoded_data = json.loads(codigo_fonte)
    if len(decoded_data[url]['comments']['data'])>0:
        for x in range(0, len(decoded_data[url]['comments']['data'])):
            facebook_name = decoded_data[url]['comments']['data'][x]['from']['name'].encode('utf-8')
            facebook_like_count = str(decoded_data[url]['comments']['data'][x]['like_count'])
            facebook_message = decoded_data[url]['comments']['data'][x]['message'].encode('utf-8')
            if facebook_message.endswith('\n'):
                facebook_message = facebook_message[:-1]
            comments = comments+'[COLOR blue]'+facebook_name+'[/COLOR] disse: [I]('+facebook_like_count+' likes)[/I]\n'+facebook_message+'\n\n'
            # Fetch replies to this comment.
            # NOTE(review): the inner loop reuses loop variable `x`, shadowing
            # the outer index; harmless here (x is not read after it) but fragile.
            codigo_fonte_2 = abrir_url('https://graph.facebook.com/'+decoded_data[url]['comments']['data'][x]['id']+'/comments/')
            decoded_data_2 = json.loads(codigo_fonte_2)
            if len(decoded_data_2['data'])>0:
                for x in range(0, len(decoded_data_2['data'])):
                    facebook_name_reply = decoded_data_2['data'][x]['from']['name'].encode('utf-8')
                    facebook_like_count_reply = str(decoded_data_2['data'][x]['like_count'])
                    facebook_message_reply = decoded_data_2['data'][x]['message'].encode('utf-8')
                    if facebook_message_reply.endswith('\n'):
                        facebook_message_reply = facebook_message_reply[:-1]
                    comments = comments+'[COLOR orange][B]»»[/B][/COLOR] Resposta de [COLOR orange]'+facebook_name_reply+'[/COLOR]: [I]('+facebook_like_count_reply+' likes)[/I]\n'+facebook_message_reply+'\n\n'
    if comments == '':
        comments = '[COLOR red]Sem comentários.[/COLOR]\n\n'
    if progress.iscanceled():
        sys.exit(0)
    progress.update(100)
    progress.close()
    # Display in Kodi's text viewer window (10147).
    xbmc.executebuiltin("ActivateWindow(10147)")
    window = xbmcgui.Window(10147)
    xbmc.sleep(100)
    window.getControl(1).setLabel( "%s - %s" % (name,'Footazo',))
    window.getControl(5).setText('[COLOR green][B]Descrição:[/B][/COLOR]\n'+post_content+'\n\n[COLOR green][B]Comentários:[/B][/COLOR]\n'+comments+'Você poderá também deixar o seu comentário visitando:\n[I]'+url+'[/I]\n a partir do seu browser.')
def Procurar_fontes(url,name,iconimage):
    # Scan the post page for every embedded player we can resolve, queue each
    # resolved stream into a Kodi playlist, and start playback.
    progress = xbmcgui.DialogProgress()
    progress.create('Footazo', 'Procurando fonte...')
    progress.update(0)
    playlist = xbmc.PlayList(1)
    playlist.clear()
    try:
        codigo_fonte = abrir_url(url)
    except:
        codigo_fonte = ''
    if codigo_fonte:
        # Pass 1: iframe embeds, dispatched to the per-host resolvers below.
        html_source_trunk = re.findall('<iframe(.*?)</iframe>', codigo_fonte, re.DOTALL)
        for trunk in html_source_trunk:
            try:
                iframe = re.compile('src=["\'](.+?)["\']').findall(trunk)[0]
            except:
                iframe = ''
            if iframe:
                if iframe.find('youtube') > -1:
                    resolver_iframe = youtube_resolver(iframe)
                    if resolver_iframe != 'youtube_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('dailymotion') > -1:
                    resolver_iframe = daily_resolver(iframe)
                    if resolver_iframe != 'daily_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('vimeo.com') > -1:
                    resolver_iframe = vimeo_resolver(iframe)
                    if resolver_iframe != 'vimeo_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('vk.com') > -1:
                    resolver_iframe = vkcom_resolver(iframe)
                    if resolver_iframe != 'vkcom_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('facebook.com/video/embed') > -1:
                    resolver_iframe = facebook_resolver(iframe)
                    if resolver_iframe != 'facebook_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('rutube.ru') > -1:
                    resolver_iframe = rutube_resolver(iframe)
                    if resolver_iframe != 'rutube_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('videa.hu') > -1:
                    resolver_iframe = videa_resolver(iframe)
                    if resolver_iframe != 'videa_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('videos.sapo.pt') > -1:
                    resolver_iframe = sapo_resolver(iframe)
                    if resolver_iframe != 'sapo_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('vine.co') > -1:
                    resolver_iframe = vine_resolver(iframe)
                    if resolver_iframe != 'vine_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('alkislarlayasiyorum.com') > -1:
                    resolver_iframe = alkislarlayasiyorum_resolver(iframe)
                    if resolver_iframe != 'alkislarlayasiyorum_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('videolog.tv') > -1:
                    resolver_iframe = videologtv_resolver(iframe)
                    if resolver_iframe != 'videologtv_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('zideo.nl') > -1:
                    resolver_iframe = zideonl_resolver(iframe)
                    # NOTE(review): compares against the videologtv failure
                    # string; zideonl_resolver's failure returns must match it.
                    if resolver_iframe != 'videologtv_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('liveleak.com/ll_embed') > -1:
                    resolver_iframe = liveleak_resolver(iframe)
                    if resolver_iframe != 'liveleak_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
                elif iframe.find('v.kiwi.kz/v2/') > -1:
                    resolver_iframe = kiwikz_resolver(iframe)
                    if resolver_iframe != 'kiwikz_nao resolvido':
                        playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
        # Playwire embed player - "old" markup
        html_playwire_embed = re.findall('<script.*?".*?src\=".*?cdn\.playwire\.com/bolt/js/embed\.min\.js".*?data\-publisher-id\="(.+?)".*?data\-video\-id\="(.+?)".*?>', codigo_fonte)
        for data_publisher, video_id in html_playwire_embed:
            try:
                codigo_fonte_2 = abrir_url('http://cdn.playwire.com/v2/'+data_publisher+'/config/'+video_id+'.json')
            except:
                continue
            decoded_data = json.loads(codigo_fonte_2)
            if decoded_data['src'].endswith('.f4m'):
                # HDS manifest: resolve baseURL + media url from the f4m.
                try:
                    codigo_fonte_3 = abrir_url(decoded_data['src'])
                except:
                    continue
                match = re.search("<baseURL>(.+?)</baseURL>.*?url\=\"(.+?)\"",codigo_fonte_3,re.DOTALL)
                if match:
                    playlist.add(match.group(1)+'/'+match.group(2),xbmcgui.ListItem(name, thumbnailImage=iconimage))
            else:
                playlist.add(decoded_data['src'],xbmcgui.ListItem(name, thumbnailImage=iconimage))
        # Playwire embed player - "new" markup (config JSON URL in data-config)
        html_playwire_embed = re.findall('\<script.*?data\-config\="(.+?)".*?>', codigo_fonte)
        for video_id in html_playwire_embed:
            codigo_fonte_2 = abrir_url(video_id)
            decoded_data = json.loads(codigo_fonte_2)
            if decoded_data['src'].endswith('.f4m'):
                try:
                    codigo_fonte_3 = abrir_url(decoded_data['src'])
                except:
                    continue
                match = re.search("<baseURL>(.+?)</baseURL>.*?url\=\"(.+?)\"",codigo_fonte_3,re.DOTALL)
                if match:
                    playlist.add(match.group(1)+'/'+match.group(2),xbmcgui.ListItem(name, thumbnailImage=iconimage))
            else:
                playlist.add(decoded_data['src'],xbmcgui.ListItem(name, thumbnailImage=iconimage))
        # Legacy Flash <embed> for YouTube
        match = re.compile('<embed src=".*?youtube.com/v/([^?"]+).*?"').findall(codigo_fonte)
        if match:
            for youtube_id in match:
                playlist.add('plugin://plugin.video.youtube/?action=play_video&videoid='+youtube_id,xbmcgui.ListItem(name, thumbnailImage=iconimage))
        # longtailvideo.com player (direct file URL in flashvars)
        match=re.compile("<embed.*?flashvars=\"file=([^&\"]+).*?\".*?src=\"http://player.longtailvideo.com/player5.2.swf\".*?>").findall(codigo_fonte)
        if match:
            for link in match:
                playlist.add(link,xbmcgui.ListItem(name, thumbnailImage=iconimage))
        # player.mais.uol.com.br (needs API lookup + referer header)
        match=re.compile('<embed.*?src="http://player.mais.uol.com.br/embed_v2.swf\?.*?mediaId=([^&"]+).*?".*?>').findall(codigo_fonte)
        if match:
            for mediaid in match:
                codigo_fonte_2 = abrir_url('http://mais.uol.com.br/apiuol/player/media.js?p=undefined&mediaId='+mediaid+'&action=showPlayer&types=V')
                match_2 = re.search('"formats": \[{.*?"url":"(.+?)".*?}', codigo_fonte_2)
                if match_2:
                    dummy=abrir_url('http://mais.uol.com.br/crossdomain.xml')
                    dummy=abrir_url('http://mais.uol.com.br/notifyMediaView?t=v&v=2&mediaId='+mediaid)
                    playlist.add(match_2.group(1)+'?ver=0&start=0&r='+urllib.quote_plus('http://player.mais.uol.com.br/embed_v2.swf?mediaId='+mediaid+'&tv=0')+'|referer=http://player.mais.uol.com.br/embed_v2.swf?mediaId='+mediaid,xbmcgui.ListItem(name, thumbnailImage=iconimage))
        #player.ooyala.com não suportado - total 2 videos
        #meta.ua não suportado - total 1 video
        #wat.tv não suportado - total 1 video
    if progress.iscanceled():
        sys.exit(0)
    progress.update(100)
    progress.close()
    if len(playlist) == 0:
        dialog = xbmcgui.Dialog()
        ok = dialog.ok('Footazo', 'Nenhuma fonte suportada encontrada...')
    else:
        try:
            xbmc.Player().play(playlist)
        except:
            pass
def youtube_resolver(url):
    # Map a YouTube embed iframe URL to the Kodi YouTube plugin URL.
    ids = re.compile('.*?youtube.com/embed/([^?"]+).*?').findall(url)
    if not ids:
        return 'youtube_nao resolvido'
    return 'plugin://plugin.video.youtube/?action=play_video&videoid=' + str(ids[0])
def daily_resolver(url):
    # Extract the Dailymotion video id; syndication embeds carry extra params.
    if url.find('syndication') > -1:
        ids = re.compile('/embed/video/(.+?)\?syndication').findall(url)
    else:
        ids = re.compile('/embed/video/(.*)').findall(url)
    if ids:
        return 'plugin://plugin.video.dailymotion_com/?mode=playVideo&url=' + str(ids[0])
    return 'daily_nao resolvido'
def vimeo_resolver(url):
    # First run of digits after a slash is taken as the Vimeo video id.
    ids = re.compile('/([0-9]+)').findall(url)
    if ids:
        return 'plugin://plugin.video.vimeo/?action=play_video&videoid=' + str(ids[0])
    return 'vimeo_nao resolvido'
def vkcom_resolver(url):
    # Resolve a vk.com video_ext embed to a direct MP4, trying qualities
    # from 1080p down to 240p.
    # NOTE(review): findall never returns None, so the else branch is dead;
    # an empty list just skips the loop and falls through to the failure return.
    match = re.compile('http://vk.com/video_ext.php\?oid=([\d]+?)&.*?id=([\d]+?)&.*?hash=([A-Za-z0-9]+).*?').findall(url)
    if match != None:
        for oid, id, hash in match:
            codigo_fonte_2 = abrir_url('http://vk.com/video_ext.php?oid=' + oid + '&id=' + id + '&hash=' + hash)
            match_2 = re.search('url1080=(.+?).1080.mp4', codigo_fonte_2)
            if match_2 != None:
                return match_2.group(1)+'.1080.mp4'
            match_2 = re.search('url720=(.+?).720.mp4', codigo_fonte_2)
            if match_2 != None:
                return match_2.group(1)+'.720.mp4'
            match_2 = re.search('url480=(.+?).480.mp4', codigo_fonte_2)
            if match_2 != None:
                return match_2.group(1)+'.480.mp4'
            match_2 = re.search('url360=(.+?).360.mp4', codigo_fonte_2)
            if match_2 != None:
                return match_2.group(1)+'.360.mp4'
            match_2 = re.search('url240=(.+?).240.mp4', codigo_fonte_2)
            if match_2 != None:
                return match_2.group(1)+'.240.mp4'
        return 'vkcom_nao resolvido'
    else:
        return 'vkcom_nao resolvido'
def facebook_resolver(url):
    # Pull the direct stream URL out of Facebook's embedded player config.
    try:
        result = abrir_url(url)
        url = re.compile('"params","(.+?)"').findall(result)[0]
        # Unescape the JS-escaped, URL-encoded params blob before parsing it.
        url = re.sub(r'\\(.)', r'\1', urllib.unquote_plus(url.decode('unicode_escape')))
        url = re.compile('_src":"(.+?)"').findall(url)[0]
        return url
    except:
        return 'facebook_nao resolvido'
def rutube_resolver(url):
    # Resolve a rutube.ru embed to its HLS (m3u8) stream via the play API.
    try:
        url = url.split("/")[-1].split("?")[0]
        codigo_fonte = abrir_url('http://rutube.ru/api/play/trackinfo/'+url+'/?format=json')
        return json.loads(codigo_fonte)['video_balancer']['m3u8']
    except:
        return 'rutube_nao resolvido'
def videa_resolver(url):
    # Resolve a videa.hu embed via its flv-player XML endpoint.
    try:
        url = url.rsplit("v=", 1)[-1].rsplit("-", 1)[-1]
        url = 'http://videa.hu/flvplayer_get_video_xml.php?v='+url
        result = abrir_url(url)
        url = re.compile('video_url="(.+?)"').findall(result)[0]
        return url
    except:
        return 'videa_nao resolvido'
def sapo_resolver(url):
    # Resolve a videos.sapo.pt embed: follow the /mov redirect to the file.
    try:
        id = url.split("file=")[-1].split("sapo.pt/")[-1].split("/")[0]
        url = '%s/%s' % ('http://videos.sapo.pt', id)
        result = abrir_url(url)
        match = re.search('<meta property="og:video" content="http://imgs.sapo.pt/sapovideo/swf/flvplayer-sapo.swf\?file=(.+?)/mov.+?"/>', result)
        if match != None:
            tmp_url = match.group(1) + '/mov'
            # The /mov URL redirects; geturl() yields the final media URL.
            req = urllib2.Request(tmp_url)
            res = urllib2.urlopen(req)
            url = res.geturl()
            return url
        else:
            return 'sapo_nao resolvido'
    except:
        return 'sapo_nao resolvido'
def vine_resolver(url):
    # Resolve a vine.co embed: the page inlines the MP4 URL in a JS variable.
    try:
        codigo_fonte = abrir_url(url)
        match = re.search("var videoUrl = '(.+?)';", codigo_fonte)
        if match:
            return match.group(1)
        else:
            return 'vine_nao resolvido'
    except:
        return 'vine_nao resolvido'
def alkislarlayasiyorum_resolver(url):
    # Resolve an alkislarlayasiyorum.com embed via its player JSON API.
    try:
        url = url.split("/")[-1].split("?")[0]
        codigo_fonte = abrir_url('http://alkislarlayasiyorum.com/api/playerJson/ay_embed/'+url)
        return json.loads(codigo_fonte)['streamurl']
    except:
        return 'alkislarlayasiyorum_nao resolvido'
def videologtv_resolver(url):
    # Resolve a videolog.tv embed via its JSON video API.
    try:
        url = url.split("id_video=")[-1].split("?")[0]
        codigo_fonte = abrir_url('http://api.videolog.tv/video/'+url+'.json')
        return json.loads(codigo_fonte)['video']['url_mp4']
    except:
        return 'videologtv_nao resolvido'
def zideonl_resolver(url):
    """Resolve a zideo.nl embed to a direct video URL.

    Returns 'videologtv_nao resolvido' on any failure: the caller compares
    against exactly that string, and the original no-match branch returned
    the unrelated 'vine_nao resolvido', which the caller would have queued
    as if it were a playable URL.
    """
    try:
        video_id = url.split("playzideo=")[-1].split("?")[0]
        codigo_fonte = abrir_url('http://www.zideo.nl/player/iframe?playzideo='+video_id)
        match = re.search('<div.*?id="videoFile".*?>(.+?)</div>', codigo_fonte)
        if match:
            return match.group(1)
        return 'videologtv_nao resolvido'
    except Exception:
        return 'videologtv_nao resolvido'
def liveleak_resolver(url):
    # Resolve a LiveLeak ll_embed page: the file URL is inlined in the player JS.
    try:
        codigo_fonte = abrir_url(url)
        match = re.search('file: "(.+?)",', codigo_fonte)
        if match:
            return match.group(1)
        else:
            return 'liveleak_nao resolvido'
    except:
        return 'liveleak_nao resolvido'
def kiwikz_resolver(url):
    # Resolve a v.kiwi.kz embed: the URL-encoded stream sits between
    # '&url=' and '&poster' in the page source.
    try:
        codigo_fonte = abrir_url(url)
        return urllib.unquote(re.compile('&url=(.+?)&poster').findall(codigo_fonte)[0])
    except:
        return 'kiwikz_nao resolvido'
###################################################################################
#FUNÇÕES GERAIS
def abrir_url(url):
    # Fetch *url* with a desktop Firefox User-Agent and return the raw body.
    request = urllib2.Request(url)
    request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:28.0) Gecko/20100101 Firefox/28.0')
    response = urllib2.urlopen(request)
    body = response.read()
    response.close()
    return body
def addLink(name,url,mode,iconimage):
    # Add a playable (non-folder) item to the directory, with a context-menu
    # entry that opens the description/comments view (mode 3).
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
    ok = True
    liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    liz.setProperty("Fanart_Image", addonFanart)
    liz.addContextMenuItems( [("Ver descrição e comentários", 'RunPlugin(plugin://'+addon_id+'/?mode=3&url='+urllib.quote_plus(url)+'&name='+urllib.quote_plus(name)+')')] )
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=False)
    return ok
def addDir(name,url,mode,iconimage,folder=True):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setProperty("Fanart_Image", addonFanart)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=folder)
def cleanhtml(raw_html):
    """Strip HTML tags and collapse whitespace runs into single spaces."""
    sem_tags = re.sub(re.compile('<.*?>'), '', raw_html)
    return re.sub('\s+', ' ', sem_tags)
############################################################################################################
# GET PARAMS #
############################################################################################################
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict.

    Returns a dict of key/value strings, or an empty list when no
    parameters are present (callers rely on try/except around key
    access, so the quirky empty-list default is preserved).
    """
    param = []
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        cleanedparams = paramstring.replace('?', '')
        # BUG FIX: the trailing '/' used to be stripped from the wrong
        # variable (and removed one character too many), so the last
        # parameter value kept a trailing slash.
        if cleanedparams.endswith('/'):
            cleanedparams = cleanedparams[:-1]
        param = {}
        for pair in cleanedparams.split('&'):
            splitparams = pair.split('=')
            if len(splitparams) == 2:
                param[splitparams[0]] = splitparams[1]
    return param
# Parse the plugin invocation parameters from the query string.
params=get_params()
url=None
name=None
mode=None
iconimage=None
# Each parameter is optional; failures fall through to None defaults.
try: url=urllib.unquote_plus(params["url"])
except: pass
try: name=urllib.unquote_plus(params["name"])
except: pass
try: mode=int(params["mode"])
except: pass
try: iconimage=urllib.unquote_plus(params["iconimage"])
except: pass
# Debug trace of the parsed invocation (Python 2 print statements).
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
print "Iconimage: "+str(iconimage)
###############################################################################################################
#                          MODOS: dispatch to the handler for the requested mode                              #
###############################################################################################################
# mode None = root menu; 1 = video list; 2 = search; 3 = description; 4 = source lookup.
if mode==None: MENU()
elif mode==1: Listar_Videos(url)
elif mode==2: Pesquisar(url)
elif mode==3: Descricao(url,name)
elif mode==4: Procurar_fontes(url,name,iconimage)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import sys
import pytest
import papermill as pm
from tests.notebooks_common import OUTPUT_NOTEBOOK, KERNEL_NAME
# Relative / absolute tolerances for the notebook metric assertions below.
TOL = 0.05
ABS_TOL = 0.05
@pytest.mark.spark
@pytest.mark.integration
def test_als_pyspark_integration(notebooks):
    """Run the ALS PySpark notebook end-to-end and verify its reported metrics."""
    pm.execute_notebook(
        notebooks["als_pyspark"],
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(TOP_K=10, MOVIELENS_DATA_SIZE="1m"),
    )
    results = pm.read_notebook(OUTPUT_NOTEBOOK).dataframe.set_index("name")["value"]
    # Expected metric values recorded from a reference run.
    expected = {
        "map": 0.00201,
        "ndcg": 0.02516,
        "precision": 0.03172,
        "recall": 0.009302,
        "rmse": 0.8621,
        "mae": 0.68023,
        "exp_var": 0.4094,
        "rsquared": 0.4038,
    }
    for metric, value in expected.items():
        assert results[metric] == pytest.approx(value, rel=TOL, abs=ABS_TOL)
@pytest.mark.spark
@pytest.mark.integration
@pytest.mark.skip(reason="It takes too long in the current test machine")
@pytest.mark.skipif(sys.platform == 'win32', reason="Not implemented on Windows")
def test_mmlspark_lightgbm_criteo_integration(notebooks):
    """Run the MMLSpark LightGBM Criteo notebook end-to-end and verify AUC."""
    notebook_path = notebooks["mmlspark_lightgbm_criteo"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
        parameters=dict(
            DATA_SIZE="full",
            NUM_ITERATIONS=50,
            EARLY_STOPPING_ROUND=10
        )
    )
    # Papermill records notebook outputs as a name/value dataframe.
    nb = pm.read_notebook(OUTPUT_NOTEBOOK)
    results = nb.dataframe.set_index("name")["value"]
    assert results["auc"] == pytest.approx(0.68895, rel=TOL, abs=ABS_TOL)
|
# This script is used for saving the individual fasta files and the info table for a certain
# coalHMM run. The run number is supplied as the first argument, the target sequence name as
# the second, and the start and end coordinates for the maf slicing as the third and fourth
# arguments. All in all, this script can be run using:
#
# python create_fasta_and_info_table.py run_number target_seqname start_coord end_coord
from Bio import AlignIO
from Bio.AlignIO import MafIO
import pandas as pd
import sys
# Save the run index
run = int(sys.argv[1])
# Reference sequence name used to index and search the MAF file.
target_seqname = sys.argv[2]
# Load mafindex
idx = MafIO.MafIndex('../tmp/filtered.mafindex', '../tmp/filtered.maf', target_seqname)
# Parse the alignment: slice the MAF between the start (argv[3]) and end (argv[4]) coordinates.
results = idx.search([int(sys.argv[3])], [int(sys.argv[4])])
# Create an empty dataframe
df = pd.DataFrame(columns = ['file', 'species', 'chr', 'start', 'gaps'])
with open('../tmp/fasta_names/run_{}.txt'.format(run), 'w') as f:
    # For each of the alignments
    for i, align in enumerate(results):
        f.write('fasta_{}.fa\n'.format(i))
        # Create empty dictionary
        dct = {'species':[], 'chr':[], 'start':[],'gaps':[]}
        # For each of the records
        for record in align:
            # Record names look like "species.chromosome"; use the species prefix as id.
            record.id = record.name.split('.')[0]
            record.description = record.name.split('.')[0]
            # Retrieve species
            dct['species'].append(record.name.split('.')[0])
            # Retrieve chromosome/contig
            dct['chr'].append('.'.join(record.name.split('.')[1:]))
            # Retrieve start coordinate
            dct['start'].append(record.annotations['start'])
            # Retrieve gaps encoded in a binary format (0 = gap, 1 = base)
            dct['gaps'].append(''.join([str(0) if n=='-' else str(1) for n in record.seq]))
        # Save individual fasta file
        AlignIO.write(align, '../tmp/inputs/run_{}/fasta_{}.fa'.format(run, i), "fasta")
        # Convert dictionary to data frame
        file_df = pd.DataFrame.from_dict(dct)
        # Insert column mapping to the file
        file_df.insert(0, 'file', i, True)
        # Append rows to overall data frame
        # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
        # migrate to pd.concat when upgrading.
        df = df.append(file_df)
# Save the csv file
df.to_csv('../tmp/info_tables/run_{}.csv'.format(run), index=False)
|
#!/usr/bin/env python3
# a program to read a .words file and write a file in vardial3 training format,
# which is
# text\tQ\tDIA
import sys

# Convert each line of the .words file ("DIAxxx text...") into the
# vardial3 training format: text\tQ\tDIA (first three chars of the label).
# BUG FIX: the input file was never closed; use a context manager.
with open(sys.argv[1]) as inf:
    for line in inf:
        line = line.strip()
        sp = line.find(' ')
        print(line[sp+1:]+'\tQ\t'+line[:3])
|
def _collate_data(collation, first_axis, second_axis):
    """
    Collects information about the number of edit actions belonging to keys in
    a supplied dictionary of object or changeset ids.

    Parameters
    ----------
    collation : dict
        A dictionary of OpenStreetMap object or changeset ids.
    first_axis : string
        An object or changeset key for the collation to be performed on.
    second_axis : {'create','modify','delete'}
        An action key to be added to the first_axis key.
    """
    # Initialise all three action counters the first time a key is seen,
    # then bump the requested action.
    entry = collation.setdefault(
        first_axis, {"create": 0, "modify": 0, "delete": 0})
    entry[second_axis] += 1
def extract_changesets(objects):
    """
    Provides information about each changeset present in an OpenStreetMap diff
    file.

    Parameters
    ----------
    objects : osc_decoder class
        A class containing OpenStreetMap object dictionaries.

    Returns
    -------
    changeset_collation : dict
        A dictionary of dictionaries with each changeset as a separate key,
        information about each changeset as attributes in that dictionary,
        and the actions performed in the changeset as keys.
    """
    def add_changeset_info(collation, axis, item):
        """Record identifying attributes of the changeset under *axis*."""
        entry = collation.setdefault(axis, {})
        entry["id"] = axis
        entry["username"] = item["username"]
        entry["uid"] = item["uid"]
        entry["timestamp"] = item["timestamp"]

    changeset_collation = {}
    # Nodes, ways and relations are collated identically by changeset id.
    for container in (objects.nodes, objects.ways, objects.relations):
        for element in container.values():
            _collate_data(changeset_collation,
                          element['changeset'],
                          element['action'])
            add_changeset_info(changeset_collation,
                               element['changeset'],
                               element)
    return changeset_collation
def extract_objects(objects):
    """
    Collate per-object edit information from an OpenStreetMap diff, keyed by
    a type-prefixed id ('n'/'w'/'r' + numeric id).
    """
    def add_object_info(collation, axis, item):
        """Record identifying attributes and tags of the object under *axis*."""
        entry = collation.setdefault(axis, {})
        entry["id"] = axis
        entry["username"] = item["username"]
        entry["uid"] = item["uid"]
        entry["timestamp"] = item["timestamp"]
        entry["changeset"] = item["changeset"]
        entry["version"] = item["version"]
        entry["tags"] = dict(item["tags"])
        # Only nodes carry coordinates.
        if axis[0] == 'n':
            entry["lat"] = item["lat"]
            entry["lon"] = item["lon"]

    object_collation = {}
    for prefix, container in (('n', objects.nodes),
                              ('w', objects.ways),
                              ('r', objects.relations)):
        for element in container.values():
            key = prefix + str(element['id'])
            _collate_data(object_collation, key, element['action'])
            add_object_info(object_collation, key, element)
    return object_collation
def extract_users(objects):
    """
    Collate per-user edit activity (changesets, timestamps and action counts)
    from an OpenStreetMap diff, keyed by username.
    """
    def add_user_info(collation, axis, item):
        """Accumulate changesets, timestamps and action counts for user *axis*."""
        entry = collation.setdefault(
            axis, {"timestamps": [], "changesets": []})
        entry["uid"] = item["uid"]
        if item["changeset"] not in entry["changesets"]:
            entry["changesets"].append(item["changeset"])
        entry["timestamps"].append(item["timestamp"])
        _collate_data(entry, "action", item["action"])

    user_collation = {}
    for container in (objects.nodes, objects.ways, objects.relations):
        for element in container.values():
            add_user_info(user_collation, element['username'], element)
    return user_collation
|
#generate files for images and V-A values for SEMAINE dataset. landmark points generated by D. Aspandi because SEMAINE does not provide them
import os
import os.path as path
import zipfile
import cv2
import numpy as np
currentDir = os.getcwd()
#create the data folder that will hold all the data files
dataFolder = path.join(path.abspath(path.join(currentDir,'..')), 'data/SemData')
if not os.path.exists(dataFolder):
    os.makedirs(dataFolder)
#get 'data' path (sibling 'data' directory one level up)
currentDirLevelUp = path.abspath(path.join(currentDir,".."))
for files in os.listdir(currentDirLevelUp):
    if files == 'data':
        dataDir = path.join(currentDirLevelUp,files)
#choose how many sessions: all or just a subset
sessionsDirs = []
#get list of session dir paths: data/SEMAINE/semaineDB/Sessions/*
for file in os.listdir(dataDir):
    if file == 'SEMAINE':
        subDataDir = path.join(dataDir,file)
        for file2 in os.listdir(subDataDir):
            if file2 == 'semaineDB':
                subDataDir2 = path.join(subDataDir,file2)
                for file3 in os.listdir(subDataDir2):
                    if file3 == 'Sessions':
                        subDataDir3 = path.join(subDataDir2,file3)
                        for file4 in os.listdir(subDataDir3):
                            sessionsDirs.append(path.join(subDataDir3,file4))
testDirs = sessionsDirs
#only build samples where there is both valence and arousal
hasVSess = False
hasASess = False
#dirs list for each session participant (user and/or operator)
#each session may have more than one file for a participant's valence and arousal
userAroDirs = []
userValDirs = []
operAroDirs = []
operValDirs = []
#some data files have a great difference between total annotation time and video length;
#a minimum difference is required so we do not keep lists that would force cutting a lot of the video
minimumDifference = 230 #170 samples = 3.4 sec, 230samples = 4.6sec
for sessDirs in testDirs:
    subSessDirs = []
    userVidDir = ""
    userAroDirs = []
    userValDirs = []
    operVidDir = ""
    operAroDirs = []
    operValDirs = []
    personSubSessDirs = []
    #check if contains both valence and arousal info (files)
    for subFiles in os.listdir(sessDirs):
        if "DA.txt" in subFiles:
            hasASess = True
        if "DV.txt" in subFiles:
            hasVSess = True
    if hasASess and hasVSess:
        hasASess,hasVSess = False, False #reset to initial values
        for subFiles in os.listdir(sessDirs):
            #save videos dirs for user and operator (avi files)
            #save those file dirs that contain TU (user) or TO (oper) and DV,DA or DV2,DA2 substrings
            if "User" in subFiles and ".avi" in subFiles:
                userVidDir = path.join(sessDirs,subFiles)
            if "TU" in subFiles and ("DA.txt" in subFiles or "DA2.txt" in subFiles):
                userAroDirs.append(path.join(sessDirs,subFiles))
            if "TU" in subFiles and ("DV.txt" in subFiles or "DV2.txt" in subFiles):
                userValDirs.append(path.join(sessDirs,subFiles))
            if "Operator" in subFiles and ".avi" in subFiles:
                operVidDir = path.join(sessDirs,subFiles)
            if "TO" in subFiles and ("DA.txt" in subFiles or "DA2.txt" in subFiles):
                operAroDirs.append(path.join(sessDirs,subFiles))
            if "TO" in subFiles and ("DV.txt" in subFiles or "DV2.txt" in subFiles):
                operValDirs.append(path.join(sessDirs,subFiles))
        #if no data files for operator, remove its video dir
        if not operAroDirs and not operValDirs:
            operVidDir = ""
        subSessDirs = [userAroDirs,userValDirs,operAroDirs,operValDirs]
        #if file has 2 at the end, this is the useful version, discard the original ones
        #check for DV2 or DA2, take corresponding DV or DA out of file dirs
        for i,subPartiValueDir in enumerate(subSessDirs):
            for subDirs in subPartiValueDir:
                if "DV2.txt" in subDirs:
                    commonSubstring = subDirs[:-5] #this is the common part to both versions' names
                    originalOne = commonSubstring+".txt"
                    for subDirs2 in subPartiValueDir:
                        #if there is an original version to be removed for the file 2
                        if originalOne in subDirs2:
                            subSessDirs[i].remove(originalOne) #remove dir from list
                            print("hey")
                if "DA2.txt" in subDirs:
                    commonSubstring = subDirs[:-5]
                    originalOne = commonSubstring+".txt"
                    for subDirs2 in subPartiValueDir:
                        if originalOne in subDirs2:
                            subSessDirs[i].remove(originalOne)
        #print(subSessDirs)
        #print(userVidDir,operVidDir)
        userSubSessDirs = subSessDirs[:2]
        operSubSessDirs = subSessDirs[2:]
        personSubSessDirs.append(userSubSessDirs)
        #only add operator lists variables if there is any file
        if not (len(subSessDirs[2]) == 0) or not (len(subSessDirs[3]) == 0):
            personSubSessDirs.append(operSubSessDirs)
        v = cv2.VideoCapture(userVidDir)
        v.set(cv2.CAP_PROP_POS_AVI_RATIO,1) #1 is to set pointer to the last frame
        videoLength = v.get(cv2.CAP_PROP_POS_MSEC)/1000 #get duration, /1000 because it is given in milisec
        #only perform for correct files
        if videoLength > 0.0:
            #user or operator (if there is)
            for k,subPersonSubSessDirs in enumerate(personSubSessDirs):
                dimensionValuesLists = []
                #valence or arousal data files
                for subPartiValueDir2 in subPersonSubSessDirs:
                    emotionValuesLists = []
                    if len(subPartiValueDir2) > 1:
                        #subData files (different raters, same participant and data dimension)
                        for ratersSubFiles in subPartiValueDir2:
                            with open(ratersSubFiles, "r") as file:
                                listOfValues = [float(line.split()[1]) for line in file] #data samples for file
                                fileTotalTime = len(listOfValues)*0.02 #how long is the sample analysis time, a sample is taken every 0.02 sec
                                #discard data files that took much less time than video length
                                if len(listOfValues) > ((videoLength/0.02)-minimumDifference):
                                    emotionValuesLists.append(listOfValues)
                                #print(ratersSubFiles)
                    dimensionValuesLists.append(emotionValuesLists)
                #the common minimum length to build the data lists that will be used to write the new data files
                minLength = min(list(map(len,dimensionValuesLists[0]))) #minArousal
                minValenceLen = min(list(map(len,dimensionValuesLists[1])))
                if (minValenceLen < minLength):
                    minLength = minValenceLen
                roundVideoLength = round(videoLength/0.02) #to int in python 3
                if (roundVideoLength < minLength):
                    minLength = roundVideoLength - 10 #-10 so not the exact ending of the video
                #print(list(map(len,dimensionValuesLists[0])), videoLength/0.02, minLength)
                #calculate average arousal and valence arrays across raters
                averageLists = []
                for subDim in dimensionValuesLists:
                    averageValues = np.zeros(minLength)
                    for subRaters in subDim:
                        #a+=b does not work because types do not match
                        #the length of all lists is aligned with limit of minLength
                        np.add(averageValues, np.array(subRaters[:minLength]), out=averageValues, casting='unsafe')
                    averageValues=averageValues/len(subDim)
                    averageLists.append(averageValues)
                #print(len(averageValues))
                folderID = userVidDir.split('/')[-2] #separate dir substrings by '/' and get penultimate (semaine session subfolder number)
                #the second iteration means there is operator data, change video dir and folder ID
                if k == 1:
                    userVidDir = operVidDir
                    folderID = str(int(folderID)+140) #the top number of folders ID is 140, so the new ones for operator case can use +140 to not overlap
                #subfolders to store session data
                newSessionFolder = path.join(dataFolder,folderID)
                if not os.path.exists(newSessionFolder):
                    os.makedirs(newSessionFolder)
                imgSessionFolder = path.join(newSessionFolder,'img')
                if not os.path.exists(imgSessionFolder):
                    os.makedirs(imgSessionFolder)
                vaSessionFolder = path.join(newSessionFolder,'annot2')
                if not os.path.exists(vaSessionFolder):
                    os.makedirs(vaSessionFolder)
                #write data to new files
                counter = 0
                v2 = cv2.VideoCapture(userVidDir)
                print(minLength,'+++',folderID)
                #each sample is taken after 0.02 seconds, to obtain fewer samples, jump each 10 samples and cut every 0.2 sec (10*0.02)
                for x in range(0,minLength,10):
                    counter=round(counter+0.2,2) #video steps to cut to images in sec
                    #create image files and store them
                    v2.set(cv2.CAP_PROP_POS_MSEC,counter*1000) #time pointer in milisec
                    success,image = v2.read()
                    if success:
                        cv2.imwrite(imgSessionFolder+'/'+str(folderID)+'_'+str(x).zfill(5)+'.png', image)
                        #write [arousal valence] to .aro files and store them
                        with open(vaSessionFolder+'/'+str(folderID)+'_'+str(x).zfill(5)+'.aro', "w") as file:
                            file.write(str(averageLists[0][x])+' '+str(averageLists[1][x]))
print("__________")
|
#!/usr/bin/python
import base64
import csv
import mailbox
import os
import time
from argparse import ArgumentParser
from tqdm import tqdm
from bs4 import BeautifulSoup
def main(mbox_file, output_dir, SUBJECT):
    """Parse an mbox file, export attachments, and write a CSV report.

    Messages whose stripped Subject equals *SUBJECT* are skipped; all
    others have their headers, truncated body and exported attachment
    paths recorded in ``mbox_report.csv`` under *output_dir*.
    """
    print("Reading mbox file")
    mbox = mailbox.mbox(mbox_file, factory=custom_reader)
    print("{} messages to parse".format(len(mbox)))
    parsed_data = []
    attachments_dir = os.path.join(output_dir, "attachments")
    if not os.path.exists(attachments_dir):
        os.makedirs(attachments_dir)
    columns = [
        "Date", "From", "To", "Subject", "Return-Path",
        "Content-Type", "Message-ID", "num_attachments_exported", "export_path"]
    for message in tqdm(mbox):
        msg_data = dict()
        header_data = dict(message._headers)
        try:
            if not header_data['Subject'].strip() == SUBJECT:
                body = getBody(message)
                # Truncate very long bodies (keeps the CSV manageable).
                body = body[:32000]
                for hdr in columns:
                    msg_data[hdr] = header_data.get(hdr, "N/A")
                if len(message.get_payload()):
                    export_path = write_payload(message, attachments_dir)
                    msg_data['num_attachments_exported'] = len(export_path)
                    msg_data['export_path'] = ", ".join(export_path)
                msg_data['body'] = body
                parsed_data.append(msg_data)
        except Exception as e:
            print(e)
    # BUG FIX: the report used to be rewritten once per message inside the
    # loop (quadratic work); writing it once after the loop produces the
    # identical final file.
    create_report(parsed_data, os.path.join(output_dir, "mbox_report.csv"), columns)
def write_payload(msg, out_dir):
    """Recursively export a message's payload parts to *out_dir*.

    Returns the list of file paths written. Base64-encoded binary
    content types are decoded before export; textual types are exported
    as-is; otherwise only parts carrying a name= parameter are exported.
    """
    pyld = msg.get_payload()
    export_path = []
    if msg.is_multipart():
        for entry in pyld:
            export_path += write_payload(entry, out_dir)
    else:
        content_type = msg.get_content_type().lower()
        try:
            # Binary payloads arrive base64-encoded and must be decoded.
            if any(tag in content_type for tag in
                   ("application/", "image/", "video/", "audio/", "text/csv")):
                content = base64.b64decode(msg.get_payload())
                export_path.append(export_content(msg, out_dir, content))
            # Textual payloads are written verbatim.
            elif any(tag in content_type for tag in
                     ("info/", "text/calendar", "text/rtf")):
                export_path.append(export_content(msg, out_dir,
                                                  msg.get_payload()))
            # Fallback: anything explicitly named in its headers is
            # treated as a base64 attachment.
            elif ("name=" in msg.get('Content-Disposition', "NA") or
                    "name=" in msg.get('Content-Type', "N/A")):
                content = base64.b64decode(msg.get_payload())
                export_path.append(export_content(msg, out_dir, content))
        except Exception as e:
            # BUG FIX: previously printed the literal string "e" instead
            # of the exception.
            print(e)
    return export_path
def create_report(output_data, output_file, columns):
    """Write the parsed message dicts to *output_file* as CSV.

    A "body" column is appended for the duration of the write and then
    removed again, so the caller's column list is left unchanged.
    """
    with open(output_file, 'w', newline="", encoding='utf-8') as report:
        columns.append("body")
        writer = csv.DictWriter(report, columns)
        writer.writeheader()
        writer.writerows(output_data)
        columns.remove("body")
def custom_reader(data_stream):
    """Decode a raw mbox byte stream into an mboxMessage, tolerating bad bytes."""
    raw = data_stream.read()
    try:
        content = raw.decode("ascii")
    except (UnicodeDecodeError, UnicodeEncodeError):
        # errors="replace" never raises, so the cp1252 fallback in the
        # original formulation was unreachable; utf-8 replacement is
        # used directly.
        content = raw.decode("utf-8", errors="replace")
    return mailbox.mboxMessage(content)
def export_content(msg, out_dir, content_data):
    """Write *content_data* to a uniquely named file in *out_dir*.

    The name combines the sanitized attachment filename with a timestamp
    so repeated names never collide. Returns the full path written.
    """
    file_name = get_filename(msg)
    file_ext = "FILE"
    if "." in file_name:
        file_ext = file_name.rsplit(".", 1)[-1]
    file_name = "{}_{:.4f}.{}".format(file_name.rsplit(".", 1)[0], time.time(), file_ext)
    file_name = os.path.join(out_dir, file_name)
    # BUG FIX: files were opened without ever being closed (handle leak);
    # a context manager guarantees the close.
    mode = 'w' if isinstance(content_data, str) else 'wb'
    with open(file_name, mode) as handle:
        handle.write(content_data)
    return file_name
def getcharsets(msg):
    """Return the set of non-None charsets declared by *msg*."""
    return {charset for charset in msg.get_charsets() if charset is not None}
def getBody(msg):
    """Return the text of the first non-multipart part, with HTML stripped."""
    part = msg
    # Descend into the first sub-part until a leaf part is reached.
    while part.is_multipart():
        part = part.get_payload()[0]
    decoded = part.get_payload(decode=True)
    for charset in getcharsets(part):
        try:
            decoded = decoded.decode(charset)
        except Exception:
            print("here")
    return BeautifulSoup(decoded, "html.parser").text
def get_filename(msg):
    """Extract a sanitized attachment filename from message headers.

    Checks Content-Disposition, then Content-Type, for a name= parameter.
    Returns "NO_FILENAME" when neither header carries one.
    """
    for header in ("Content-Disposition", "Content-Type"):
        if 'name=' in msg.get(header, "N/A"):
            fname_data = msg[header].replace("\r\n", " ")
            fname = [x for x in fname_data.split("; ") if 'name=' in x]
            file_name = fname[0].split("=", 1)[-1]
            # Keep only alphanumerics, whitespace and dots (drops quotes).
            return "".join(x for x in file_name
                           if x.isalnum() or x.isspace() or x == ".")
    # BUG FIX: the sentinel used to be passed through the sanitizer too,
    # which stripped its underscore and returned "NOFILENAME".
    return "NO_FILENAME"
if __name__ == '__main__':
    # Command-line entry point: mbox path and output directory.
    parser = ArgumentParser('Parsing MBOX files')
    parser.add_argument("MBOX", help="Path to mbox file")
    parser.add_argument(
        "OUTPUT_DIR", help="Path to output directory to write report ""and exported content")
    args = parser.parse_args()
    # Messages whose Subject equals this string are skipped by main().
    SUBJECT = "FW: FEB Rate reduction Loan documents for Review"
    main(args.MBOX, args.OUTPUT_DIR,SUBJECT)
|
#-*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from collections import Counter
from datetime import datetime
path='./input/'
def get_weight(count, eps=10000, min_count=2):
    """Inverse-frequency weight for a word seen *count* times.

    Words rarer than *min_count* get weight 0; *eps* damps the weight of
    very rare words.
    """
    if count < min_count:
        return 0
    return 1 / (count + eps)
# Build inverse-frequency word weights from the training corpus: each word's
# weight is ~1/(count+eps); words seen fewer than min_count times get 0.
df_train = pd.read_csv('./input/train_unigram.csv')
train_qs = pd.Series(df_train['sen1'].tolist() + df_train['sen2'].tolist()).astype(str)
words = (" ".join(train_qs)).lower().split()
counts = Counter(words)
weights = {word: get_weight(count) for word, count in counts.items()}
def add_word_count(df_feature, df, word):
    """Add 0/1 indicator columns for *word* occurring in sen1, sen2, and both."""
    present_q1 = df['sen1'].apply(lambda x: int(word in str(x).lower()))
    present_q2 = df['sen2'].apply(lambda x: int(word in str(x).lower()))
    df_feature['q1_' + word] = present_q1
    df_feature['q2_' + word] = present_q2
    df_feature[word + '_both'] = present_q1 * present_q2
def word_shares(row):
    """Compute shared-word statistics for a sentence pair.

    Returns the colon-separated string
    'R1:R2:shared_count:R2gram:Rcosine:hamming' where R1 is the tfidf
    share, R2 the Jaccard word share, R2gram the shared-bigram ratio,
    Rcosine the tfidf cosine and hamming the positional-match ratio.
    """
    tokens1 = str(row['sen1']).lower().split()
    set1 = set(tokens1)
    if not set1:
        return '0:0:0:0:0:0'
    tokens2 = str(row['sen2']).lower().split()
    set2 = set(tokens2)
    if not set2:
        return '0:0:0:0:0:0'
    # Positional equality over the longer sentence.
    words_hamming = sum(a == b for a, b in zip(tokens1, tokens2)) / max(len(tokens1), len(tokens2))
    bigrams1 = set(zip(tokens1, tokens1[1:]))
    bigrams2 = set(zip(tokens2, tokens2[1:]))
    shared_2gram = bigrams1.intersection(bigrams2)
    shared_words = set1.intersection(set2)
    shared_weights = [weights.get(w, 0) for w in shared_words]
    w1 = [weights.get(w, 0) for w in set1]
    w2 = [weights.get(w, 0) for w in set2]
    total_weights = w1 + w2
    R1 = np.sum(shared_weights) / np.sum(total_weights)  # tfidf share
    R2 = len(shared_words) / (len(set1) + len(set2) - len(shared_words))  # count share
    Rcosine_denominator = np.sqrt(np.dot(w1, w1)) * np.sqrt(np.dot(w2, w2))
    Rcosine = np.dot(shared_weights, shared_weights) / Rcosine_denominator
    if bigrams1 or bigrams2:
        R2gram = len(shared_2gram) / (len(bigrams1) + len(bigrams2))
    else:
        R2gram = 0
    return '{}:{}:{}:{}:{}:{}'.format(R1, R2, len(shared_words), R2gram, Rcosine, words_hamming)
def generate_match(infile,outfile):
    """Compute word-share match features for a sentence-pair CSV and write
    them to *outfile*.

    infile  : CSV with 'sen1'/'sen2' columns.
    outfile : destination CSV for the derived feature columns.
    """
    start = datetime.now()
    print('generate match feat,data path is',infile)
    df_feat = pd.DataFrame()
    df_data = pd.read_csv(infile,sep=',')
    # word_shares returns six colon-separated statistics per row; the
    # columns below each pull one field out of that string.
    df_data['word_shares'] = df_data.apply(word_shares, axis=1)
    df_feat['word_match_ratio'] = df_data['word_shares'].apply(lambda x: float(x.split(':')[0]))
    df_feat['word_match_ratio_root'] = np.sqrt(df_feat['word_match_ratio'])
    df_feat['tfidf_word_match_ratio'] = df_data['word_shares'].apply(lambda x: float(x.split(':')[1]))
    df_feat['shared_count'] = df_data['word_shares'].apply(lambda x: float(x.split(':')[2]))
    df_feat['shared_2gram'] = df_data['word_shares'].apply(lambda x: float(x.split(':')[3]))
    df_feat['word_match_cosine']= df_data['word_shares'].apply(lambda x: float(x.split(':')[4]))
    df_feat['words_hamming'] = df_data['word_shares'].apply(lambda x: float(x.split(':')[5]))
    # Replace NaN/inf (e.g. from zero denominators) with 0 before saving.
    where_are_nan = np.isnan(df_feat)
    where_are_inf = np.isinf(df_feat)
    df_feat[where_are_nan] = 0
    df_feat[where_are_inf] = 0
    df_feat.to_csv(outfile, index=False)
    end = datetime.now()
    print('times:',end-start)
#generate_match(path+'train_unigram.csv',path+'train_match.csv')
#generate_match(path+'valid_unigram.csv',path+'valid_match.csv')
|
import time
# In-memory lookup tables built from the CSV dumps below.
id_to_title = {} # article id -> title  (THESE JUST BARELY FIT IN 4G !! WHOOO)
title_to_id = {} # article title -> article id
title_to_redirect = {} # redirect title -> target title
c = 0 # progress counter for the reading loops
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format a seconds count as H:MM:SS.ss."""
    hours, rem = divmod(sec_elapsed, 60 * 60)
    minutes = int(rem // 60)
    seconds = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(int(hours), minutes, seconds)
start_time = time.time()
print("collecting data from articles.csv...")
# Pass 1: build id<->title maps from the article dump ('$'-separated fields).
with open('articles_save.csv', 'r') as f:
    next(f)  # skip the header row
    for line in f:
        line = line.lower().strip('"')
        c += 1
        if c % 100000 == 0:
            print("{:,}".format(c))
            elapsed_time = time.time() - start_time
            print("Elapsed time: {}".format(hms_string(elapsed_time)))
        arr = line.split('$')
        id_to_title[int(arr[0])] = arr[1]
        title_to_id[arr[1]] = int(arr[0])
print("done collecting data from articles.csv")
c = 0
print("collecting data from articles_redirect.csv...")
# Pass 2: build the redirect map (source title -> target title).
with open('articles_redirect_save.csv', 'r') as f:
    next(f)
    for line in f:
        line = line.lower().strip('"')
        c += 1
        if c % 100000 == 0:
            print("{:,}".format(c))
            elapsed_time = time.time() - start_time
            print("Elapsed time: {}".format(hms_string(elapsed_time)))
        arr = line.split('$')
        title_to_redirect[arr[1]] = arr[2];
print("done collecting data from articles_redirect.csv...")
c = 0
num_in = 0      # links resolved to a known article id
num_not_in = 0  # links whose target could not be resolved
# Pass 3: emit the link graph (wikipedia.in) and the id->title lookup (lookup.in).
with open('articles_save.csv', 'r') as articles_file, \
    open('wikipedia.in', 'w') as graph_file, \
    open('lookup.in', 'w') as lookup_file:
    next(articles_file)
    for line in articles_file:
        line = line.lower().strip('"')
        c += 1
        if c % 100000 == 0:
            print("{:,}".format(c))
            print("ratio: ", float(num_in) / (num_not_in + num_in))
            elapsed_time = time.time() - start_time
            print("Elapsed time: {}".format(hms_string(elapsed_time)))
        arr = line.split('$')
        u_id = int(arr[0])
        lookup_file.write('%d %s\n' % (u_id, arr[1]))
        for i in range(2, len(arr)): # skip id, title
            first = arr[i].split('|')[0]
            v_id = -1
            hashtag = first.find('#')
            if hashtag != -1:
                first = first[0:hashtag] # remove hashtag
            # Resolve the link target directly, or through one redirect hop.
            if first in title_to_id:
                v_id = title_to_id[first];
                num_in += 1
                #print("is :", first)
            elif first in title_to_redirect:
                redirect = title_to_redirect[first];
                if redirect in title_to_id:
                    v_id = title_to_id[redirect]
                    num_in += 1
                #print("is :", first)
            else:
                num_not_in += 1
                #print("not:", first)
            if v_id != -1:
                graph_file.write('%d %d %d\n' % (u_id, v_id, i - 1)) # u, v, rank
|
def Reverse(n):
    """Return the decimal digits of non-negative integer *n* in reverse order."""
    reversed_num = 0
    while n:
        n, digit = divmod(n, 10)
        reversed_num = reversed_num * 10 + digit
    return reversed_num
# Ad-hoc unit tests: each line prints True when expected == actual.
print(4321==Reverse(1234))#expectedValue==actualValue
print(321==Reverse(123))#Unit Testing
print(1==Reverse(1))
print(21==Reverse(12))
# Print every three-digit palindrome.
for n in range(100,1000):
    if n==Reverse(n):
        print(n)
|
#!/usr/bin/env python
import os
'''
The above line tells the operating system how to deal with the file when it
is executed
'''
'''
Cgi is sending 200 OK, then lets you put any headers you want on the socket.
'''
# Python 2 CGI script: identify the requesting browser from the CGI
# environment and report it back as text/html.
user_agent = os.environ['HTTP_USER_AGENT']
# CGI response headers, followed by the mandatory blank line.
print "Content-Type: text/html"
print
if 'Chrome' in user_agent:
    print 'You\'re using Chrome'
elif 'Firefox' in user_agent:
    print 'You\'re using Firefox'
else:
    print "Who knows man, who knows"
|
from itertools import count
# n socks on hand; one extra pair arrives on every m-th day.
n, m = map(int, input().split())
restock_day = m
for today in count(1):
    if n == 0:
        # Ran out this morning, so yesterday was the last wearable day.
        print(today - 1)
        break
    n -= 1
    if today == restock_day:
        n += 1
        restock_day += m
|
## uses local version of python on sublime
import base64
import math
import sys

import matplotlib
import numpy
#import pybel, openbabel
import pylab
import scipy
import scipy.cluster.hierarchy as sch
## Writen by Trent Balius in the Shoichet Group
## this program is not functional right now
## THis program will read in the fingerprints generated by SEA and will generate a Tanimoto matrix.
## It will perform clustering and alow us to see the chemotyes and the reasons for the relationship.
## perhaps this should be a funtion with in SEA code.
## We should call the c code from sea to perform the conversion from Base64 (daylight maping) output to bitstrings.
## sea/lib/c/fast_tanimoto/fast_tanimoto.c
## sea/lib/c/fconvert/fconvert2py.c
##
#decimal to binary
def decimal2binary(n):
    """Return the binary representation of a non-negative integer as a string.

    Raises ValueError for negative input.
    """
    # BUG FIX: the Python-2-only `raise ValueError, "..."` syntax is
    # replaced with the call form, valid on both Python 2 and 3.
    if n < 0:
        raise ValueError("must be a positive integer")
    if n == 0:
        return '0'
    bStr = ''
    while n > 0:
        bStr = str(n % 2) + bStr
        n = n >> 1
    return bStr
def makeFingerPrintArray(filehandel):
    """Read SEA fingerprint lines ("fingerprint;...") from an open file and
    return the base64-encoded fingerprint strings.

    NOTE: conversion to bitstrings is not implemented yet (see the module
    header about fconvert); the raw base64 field is returned as-is.
    """
    # BUG FIX: removed a Python-2-only debug `print` and a dead
    # base64.b64decode whose result was never used.
    fplist = []
    for line in filehandel.readlines():
        fplist.append(line.split(';')[0])
    return fplist
def calcTanimoto(A, B):
    """Tanimoto coefficient of two boolean arrays:
    Tc = sum(ai and bi) / sum(ai or bi).

    BUG FIXES: the original iterated over an undefined name (fp1),
    accumulated into misspelled variables (Aandb/Aorb), and performed
    integer division; a zero denominator now returns 0.0.
    """
    a_and_b = 0
    a_or_b = 0
    for i in range(min(len(A), len(B))):
        a_and_b += A[i] and B[i]
        a_or_b += A[i] or B[i]
    if a_or_b == 0:
        return 0.0
    return float(a_and_b) / a_or_b
def makeTanimotoMatrix(fingerprints1, fingerprints2):
    """Pairwise Tanimoto matrix: rows from fingerprints1, columns from
    fingerprints2 (list of lists of floats)."""
    if len(fingerprints1) == 1 or len(fingerprints2) == 1:
        print("fingerprints is size 1")
    # BUG FIX: the size message used to report len(fingerprints1) twice;
    # prints converted to the call form so the code also runs on Python 3.
    print("makeing a " + str(len(fingerprints1)) + "X" + str(len(fingerprints2)) + "Matrix. ")
    # intialize matrix
    matrix = [[0 for _ in range(len(fingerprints2))]
              for _ in range(len(fingerprints1))]
    # fill matrix
    for i in range(len(fingerprints1)):
        for j in range(len(fingerprints2)):
            matrix[i][j] = calcTanimoto(fingerprints1[i], fingerprints2[j])
    return matrix
def write_matrix(filehandel, Matrix):
    """Write *Matrix* as comma-separated '%f' rows to an open file handle.

    BUG FIX: the first element of every row was written twice because the
    comma-prefixed write lacked an 'else' branch.
    """
    for row in Matrix:
        for j, value in enumerate(row):
            if j == 0:
                filehandel.write('%f' % value)
            else:
                filehandel.write(',%f' % value)
        filehandel.write('\n')
def mat_to_mat(Mat):
    """Convert a square Tanimoto matrix (list of lists) into a numpy
    distance matrix with entries 1 - Tc (distance-like, unlike Tc itself)."""
    m = len(Mat)
    n = len(Mat[0])
    if m != n:
        print("inconsitancy in numbers of rows and columns in the matrix.")
        print("%d %d" % (m, n))
    # BUG FIX: scipy.zeros was a numpy alias removed from modern scipy;
    # use numpy directly (prints also converted to run on Python 3).
    X = numpy.zeros([m, n])
    for i in range(0, m):
        for j in range(0, n):
            X[i, j] = -Mat[i][j] + 1.0
    return X
def mat_to_vector(Mat):
    """Return (X, Xvec): the 1 - Tc distance matrix and its condensed
    upper-triangle vector (the input format scipy.cluster.hierarchy.linkage
    expects).

    Exits the process if Mat is not square.
    """
    # Fixed: `scipy.zeros` (a deprecated numpy alias) was removed in
    # SciPy >= 1.3; numpy.zeros is the direct equivalent.
    import numpy
    m = len(Mat)
    n = len(Mat[0])
    if (m != n):
        print("inconsitancy in numbers of rows and columns in the matrix.")
        sys.exit()
    print(m, n)
    X = numpy.zeros([m, n])
    # Fixed: integer (floor) division — n*(n-1)/2 is a float on Python 3
    # and numpy.zeros rejects non-integer sizes.
    Xvec = numpy.zeros(n * (n - 1) // 2)
    count2 = 0
    # converts from a 2D array to a numpy matrix of 1 - Tc
    for i in range(0, n):
        for j in range(0, n):
            X[i, j] = -Mat[i][j] + 1.0
    # condensed vector: strict upper triangle, row-major
    for i in range(0, n):
        for j in range(i + 1, n):
            Xvec[count2] = -Mat[i][j] + 1.0
            count2 = count2 + 1
    return X, Xvec
def heatmap_not_sym(Mat, filename, idx1, idx2):
    # Render a (possibly non-symmetric) 1 - Tc heatmap of Mat, with rows
    # reordered by idx1 and columns by idx2, and save it to `filename`.
    # Labels are 'lig_<original 1-based index>'.
    m = len(Mat)
    n = len(Mat[0])
    print m,n
    #print 'test',len(idx1),len(idx2)
    ylabel = []
    xlabel = []
    # Labels reflect the ORIGINAL positions, so after reordering each tick
    # still names the underlying ligand.
    for i in range(0,m):
        ylabel.append('lig_'+str(idx1[i]+1))
    for i in range(0,n):
        xlabel.append('lig_'+str(idx2[i]+1))
    fig = pylab.figure(figsize=(8,8))
    # Convert Tc similarities to 1 - Tc distances, then reorder rows/columns.
    Mat = mat_to_mat(Mat)
    Mat = Mat[idx1,:]
    Mat = Mat[:,idx2]
    axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
    # Grayscale colormap: 0 -> black, 1 -> white.
    cdict = {'red': ((0.0, 0.0, 0.0),
                     (0.0, 0.0, 0.0),
                     (1.0, 1.0, 1.0)),
             'green': ((0.0, 0.0, 0.0),
                       (0.0, 0.0, 0.0),
                       (1.0, 1.0, 1.0)),
             'blue': ((0.0, 0.0, 0.0),
                      (0.0, 0.0, 0.0),
                      (1.0, 1.0, 1.0))}
    my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,100)
    im = axmatrix.imshow(Mat, aspect='auto', origin='lower',interpolation='nearest', cmap=my_cmap)
    im.set_clim(0,1)  # distances are in [0, 1]
    axmatrix.set_xlim(-0.5, n-0.5)
    # NOTE(review): the y-limit uses n rather than m — correct only when the
    # selection is square (m == n); confirm for rectangular selections.
    axmatrix.set_ylim(-0.5, n-0.5)
    axmatrix.set_xticks(range(0,n))
    axmatrix.set_xticklabels(xlabel)
    axmatrix.set_yticks(range(0,m))
    axmatrix.set_yticklabels(ylabel)
    # Shrink tick label fonts so many ligand labels stay readable.
    for i in range(0,m):
        label = axmatrix.yaxis.get_major_ticks()[i].label
        label.set_fontsize(4)
    for i in range(0,n):
        label = axmatrix.xaxis.get_major_ticks()[i].label
        label.set_fontsize(4)
        label.set_rotation('vertical')
    # Plot colorbar.
    axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
    pylab.colorbar(im, cax=axcolor)
    fig.show()
    fig.savefig(filename,dpi=600)
def heatmap(Mat, bool_sort, filename):
    # Render a 1 - Tc distance heatmap of the square matrix Mat and save it
    # to `filename`. When bool_sort is true, rows/columns are reordered by
    # single-linkage hierarchical clustering and dendrograms are drawn on the
    # left and top edges.
    m = len(Mat)
    n = len(Mat[0])
    print m,n
    xlabel = []
    for i in range(0,m):
        xlabel.append('lig_'+str(i+1))
    ylabel = []
    for i in range(0,n):
        ylabel.append('lig_'+str(i+1))
    fig = pylab.figure(figsize=(8,8))
    if (bool_sort):
        # Cluster on the condensed distance vector; Mat becomes the full
        # (square) 1 - Tc matrix.
        Mat, Matvec = mat_to_vector(Mat)
        Y = sch.linkage(Matvec, method='single')
        threshold = 0.2  # flat-cluster cut distance
        clusters = sch.fcluster(Y, threshold, 'distance')
        print clusters
        # Left-hand dendrogram.
        ax1 = fig.add_axes([0.09,0.1,0.2,0.6])
        Z1 = sch.dendrogram(Y, orientation='right')
        #help(sch.dendrogram)
        ax1.set_xticks([])
        ax1.set_yticks([])
        # Compute and plot second dendrogram.
        ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
        Z2 = sch.dendrogram(Y)
        ax2.set_xticks([])
        ax2.set_yticks([])
        #ax2.set_xlim(-1, n)
        # Plot distance matrix.
        axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
        # Reorder rows/columns into dendrogram leaf order.
        idx1 = Z1['leaves']
        idx2 = Z2['leaves']
        Mat = Mat[idx1,:]
        Mat = Mat[:,idx2]
        #xlabel[:] = xlabel[idx2]
        # Permute the x labels to match the new column order.
        xlabel_new = []
        for i in range(len(idx2)):
            xlabel_new.append(xlabel[idx2[i]])
        del xlabel[:]
        xlabel = xlabel_new
    else:
        # Unsorted: just convert similarities to distances.
        Mat = mat_to_mat(Mat)
        axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
    # Grayscale colormap: 0 -> black, 1 -> white.
    cdict = {'red': ((0.0, 0.0, 0.0),
                     (0.0, 0.0, 0.0),
                     (1.0, 1.0, 1.0)),
             'green': ((0.0, 0.0, 0.0),
                       (0.0, 0.0, 0.0),
                       (1.0, 1.0, 1.0)),
             'blue': ((0.0, 0.0, 0.0),
                      (0.0, 0.0, 0.0),
                      (1.0, 1.0, 1.0))}
    my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,100)
    im = axmatrix.imshow(Mat, aspect='auto', origin='lower',interpolation='nearest', cmap=my_cmap)
    if (bool_sort):
        # Mark the diagonal (self-distance) with small yellow dots.
        v = range(0,n)
        axmatrix.plot(v,v,'yo',markersize=2)
    im.set_clim(0,1)  # distances are in [0, 1]
    axmatrix.set_xlim(-0.5, n-0.5)
    axmatrix.set_ylim(-0.5, n-0.5)
    axmatrix.set_xticks(range(0,m))
    axmatrix.set_xticklabels(xlabel)
    if (not bool_sort):
        axmatrix.set_yticks(range(0,n))
        axmatrix.set_yticklabels(ylabel)
        for i in range(0,n):
            label = axmatrix.yaxis.get_major_ticks()[i].label
            label.set_fontsize(4)
    else:
        # The left dendrogram already identifies the rows.
        axmatrix.set_yticks([])
    # Shrink and rotate x tick labels so many ligand labels stay readable.
    for i in range(0,m):
        label = axmatrix.xaxis.get_major_ticks()[i].label
        label.set_fontsize(4)
        label.set_rotation('vertical')
    # Plot colorbar.
    axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
    pylab.colorbar(im, cax=axcolor)
    fig.show()
    fig.savefig(filename,dpi=600)
def main():
    """CLI entry point: expects fingerprints1, fingerprints2 and an output
    prefix on the command line, and parses the first fingerprint file.
    """
    if len(sys.argv) != 4:  # script name + exactly 3 arguments
        print("You must entered in 3 inputs:")
        print("fingerprints1, fingerprints2, output prefix")
        exit()
    file1name = sys.argv[1]
    file2name = sys.argv[2]
    outname = sys.argv[3]
    print("input file = " + file1name)
    # Fixed: this message mislabelled the second fingerprint file as the
    # output matrix file (the usage text above calls it fingerprints2).
    print("fingerprints2 file = " + file2name)
    file1handel = open(file1name, 'r')
    file2handel = open(file2name, 'r')
    try:
        makeFingerPrintArray(file1handel)
    finally:
        # Fixed: handles are now closed even if parsing raises.
        file1handel.close()
        file2handel.close()
    return
#heatmap(m,True,file2name+'.png')
# Fixed: guard the entry point so importing this module does not run main().
if __name__ == "__main__":
    main()
|
#coding=utf-8
# Print the elements of a matrix in clockwise spiral order, from the outside in
def printMatrix(matrix):
    """Return the elements of `matrix` in clockwise spiral order
    (outside-in).

    Fixed: the original used `value not in res` membership tests to avoid
    revisiting cells, which silently drops elements whenever the matrix
    contains duplicate values. This version tracks explicit boundaries and
    never inspects values, so duplicates are handled correctly.

    :param matrix: rectangular list-of-lists (may be empty)
    :return: flat list of elements in spiral order
    """
    res = []
    if not matrix or not matrix[0]:
        return res  # robust: empty matrix yields an empty traversal
    top, bottom = 0, len(matrix) - 1
    left, right = 0, len(matrix[0]) - 1
    while top <= bottom and left <= right:
        for c in range(left, right + 1):          # top row, left -> right
            res.append(matrix[top][c])
        for r in range(top + 1, bottom + 1):      # right column, top -> bottom
            res.append(matrix[r][right])
        if top < bottom:                          # bottom row, right -> left
            for c in range(right - 1, left - 1, -1):
                res.append(matrix[bottom][c])
        if left < right:                          # left column, bottom -> top
            for r in range(bottom - 1, top, -1):
                res.append(matrix[r][left])
        top += 1
        bottom -= 1
        left += 1
        right -= 1
    return res
if __name__ == "__main__":
lists = [
[1,2,3,4],
[5,6,7,8],
[9,10,11,12],
[13,14,15,16,]
]
l0 = printMatrix(lists)
print l0
|
'''
* TestON is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
* TestON is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
'''
class FvtTest :
    """TestON test-case container: sends crafted packet-ins through a
    simulated switch and checks each one reaches the expected controller."""
    def __init__(self) :
        self.default = ''
    def CASE1(self,main) :
        """Verify packet-in forwarding for controller 0 and controller 1.

        `main` is the TestON harness object providing the FVT driver.
        """
        main.case("Checking FVT")
        main.step("Checking the FVT")
        # Packet whose source MAC should route the packet-in to controller 0.
        pkt = main.FVT.simplePacket("SRC_MAC_FOR_CTL0_0")
        in_port = 3
        msg = main.FVT.genPacketIn(in_port=in_port, pkt=pkt)
        snd_list = ["switch", 0, msg]
        exp_list = [["controller", 0, msg]]
        # xid_ignore: transaction ids may differ; hdr_only: compare headers only.
        res = main.FVT.ofmsgSndCmp(snd_list , exp_list , xid_ignore=True, hdr_only=True)
        utilities.assert_equals(expect=True,actual=res,onpass="Received expected message",onfail="Received unexpected message")
        #Packet_in for controller1
        pkt = main.FVT.simplePacket("SRC_MAC_FOR_CTL1_0")
        in_port = 3
        msg = main.FVT.genPacketIn(in_port=in_port, pkt=pkt)
        snd_list = ["switch", 0, msg]
        exp_list = [["controller", 1, msg]]
        res = main.FVT.ofmsgSndCmp(snd_list , exp_list , xid_ignore=True)
        utilities.assert_equals(expect=True,actual=res,onpass="Received expected message",onfail="Received unexpected message")
|
"""This file contains base form class which helps integrate WTF-Alchemy
and Flask-WTF, since it doesn't work properly ot of the box."""
import flask_wtf
import wtforms_alchemy
from timeless.db import DB
BaseModelForm = wtforms_alchemy.model_form_factory(flask_wtf.FlaskForm)
class ModelForm(BaseModelForm):
    """Glue between WTForms-Alchemy model forms and Flask-WTF.

    See https://wtforms-alchemy.readthedocs.io/en/latest/advanced.html for
    the rationale behind this integration class.
    """

    def __init__(self, *args, **kwargs):
        # An existing model instance may be supplied via the "instance"
        # keyword; save() then updates it instead of creating a new one.
        self.instance = kwargs.pop("instance", None)
        super().__init__(*args, **kwargs)

    @classmethod
    def get_session(cls):
        """Session used by WTForms-Alchemy (e.g. for unique validators)."""
        return DB.session

    def create(self, session):
        """Instantiate Meta.model, fill it from the form, add it to session."""
        self.instance = self.Meta.model()
        self.populate_obj(self.instance)
        session.add(self.instance)

    def update(self, session):
        """Copy form data onto the existing instance and merge it."""
        self.populate_obj(self.instance)
        session.merge(self.instance)

    def save(self, commit=True):
        """Create or update self.instance; commit unless commit=False.

        :return: the saved model instance
        """
        session = self.get_session()
        action = self.update if self.instance else self.create
        action(session)
        if commit:
            session.commit()
        return self.instance
|
from app.util import extract_guid
def test_extract_guid():
    """extract_guid must handle evernote:// links and in-app web links."""
    cases = [
        ('evernote:///view/536854/s1/73d53dcc-c4c3-40c6-aeff-55138da5ec26/73d53dcc-c4c3-40c6-aeff-55138da5ec26/',
         '73d53dcc-c4c3-40c6-aeff-55138da5ec26'),
        ('https://www.evernote.com/shard/s1//nl/536854/0c1ee56e-d792-4e01-9f71-9ecb13fdea30',
         '0c1ee56e-d792-4e01-9f71-9ecb13fdea30'),
    ]
    for link, expected_guid in cases:
        assert extract_guid(link) == expected_guid
from time import sleep
from datetime import datetime
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import threading
# Global Vars shared between the measurement thread and the reader functions.
wind_speed = 0                      # latest averaged wind speed (km/h)
max_wind_speed = float('-inf')      # highest speed seen since last reset
min_wind_speed = float('inf')       # lowest speed seen since last reset
rotations = 0                       # reed-switch pulses in the current window
def get_wind_speed() -> tuple:
    """Return (current, maximum, minimum) wind speeds, each rounded to
    two decimal places.
    """
    global wind_speed
    global max_wind_speed
    global min_wind_speed
    readings = (wind_speed, max_wind_speed, min_wind_speed)
    return tuple(round(value, 2) for value in readings)
def reset_wind_speed() -> None:
    """Reset the recorded max/min wind speeds to their initial sentinel
    values so the next window starts fresh.
    """
    global max_wind_speed
    global min_wind_speed
    max_wind_speed, min_wind_speed = float('-inf'), float('inf')
def button_callback(channel) -> None:
    """GPIO edge-detect callback: count one anemometer rotation.

    :param channel: the GPIO channel that fired (unused)
    """
    global rotations
    rotations = rotations + 1
def anemometer(avg_sampling_time: int = 60) -> None:
    """
    Continuously measure wind speed from the anemometer's reed switch.

    Counts falling edges on GPIO (BCM) pin 14 over each sampling window and
    updates the module-level wind_speed / max_wind_speed / min_wind_speed.
    Loops forever; intended to run in a daemon thread.

    :param avg_sampling_time: seconds per averaging window
    """
    # GPIO.setwarnings(False) # Ignore warning for now
    GPIO.setmode(GPIO.BCM) # BCM (chip) pin numbering; reed switch on GPIO14
    GPIO.setup(14, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # input, pulled low (off)
    # GPIO.add_event_detect(14, GPIO.FALLING, callback=button_callback, bouncetime=125)
    # Each falling edge increments the global rotation counter.
    GPIO.add_event_detect(14, GPIO.FALLING, callback=button_callback)
    # message = input("Press enter to quit\n\n") # Run until someone presses enter
    # return count
    while True:
        global rotations
        global max_wind_speed
        global min_wind_speed
        global wind_speed
        rotations = 0
        sleep(avg_sampling_time)
        # rotations/second * 2.25 * 1.609344 -> km/h
        # NOTE(review): 2.25 looks like the sensor's mph-per-rotation/sec
        # factor — confirm against the anemometer datasheet.
        wind_speed = round(rotations*2.25*1.609344/avg_sampling_time, 2)
        if wind_speed > max_wind_speed:
            max_wind_speed = wind_speed
        if wind_speed < min_wind_speed:
            min_wind_speed = wind_speed
    GPIO.cleanup() # Clean up (NOTE(review): unreachable — the loop never exits)
def is_any_thread_alive(threads) -> bool:
    """
    Check whether any of the given threads is still running.

    :param threads: iterable of threading.Thread objects
    :return: True if at least one thread is alive, False otherwise
    """
    # Idiom fix: any() short-circuits and avoids building an intermediate
    # list (was `True in [t.is_alive() for t in threads]`).
    return any(t.is_alive() for t in threads)
def main() -> None:
    """
    Driver function: start the anemometer in a daemon thread and print the
    rotation count and current/max/min speeds every 3 seconds.
    """
    run_anemometer = threading.Thread(target=anemometer, name="Anemometer", daemon=True)
    run_anemometer.start()
    # anemometer()
    # Poll until the measurement thread dies (it normally loops forever).
    while is_any_thread_alive([run_anemometer]):
        print("rotations: " + str(rotations) + "\t" + str(wind_speed) + "KPH" + "\t" + str(max_wind_speed) + "KPH" + "\t" + str(min_wind_speed) + "KPH")
        sleep(3)
if __name__ == '__main__':
    main()
import socket, sys
from struct import *
# Hand-built IPv4 header sender over a raw socket (requires root privileges).
source_ip = '192.168.159.129'
dest_ip = '192.168.130.0'
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_UDP)
except socket.error as msg:
    # Fixed: `except socket.error , msg` is Python-2-only syntax; .args works
    # on both Python 2 and 3.
    print(' Socket could not be created. Error code : ' + str(msg.args[0]) + ' Messsage ' + str(msg.args[1]))
    sys.exit()
packet = b''
# IPv4 header fields (RFC 791).
ip_ihl = 5                      # header length in 32-bit words (no options)
ip_ver = 4
ip_tos = 0
ip_tot_len = 0                  # kernel fills in the total length
ip_id = 54321
ip_frag_off = 0
ip_ttl = 255
ip_proto = socket.IPPROTO_UDP
ip_check = 0                    # kernel fills in the header checksum
ip_saddr = socket.inet_aton(source_ip)
ip_daddr = socket.inet_aton(dest_ip)
ip_ihl_ver = (4 << 4) | 5       # version and IHL share one byte
ip_header = pack('!BBHHHBBH4s4s', ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off, ip_ttl, ip_proto, ip_check, ip_saddr, ip_daddr)
# Fixed: payload must be bytes on Python 3 to concatenate with pack() output.
user_data = b'Hello, how are you'
# Pseudo header (would feed a UDP checksum). NOTE(review): psh is built but
# never used — no UDP header/checksum is emitted; the packet is the raw IP
# header followed directly by the payload.
source_address = socket.inet_aton(source_ip)
dest_address = socket.inet_aton(dest_ip)
placeholder = 0
protocol = socket.IPPROTO_UDP
length = len(user_data)
psh = pack('!4s4sBBH', source_address, dest_address, placeholder, protocol, length)
psh = psh + user_data
packet = ip_header + user_data
# Send forever. Fixed: the packet was previously sent TWICE per iteration —
# once in the loop body and once more inside the print expression.
while True:
    sent = s.sendto(packet, (dest_ip, 0))
    print('Sending ' + str(sent) + ' bytes..')
|
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
from xml.dom import minidom
import libvirt
import argparse
import os
import sys
import subprocess
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
import json
except ImportError:
import simplejson as json
def parse_args():
    """
    Build and evaluate the command line interface of the libvirt dynamic
    inventory script, returning the parsed namespace.
    """
    parser = argparse.ArgumentParser(
        description='Ansible dynamic inventory script for Libvirt.',
    )
    parser.add_argument('--list',
                        action='store_true',
                        default=True,
                        help='Get data of all virtual machines (default: True).')
    parser.add_argument('--host',
                        help='Get data of virtual machines running on specified host.')
    parser.add_argument('--pretty',
                        action='store_true',
                        default=False,
                        help='Pretty format (default: False).')
    return parser.parse_args()
def load_config_file():
    """Load the script configuration, guaranteeing a [libvirt] section.

    The path comes from $LIBVIRT_INI_PATH, falling back to 'libvirt.ini'
    next to this script; a missing file is tolerated (defaults apply).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    default_path = os.path.join(script_dir, 'libvirt.ini')
    config_path = os.environ.get('LIBVIRT_INI_PATH', default_path)
    # Default connection URL is the literal string 'None' when unset.
    config = configparser.ConfigParser(
        defaults={'libvirt_connection_url': 'None'}
    )
    if not config.has_section('libvirt'):
        config.add_section('libvirt')
    config.read(config_path)
    return config
def create_connection():
"""
Create a connection to libvirt engine API.
"""
config = load_config_file()
# Create a connection with options defined in ini file:
url = config.get('libvirt', 'libvirt_connection_url')
try:
conn = libvirt.open(url)
if conn == None:
print("Failed to open connection to %s" % (url), file=sys.stderr)
exit(1)
except:
return None
return conn
def get_dict_of_struct(connection, target_vm_name):
    """Return an inventory dict describing one libvirt domain.

    For RUNNING domains the dict additionally contains the first
    192.168.200.* IPv4 address (queried via the guest agent) and the
    hostname obtained by ssh-ing into the guest as 'centos'.
    """
    data={}
    vm = connection.lookupByName(target_vm_name)
    # VM state: map libvirt's numeric state to a readable label.
    state, reason = vm.state()
    if state == libvirt.VIR_DOMAIN_NOSTATE:
        _state = 'NOSTATE'
    elif state == libvirt.VIR_DOMAIN_RUNNING:
        _state = 'RUNNING'
    elif state == libvirt.VIR_DOMAIN_BLOCKED:
        _state = 'BLOCKED'
    elif state == libvirt.VIR_DOMAIN_PAUSED:
        _state = 'PAUSED'
    elif state == libvirt.VIR_DOMAIN_SHUTDOWN:
        _state = 'SHUTDOWN'
    elif state == libvirt.VIR_DOMAIN_SHUTOFF:
        _state = 'SHUTOFF'
    elif state == libvirt.VIR_DOMAIN_CRASHED:
        _state = 'CRASHED'
    elif state == libvirt.VIR_DOMAIN_PMSUSPENDED:
        _state = 'PMSUSPENDED'
    else:
        _state = 'UNKNOWN'
    if _state == 'RUNNING':
        # VM Network interface: addresses come from the guest agent, so this
        # requires qemu-guest-agent inside the VM.
        ifaces = vm.interfaceAddresses(libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT, 0)
        vm_ip=''
        for (name, val) in ifaces.items():
            if val['addrs']:
                for ipaddr in val['addrs']:
                    # print("ipaddr=%s" %(ipaddr))
                    # Keep the last IPv4 on the 192.168.200.0/24 management net.
                    if ipaddr['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4 and ipaddr['addr'] != '127.0.0.1' and ipaddr['addr'].find('192.168.200') != -1:
                        vm_ip = ipaddr['addr']
        #print("vm_ip=%s" %(vm_ip))
        # Hostname via ssh. NOTE(review): shell=True with an interpolated
        # address and a hard-coded 'centos' user — verify both are acceptable
        # for this environment (the address comes from libvirt, not a user).
        cmd = subprocess.Popen("ssh -o StrictHostKeyChecking=no centos@%s -- hostname" %(vm_ip), shell=True, stdout=subprocess.PIPE)
        #cmd = subprocess.Popen("host 1.8.8.8", shell=True, stdout=subprocess.PIPE)
        vm_hostname=''
        for line in cmd.stdout:
            vm_hostname = line[:-1]  # strip the trailing newline (bytes)
        # For reference
        # for line in cmd.stdout:
        #   if "pointer" in line:
        #     hostnm = line.rsplit(' ',1)[1]
        #     vm_hostname = hostnm[:-2]
        #   else:
        #     vm_hostname = 'NXDOMAIN'
        data={
            'name': vm.name(),
            'status': _state,
            'os_type': vm.OSType(),
            'memory': str(vm.info()[2]),
            'max_memory': str(vm.maxMemory()),
            'cpu': str(vm.info()[3]),
            'max_cpu': str(vm.maxVcpus()),
            'ip': vm_ip,
            'hostname': vm_hostname.decode("utf-8")
        }
    else:
        # Non-running domains: agent queries would fail, so report basics only.
        data={
            'name': vm.name(),
            'status': _state,
            'memory': str(vm.info()[2]),
            'max_memory': str(vm.maxMemory()),
            'cpu': str(vm.info()[3]),
        }
    return data
def get_data(connection, target_vm_name=None):
    """
    Obtain data of `target_vm_name` if specified, otherwise obtain data of all vms.

    With no (or unusable) connection an inventory skeleton with empty
    hostvars is returned so Ansible still gets valid JSON.
    """
    if connection == None:
        data = defaultdict(list)
        data["_meta"] = {
            'hostvars': []
        }
        return data
    all_vms_name_list = connection.listAllDomains()   # domain objects, all states
    active_vms_id_list = connection.listDomainsID()   # numeric ids, running only
    active_vms_name_list = []
    # VM ID ==> VM Name
    if len(active_vms_id_list) != 0:
        for vmID in active_vms_id_list:
            vm = connection.lookupByID(vmID)
            active_vms_name_list.append(vm.name())
    # Check if target VM is running
    # if it is running, return only the VM data. If not, return all VM data.
    if target_vm_name and target_vm_name in active_vms_name_list:
        data = get_dict_of_struct(
            connection=connection,
            target_vm_name=target_vm_name,
        )
    else:
        vms = dict()
        data = defaultdict(list)
        for vm in all_vms_name_list:
            name = vm.name()
            # Add vm to vms dict
            vms[name] = get_dict_of_struct(connection, name)
            # Add vm to group by OKD version (Additional Data for OKD).
            # Naming convention: '<okd_version>_<role>...' — the prefix before
            # the first underscore selects the group.
            okd_version = vm.name().split('_')[0]
            if okd_version in vm.name():
                data['group_%s_all' % okd_version].append(vm.name())
                # Role-based sub-groups, keyed on substrings of the VM name.
                if 'master' in vm.name():
                    data['group_%s_masters' % okd_version].append(vm.name())
                if 'app' in vm.name():
                    data['group_%s_app_nodes' % okd_version].append(vm.name())
                if 'infra' in vm.name():
                    data['group_%s_infra_nodes' % okd_version].append(vm.name())
                if 'etcd' in vm.name():
                    data['group_%s_etcd_nodes' % okd_version].append(vm.name())
                if 'lb' in vm.name():
                    data['group_%s_lb_nodes' % okd_version].append(vm.name())
        data["_meta"] = {
            'hostvars': vms,
        }
    return data
def main():
    """Entry point: emit the inventory JSON for all VMs, or for --host only."""
    args = parse_args()
    connection = create_connection()
    inventory = get_data(
        connection=connection,
        target_vm_name=args.host,
    )
    # --pretty doubles as both the sort flag and the indent width (2 or 0).
    rendered = json.dumps(
        obj=inventory,
        sort_keys=args.pretty,
        indent=args.pretty * 2,
    )
    print(rendered)
if __name__ == '__main__':
    main()
|
"""
Buil in module
--- contoh waktu
documentasinya bisa dilihat di websitenya python
"""
import datetime
# print(datetime.datetime.now())
# #atau membuat tangal kita sendiri
# date = datetime.datetime(1999,8,10)
# print(date)
#atau membuat tangal degan format
date = datetime.datetime.now()
print(date.strftime("%Y %B %d")) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Water Supply model
- WaterSupplySectorModel implements SectorModel
- wraps ExampleWaterSupplySimulationModel
- instantiate a model instance
"""
import logging
import numpy as np
from smif.model.sector_model import SectorModel
class WaterSupplySectorModel(SectorModel):
    """Example of a class implementing the SectorModel interface,
    using one of the toy water models below to simulate the water supply
    system.
    """
    def simulate(self, data):
        """Simulate water supply
        Arguments
        =========
        data
            - inputs/parameters, implicitly includes:
                - scenario data, e.g. expected level of rainfall
                - data output from other models in workflow
                - parameters set or adjusted for this model run
                - system state data, e.g. reservoir level at year start
            - system state, implicity includes:
                - initial existing system/network
                - decisions, e.g. asset build instructions, demand-side interventions to apply
        """
        # State
        current_interventions = data.get_current_interventions()
        self.logger.debug("Current interventions: {}".format(current_interventions))
        number_of_treatment_plants = 2  # fixed plant count for this toy model
        # Inputs
        per_capita_water_demand = data.get_parameter(
            "per_capita_water_demand"
        ).as_ndarray()  # liter/person
        population = data.get_data("population").as_ndarray()  # people
        water_demand = data.get_data("water_demand").as_ndarray()  # liter
        # Total demand: population-driven demand plus exogenous demand.
        final_water_demand = (population * per_capita_water_demand) + water_demand
        raininess = sum(
            data.get_data("precipitation").as_ndarray()
        )  # milliliters to mega
        # Reservoir level carries over from the previous timestep; the base
        # timestep is seeded from the 2009 scenario data.
        if data.current_timestep == data.base_timestep:
            reservoir_level = data.get_data("reservoir_level", 2009)
        else:
            reservoir_level = data.get_previous_timestep_data("reservoir_level")
        reservoir_level = sum(reservoir_level.as_ndarray())  # megaliters
        self.logger.info("Total reservoir level before timestep: %s", reservoir_level)
        self.logger.debug(
            "Parameters:\n "
            "Population: %s\n"
            "Raininess: %s\n "
            "Reservoir level: %s\n "
            "Final demand: %s\n",
            population.sum(),
            raininess.sum(),
            reservoir_level,
            final_water_demand,
        )
        # Parameters
        self.logger.debug(data.get_parameters())
        # simulate (wrapping toy model)
        instance = ExampleWaterSupplySimulationModel()
        instance.raininess = raininess
        instance.number_of_treatment_plants = number_of_treatment_plants
        instance.reservoir_level = reservoir_level
        # run
        water, cost = instance.run()
        self.logger.info(
            "Water: %s, Cost: %s, Reservoir: %s", water, cost, instance.reservoir_level
        )
        # set results: annual totals are spread evenly over 3 output intervals.
        data.set_results("water", np.ones((3,)) * water / 3)
        data.set_results("cost", np.ones((3,)) * cost / 3)
        data.set_results("energy_demand", np.ones((3,)) * 3)
        data.set_results(
            "reservoir_level", np.ones((3,)) * instance.reservoir_level / 3
        )
    def extract_obj(self, results):
        # Objective for optimisation runs: total cost.
        return results["cost"].sum()
class ExampleWaterSupplySimulationModel(object):
    """A toy water-supply simulation model used for testing.

    Parameters
    ==========
    raininess : int
        Rain added to the reservoir on each run
    number_of_treatment_plants : int
        Caps how much water can be produced per run and drives cost
    reservoir_level :
        Water currently stored in the reservoir
    """
    def __init__(
        self, raininess=None, number_of_treatment_plants=None, reservoir_level=None
    ):
        self.raininess = raininess
        self.number_of_treatment_plants = number_of_treatment_plants
        self.reservoir_level = reservoir_level

    def run(self):
        """Run one step of the water supply model.

        Rain is added to the reservoir, then at most one unit of water per
        treatment plant is withdrawn. Each plant costs 1.264 units.

        :return: (water produced, total cost)
        """
        log = logging.getLogger(__name__)
        log.debug("There are %s plants", self.number_of_treatment_plants)
        log.debug("It is %s rainy", self.raininess)
        log.debug("Reservoir level was %s", self.reservoir_level)
        self.reservoir_level = self.reservoir_level + self.raininess
        produced = min(self.number_of_treatment_plants, self.reservoir_level)
        log.debug("The system produces %s water", produced)
        self.reservoir_level = self.reservoir_level - produced
        log.debug("Reservoir level now %s", self.reservoir_level)
        total_cost = 1.264 * self.number_of_treatment_plants
        log.debug("The system costs £%s", total_cost)
        return produced, total_cost
if __name__ == "__main__":
"""Run core model if this script is run from the command line"""
CORE_MODEL = ExampleWaterSupplySimulationModel(1, 1, 2)
CORE_MODEL.run()
|
# -*- coding: utf-8 -*-
{
'name' : 'MGM Sales Bar Chart',
'version' : '1.0',
'category': 'sale',
'author': 'Hashmicro/GYB IT SOLUTIONS-Anand',
'description': """ Create Bar Chart for Ferry, FLF, Tug and Barge, Stevedoring, Other Quotation, Total,
""",
'website': 'http://www.hashmicro.com/',
'depends' : [
'sale', 'sales_team', 'crm', 'so_blanket_order', 'account'],
'data': [
'security/ir.model.access.csv',
'views/mgm_sales_bar_chart.xml',
'data/sales_bar_chart_demo.xml',
],
'demo': [
],
'qweb': [
],
'installable': True,
'application': True,
'auto_install': False,
}
|
import scipy.io as sio
LOGGER = None
class MatfileSOKPReader(object):
    """Reader for Eltodo SOKP traffic-gantry data stored in MATLAB files."""

    def read_mat(self, file_name: str) -> 'MatfileSOKPReader':
        """Load `file_name`; when it is in SOKP format, populate
        self.time_step (seconds) and self.data (column 0 of the
        personal-vehicle counts).

        :param file_name: path to a .mat file
        :return: self, so calls can be chained (fixed: the method previously
            returned None despite its return annotation)
        """
        temp_data = sio.loadmat(file_name)
        # Check if the loaded dictionary has a field named `data`
        if 'data' in temp_data:
            # Looks like a Eltodo SOKP format
            gantry_data_struct = temp_data['data']
            # If it is the format in question, it will have the following field set
            field_list = ['date', 'time_step', 'gantry_id', 'lanemap', 'los', 'cnt', 'occ', 'spd']
            field_set = set(field_list)
            gantry_data_field_set = set(gantry_data_struct.dtype.names)
            if field_set == gantry_data_field_set:
                # Yes, the file has the expected set of fields
                date = str(gantry_data_struct[0, 0]['date'])
                self.time_step = int(gantry_data_struct[0, 0]['time_step'])
                vehicle_counts = gantry_data_struct[0, 0]['cnt']
                # Row 2 of `cnt` holds the personal-vehicle counts
                # -- TODO confirm against the SOKP format specification.
                pers_vehicles = vehicle_counts[2, 0]
                # NOTE(review): LOGGER is initialised to None in this module;
                # the importer must assign a real logger before calling this
                # method on an SOKP-format file, or this line raises.
                LOGGER.info('Data {:s} are in SOKP format, date {:s}, time step {:d} seconds'.format(
                    file_name, date, self.time_step))
                self.data = pers_vehicles[:, 0]  # remember that the values are uint8
                del temp_data, gantry_data_struct, vehicle_counts, pers_vehicles
        return self
|
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
from typing import Dict
import numpy as np
# ------------------------------------------------------------
class Action(object):
"""
Action class with modifiers.
:ivar float bid: Bid.
:ivar dict of dict modifiers: dict of dict of modifiers. Every sub dict
contains modifiers for a single dimension (e.g. gender, location, device etc.).
Modifiers are expressed in a multiplicative form, e.g. +30\% is expressed as 1.3.
The value 1.0 denotes no modifier.
Example: {bid=1.0, modifiers={'gender': {'F': 1.2, 'M': 1.1, 'U': 1.0},
'age': {'0-19': 0.7, '30-39': 1.1, '60-69': 0.9, '50-59': 0.8, '70-*': 1.0, '20-29': 1.5, '40-49': 1.2}}}
"""
def __init__(self, bid, modifiers=None):
"""
:param bid: float
:param modifiers: if not given, must be validated/initialized against an ActionSet
"""
self.bid = bid
self.modifiers = modifiers # type: Dict[str, Dict[str, float]]
def __repr__(self):
if self.modifiers is not None:
if isinstance(self.modifiers, dict):
mod_truncated_dict = {k: {k2: np.round(v, 2) for k2, v in d.items()} for k, d in self.modifiers.items()}
return "{{bid={}, modifiers={}}}".format(self.bid, mod_truncated_dict)
else: # To be removed in the future after clean-up
return "{{bid={}, modifiers={}}}".format(self.bid, [[np.round(v, 2) for v in l] for l in self.modifiers])
else: # when modifier is unspecified
return "{{bid={}, modifiers={}}}".format(self.bid, "None")
class ActionSet(object):
    """
    Action Set class: describes the space of valid actions and provides a
    validator/normalizer for Action objects.
    """
    MOD_DEF_VALUE = 1.0  # default value for modifiers in validify_action

    def __init__(self, attr_set, max_bid, min_bid, max_mod, min_mod):
        """
        :param attr_set: Attribute set object
        :param max_bid: max possible base bid value
        :param min_bid: min possible base bid value
        :param max_mod: max possible modifier value
        :param min_mod: min possible modifier value
        """
        self.attr_set = attr_set
        self.max_bid = max_bid
        self.min_bid = min_bid
        self.max_mod = max_mod
        self.min_mod = min_mod

    def validify_action(self, a, in_place=False):
        """Initialize an action into a form valid for this action set.

        Implementation: fills all missing modifiers with
        ActionSet.MOD_DEF_VALUE. DOES NOT remove modifiers not defined in
        self.attr_set.attr_names.

        :param Action a: Action.
        :param bool in_place: if True, `a` is modified in-place; otherwise a
            new Action object is returned.
        :return: a valid Action if in_place=False (default); None otherwise
        """
        assert isinstance(a, Action)  # TODO exception handling
        new_mods = {}
        for k in self.attr_set.attr_names:
            new_mod = {k2: ActionSet.MOD_DEF_VALUE for k2 in self.attr_set.attr_sets[k]}
            if a.modifiers is not None and k in a.modifiers.keys():
                new_mod.update(a.modifiers[k])
            new_mods[k] = new_mod
        if in_place:
            a.modifiers = new_mods
            return None
        else:
            return Action(bid=a.bid, modifiers=new_mods)

    def is_valid(self, a):
        """Check whether action `a` is valid according to this ActionSet.

        Validity: the base bid is within [min_bid, max_bid]; modifiers are
        defined for every attribute/segment of self.attr_set; every modifier
        is within [min_mod, max_mod].

        :param a: Action
        :return: (True, None) if valid;
            (False, str) if invalid, where str explains the reason.
        """
        base_bid = a.bid
        mod_lists = a.modifiers
        attr_names = self.attr_set.attr_names
        if not len(mod_lists) == len(attr_names):
            return False, "modifier list's length not matching attribute names"  # number of attribute names mismatch
        if not self.min_bid <= base_bid:
            return False, "base bid less than min_bid"
        if not base_bid <= self.max_bid:
            return False, "base bid greater than max_bid"
        for k in attr_names:
            try:
                mods = a.modifiers[k]
            except KeyError:
                return False, "modifier does not have key {} defined".format(k)
            mod_list = []
            seg_names = self.attr_set.attr_sets[k]
            for k2 in seg_names:
                try:
                    mod_list.append(mods[k2])
                except KeyError:
                    return False, "modifier for {} does not have segment {} defined".format(k, k2)
            if not all([self.min_mod <= m for m in mod_list]):
                return False, "mod value less than min_mod "  # min_mod violated
            # Fixed: the upper bound compared against max_bid instead of
            # max_mod, contradicting the error message and letting
            # out-of-range modifiers through whenever max_bid > max_mod.
            if not all([m <= self.max_mod for m in mod_list]):
                return False, "mod value greater than max_mod"  # max_mod violated
        return True, None
if __name__ == "__main__":
# from simulator.attribute import AttrSet
import attribute
# sample attrSet
names = ['gender', 'age']
vals = {'gender': ['M', 'F', 'U'],
'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']}
attr_set = attribute.AttrSet(names, vals)
act_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)
# valid action
# a1 = Action(1.0, [ [1.1, 1.2, 1.0], [0.7, 1.5, 1.1, 1.2, 0.8, 0.9, 1.0] ] )
a1 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 1.0}})
print(act_set.is_valid(a1))
# invalid action: modifier not fully defined
a2 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9}})
print(act_set.is_valid(a2))
# invalid action: less than min_bid found
a3 = Action(0.00001, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 1.0}})
print(act_set.is_valid(a3))
# invalid action: greater than max_bid found
a4 = Action(120, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 1.0}})
print(act_set.is_valid(a4))
# invalid action: greater than max_mod found
a5 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 10.0}})
print(act_set.is_valid(a5))
# invalid action: less than min_mod found
a6 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 0.01}})
print(act_set.is_valid(a6))
# check __str__ form of Action
print(a1)
# sanity check for validify_action
a_inc1 = Action(1.0) # modifier not defined
print(a_inc1)
a_inc2 = act_set.validify_action(a_inc1) # in_place modification of a_inc1
print(a_inc1, a_inc2)
# checking in_place flag of validify_action
a_inc3 = Action(1.0)
print(a_inc3)
act_set.validify_action(a_inc3, in_place=True) # returns a new action (preserves a_inc2)
print(a_inc3)
# checking incomplete action fill-ins for a totally missing attribute name
a_inc4 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0}})
print(a_inc4)
a_inc4_validify = act_set.validify_action(a_inc4) # in_place modification of a_inc1
print(a_inc4_validify)
# checking incomplete action fill-ins for a partially missing attribute name with a totally missing name
a_inc5 = Action(1.0, {'gender': {'M': 1.1}})
print(a_inc5)
a_inc5_validify = act_set.validify_action(a_inc5) # in_place modification of a_inc1
print(a_inc5_validify)
|
# -*- coding: utf-8 -*-
'''
Author : Mingshi <fivemingshi@gmail.com>
Created : 2014/03/12 10:53:56
FileName: captcha.py
'''
from flask import Blueprint, Response, session
from wf.util.captcha import *
import StringIO
from wf import app
mod = Blueprint("captcha", __name__)
@mod.route('/captcha')
def captcha() :
    """Generate a CAPTCHA image, store its characters in the session, and
    return the image as a PNG response."""
    app.secret_key = app.config['SECRET_KEY']
    res = create()  # wf.util.captcha.create(): returns {'image': ..., 'chars': ...}
    image = res['image']
    SESSION_KEY_CAPTCHA = app.config['SESSION_KEY_CAPTCHA']
    # NOTE(review): the session key is wrapped in literal quote characters
    # ("'key'"); the verifying endpoint must use the same quoted form —
    # confirm this is intentional and not a bug.
    session["'" + SESSION_KEY_CAPTCHA + "'"] = res['chars']
    buf = StringIO.StringIO()
    image.save(buf,'png',quality=90)
    return Response(buf.getvalue(), mimetype='image/png')
|
from flask import Flask, render_template
import json
from sqlalchemy import create_engine
import psycopg2
# Flask app serving static assets from ./static at the site root.
# Fixed: a first `app = Flask(__name__)` instance that was immediately
# overwritten by the second constructor call has been removed.
app = Flask(__name__,
    static_url_path='',
    static_folder='static')
app.config['JSON_AS_ASCII'] = False
# NOTE(review): database credentials are hard-coded here and below; move
# them to environment variables/config before sharing or deploying.
engine = create_engine('postgresql://postgres:ximepss030311@localhost:5432/Movies_DB')
@app.route("/")
def home():
return render_template("home.html")
@app.route("/credits")
def credits():
try:
connection = psycopg2.connect(
database='Movies_DB',
user='postgres',
host='localhost',
password='ximepss030311'
)
cursor = connection.cursor()
postgreSQL_select_Query = 'SELECT * FROM "Credits_Data"'
cursor.execute(postgreSQL_select_Query)
mobile_records = cursor.fetchall()
except (Exception, psycopg2.Error) as error :
print ('Error while fetching data from PostgreSQL', error)
return render_template("credits.html", mobile_records = mobile_records)
@app.route("/movies")
def movies():
    """Render the movies page with every row of the "MOVIE_DATA" table.

    Opens a fresh PostgreSQL connection per request and always closes it
    (the original leaked the connection and raised NameError on query
    failure because mobile_records was never assigned).
    """
    mobile_records = []  # fallback so the template still renders on failure
    connection = None
    try:
        connection = psycopg2.connect(
            database='Movies_DB',
            user='postgres',
            host='localhost',
            password='ximepss030311'
        )
        cursor = connection.cursor()
        cursor.execute('SELECT * FROM "MOVIE_DATA"')
        mobile_records = cursor.fetchall()
        cursor.close()
    except (Exception, psycopg2.Error) as error:
        print('Error while fetching data from PostgreSQL', error)
    finally:
        # Always release the connection, even when the query failed.
        if connection is not None:
            connection.close()
    return render_template("index.html", mobile_records = mobile_records)
if __name__=="__main__":
    # Development server only; disable debug mode in production.
    app.run(debug=True)
# -*- coding: utf-8 -*-
from flask import render_template, jsonify, request
from flask.ext.basicauth import BasicAuth
from app import app
from app import db, models
from app import basic_auth
from sqlalchemy import func, orm
from sqlalchemy.orm import load_only
import LIRCCmd
from datetime import datetime, timedelta
import time
import parsedatetime as pdt
import re
# Human-readable labels for the thermostat state machine's numeric states.
thermoStateStr = {
    0 : u"INIT",
    1 : u"OFF-C",
    2 : u"EXT-C",
    3 : u"AC-FN",
    4 : u"AC-M",
    5 : u"AC-H",
    6 : u"OFF-H",
    7 : u"EXT-H",
    8 : u"HEAT"
}
# Module-level ORM aliases reused by the query helpers/routes below.
ol0 = orm.aliased(models.OperationLog)
wd0 = orm.aliased(models.WeatherData)
st0 = orm.aliased(models.SensorTagData)
def unix_time(dt):
    """Utility function - convert a naive datetime to seconds since the
    Unix epoch (as a float)."""
    return (dt - datetime.utcfromtimestamp(0)).total_seconds()
@app.route(u'/toggle_on_off')
@basic_auth.required
def toggle_on_off():
    """Toggle thermostat power via the LIRC IR command and report success."""
    LIRCCmd.toggleOnOff()
    return jsonify({u'Result':u'Success'})
@app.route(u'/_get_current_data')
def get_current_data():
    """Returns JSON describing the last thing in the system log."""
    global ol0
    global wd0
    #perform query
    # Select the single newest row of each table (the row whose id equals
    # the table's max id).
    opLog = db.session.query(ol0).filter(ol0.id==db.session.query(ol0).with_entities(func.max(ol0.id)).one()[0])[0]
    wData = db.session.query(wd0).filter(wd0.id==db.session.query(wd0).with_entities(func.max(wd0.id)).one()[0])[0]
    mTime = unix_time(opLog.time)
    inTemp = opLog.indoorTemp
    setPtTemp = opLog.setpointTemp
    # Map the numeric state to its human-readable label (Python 2 unicode()).
    state = unicode(thermoStateStr[opLog.state])
    extTemp = wData.extTemp
    extTempTime = unix_time(wData.time)
    return jsonify({
        u'inTemp' : inTemp,
        u'inTempTime' : mTime,
        u'outTemp' : extTemp,
        u'outTempTime' : extTempTime,
        u'setPtTemp' : setPtTemp,
        u'opMode' : state
        })
@app.route(u'/_get_history')
@basic_auth.required
def get_history():
    """Returns JSON containing the last n hours of log data."""
    global ol0
    global wd0
    # Window size in hours comes from the ?hours= query parameter.
    h = float(request.args.get('hours'))
    #perform query
    opLog = db.session.query(ol0).filter(ol0.time >= datetime.now() - timedelta(hours=h)).all()
    wData = db.session.query(wd0).filter(wd0.time >= datetime.now() - timedelta(hours=h)).all()
    #extract data we care about
    opLogTimes = [unix_time(x.time) for x in opLog]
    opLogStates = [x.state for x in opLog]
    opLogTemps = [x.indoorTemp for x in opLog]
    opLogSetTemps = [x.setpointTemp for x in opLog]
    wDataTimes = [unix_time(x.time) for x in wData]
    wDataTemps = [x.extTemp for x in wData]
    return jsonify({
        u'opTimes' : opLogTimes,
        u'opModes' : opLogStates,
        u'indTemps' : opLogTemps,
        u'setTemps' : opLogSetTemps,
        u'extTempTimes' : wDataTimes,
        u'extTemps' : wDataTemps
        })
@app.route(u'/_get_st_history')
@basic_auth.required
def get_st_history():
    """Returns JSON containing the last n hours of SensorTag data.

    Response maps each tag's MAC address to a list of
    (time, temperature, relHumidity) tuples.
    """
    global st0
    # Window size in hours comes from the ?hours= query parameter.
    h = float(request.args.get('hours'))
    #perform query
    stLog = db.session.query(st0).filter(st0.time >= datetime.now() - timedelta(hours=h)).all()
    # Group readings per tag; setdefault replaces the manual membership test.
    dataDict = dict()
    for x in stLog:
        dataDict.setdefault(x.macAddr, list()).append((x.time, x.temperature, x.relHumidity))
    return jsonify(dataDict)
@app.route(u'/graphs')
@basic_auth.required
def graphs():
    """Render the history-graphs page."""
    return render_template(u"graphs.html", title=u'Thermostat v0.1')
@app.route(u'/schedule')
@basic_auth.required
def schedule():
    """Render the schedule-editing page."""
    return render_template(u"schedule.html", title=u'Thermostat v0.1')
@app.route(u'/getSchedule')
@basic_auth.required
def getSchedule():
    """Returns JSON describing the current schedule.
    Used to populate the schedule page on load.

    Maps each schedule row id to its day, time (hour/minute) and
    low/high temperature setpoints.
    """
    sched = db.session.query(models.Schedule).all()
    # Dict comprehension replaces the manual per-key assembly (and the stray
    # trailing semicolon on the return).
    sendJson = {
        x.id: {
            'day': x.day,
            'tHour': x.time.hour,
            'tMinute': x.time.minute,
            'low': x.lowSetpoint,
            'high': x.highSetpoint,
        }
        for x in sched
    }
    return jsonify(sendJson)
@app.route(u'/scheduleSubmit', methods=['POST'])
@basic_auth.required
def scheduleSubmit():
    """Parses the submitted form and populates the schedule database.

    POST data arrives as groups of four fields sharing a numeric suffix N:
    timepickerN (human-readable time), daypickerN (integer 0-6,
    Sunday..Saturday), and highTempBoxN / lowTempBoxN (setpoints in deg C).
    Groups may arrive unordered; N values are unique but not necessarily
    consecutive. After validation the Schedule table is replaced wholesale
    in a single commit (rolled back on any failure).
    """
    # Nested dict entries[N][attribute] = parsed value.
    entries = {}
    cal = pdt.Calendar()
    # loop through keys in POST, format them into nested dict
    for key in request.form:
        # key is a word ending in a number. We need to separate them.
        try:
            Nstr = re.search(r'(\d+)$', key).group(0)
            N = int(Nstr)
            attr = key[:(-1*len(Nstr))]
            # make a new nested dict if this is the first time we've seen this key
            if not (N in entries):
                entries[N]={}
            if attr == 'timepicker':
                # parsedatetime returns a struct_time plus a success flag.
                (dtstruct, success) = cal.parse(request.form[key])
                if success:
                    eventTime = datetime.fromtimestamp(time.mktime(dtstruct)).time()
                else:
                    return u'Error: Failed to parse ' + key + u' value ' + request.form[key] + u'as time', 400
                entries[N][attr]=eventTime
            elif attr == 'daypicker':
                try:
                    dayInt = int(request.form[key])
                except ValueError:
                    return u'Error: Could not parse ' + key + u' value '+request.form[key] + u' as integer', 400
                entries[N][attr]=dayInt
            elif attr == 'highTempBox' or attr == 'lowTempBox':
                try:
                    tVal = float(request.form[key])
                except ValueError:
                    return u'Error: Could not parse ' + key + u' value '+request.form[key] + u' as float', 400
                entries[N][attr]=tVal
            else:
                return u'Error: Attribute ' + attr + u' is unexpected', 400
        except ValueError:
            return u'Error: Could not parse string ' + Nstr + u' in key ' + key + u' as integer', 400
        except AttributeError:
            return u'Error: String ' + key + u' does not contain a number', 400
    # Now that everything is organized, we first verify that we have all the information we need...
    for n in entries:
        if not (('timepicker' in entries[n]) and ('daypicker' in entries[n]) and ('highTempBox' in entries[n]) and ('lowTempBox' in entries[n])):
            return u'Error: Set ' + str(n) + u'does not contain all required entries', 400
        if entries[n]['lowTempBox'] > entries[n]['highTempBox']:
            return u'Error: Set ' + str(n) + u' low setpoint greater than high setpoint', 400
    try:
        # ...nuke the existing schedule table...
        nRowsDeleted = db.session.query(models.Schedule).delete()
        # ...then add things back appropriately.
        for n in entries:
            scheduleRow = models.Schedule(day = entries[n]['daypicker'], time = entries[n]['timepicker'], lowSetpoint = entries[n]['lowTempBox'], highSetpoint = entries[n]['highTempBox'])
            db.session.add(scheduleRow)
        # then make all changes at once.
        db.session.commit()
        return u'Schedule Updated'
    except Exception as e:
        db.session.rollback()
        # Bug fix: the original concatenated a unicode string with the type
        # object itself (a TypeError); report the exception class name.
        return u'Schedule Update Failed: ' + type(e).__name__, 500
@app.route(u'/')
@app.route(u'/index')
def index():
    """Render the landing page."""
    return render_template(u"index.html", title=u'Thermostat v0.1')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tutorial.cloud.forms import GeneralForm,HostForm,CostForm
from django.shortcuts import render,redirect
from .models import General as gen, Hosts as hos, Costs as cos
from django.views.generic import TemplateView
from django.db.models import Max
# Module-global key linking the General/Hosts/Costs rows of one submission.
# NOTE(review): shared mutable global — not safe under concurrent requests;
# confirm a single-worker deployment before relying on it.
key=0
def General(request):
    """Show the General configuration form (GET) or persist a submission
    (POST) under a freshly allocated global key, then redirect to /Host/."""
    global key
    if request.method=="POST":
        form = GeneralForm(request.POST)
        if form.is_valid():
            # Allocate the next key as max(existing keys) + 1 (1 when empty).
            key=gen.objects.all().aggregate(Max('key'))
            if key['key__max']== None :
                print key, key['key__max'], key['key__max'] == None
                key=1
            else:
                key=key['key__max']+1
            AllocationPolicy=form.cleaned_data['AllocationPolicy']
            os=form.cleaned_data['os']
            Hypervisor=form.cleaned_data['Hypervisor']
            SchedulingInterval=form.cleaned_data['SchedulingInterval']
            UpperThreshold=form.cleaned_data['UpperThreshold']
            LowerThreshold=form.cleaned_data['LowerThreshold']
            VMmigrations=form.cleaned_data['VMmigrations']
            MonitoringInterval=form.cleaned_data['MonitoringInterval']
            general = gen.objects.create(
                key=key,
                AllocationPolicy=AllocationPolicy,
                os=os,
                Hypervisor=Hypervisor,
                SchedulingInterval = SchedulingInterval,
                UpperThreshold = UpperThreshold,
                LowerThreshold=LowerThreshold,
                VMmigrations=VMmigrations,
                MonitoringInterval=MonitoringInterval,
            )
            general.save()
            return redirect('/Host/')
        else:
            return redirect('/General/')
    else:
        form = GeneralForm()
    return render(request,'general.html',{"form":form})
def host(request):
    """Show the Hosts form (GET) or persist a submission (POST) linked to
    the current global key, then redirect to /Cost/."""
    global key
    if request.method=="POST":
        form = HostForm(request.POST)
        if form.is_valid():
            amount=form.cleaned_data['amount']
            ram=form.cleaned_data['ram']
            Bandwidth=form.cleaned_data['Bandwidth']
            Storage=form.cleaned_data['Storage']
            MaxPower=form.cleaned_data['MaxPower']
            StaticPower=form.cleaned_data['StaticPower']
            ProcessingElement=form.cleaned_data['ProcessingElement']
            MPS=form.cleaned_data['MPS']
            # Row is tied to the key allocated by the General view.
            HOST = hos.objects.create(
                key=key,
                amount=amount,
                ram = ram,
                Bandwidth = Bandwidth,
                Storage =Storage,
                MaxPower=MaxPower,
                StaticPower=StaticPower,
                ProcessingElement=ProcessingElement,
                MPS=MPS
            )
            HOST.save()
            return redirect('/Cost/')
        else:
            return redirect('/Hosts/')
    else:
        print key
        form = HostForm()
        return render(request,'general.html',{"form":form})
def cost(request):
    """Show the Costs form (GET) or persist a submission (POST) linked to
    the current global key, then redirect to /ask/."""
    global key
    if request.method != "POST":
        return render(request, 'general.html', {"form": CostForm()})
    form = CostForm(request.POST)
    if not form.is_valid():
        return redirect('/Cost/')
    cleaned = form.cleaned_data
    record = cos.objects.create(
        key=key,
        ProcessingCost=cleaned['ProcessingCost'],
        MemoryCost=cleaned['MemoryCost'],
        StorageCost=cleaned['StorageCost'],
        BandwidthCost=cleaned['BandwidthCost'],
    )
    record.save()
    return redirect('/ask/')
def ask(request):
    """Render the 'ask' page on GET; POST handling is an unimplemented stub
    (falls through and returns None, as in the original)."""
    if request.method != 'POST':
        return render(request, 'ask.html')
def schedule(request):
    """Render the 'schedule' page on GET; POST handling is an unimplemented
    stub (falls through and returns None, as in the original)."""
    if request.method != 'POST':
        return render(request, 'schedule.html')
'''
Using tensorflow for simple linear regression.
Uses imperative style of creating networks with model subclassing API.
'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Create dataset
N = 400
# `lambda: None` is a cheap anonymous namespace object that accepts
# arbitrary attributes (data.x, data.y).
data = lambda: None
data.x = np.linspace(-1, 1, N)
# Noisy exponential target, scaled by 10*e so values land roughly in [0, 1].
data.y = (10 * np.exp(data.x) + 2 * np.random.rand(N)) / (10 * math.exp(1))
plt.scatter(data.x, data.y)
# Training Parameters
learning_rate = 0.001
num_epochs = 100
display_step = 10
class Model(object):
    """Minimal linear model y = W*x + b with two trainable scalar parameters.

    W and b are accessed directly by train() and the plotting code below,
    so their names are part of the public interface.
    """
    def __init__(self):
        self.W = tf.Variable(0.0, dtype=tf.float64) # Weights for layer
        self.b = tf.Variable(0.0, dtype=tf.float64) # Bias for layer
    def __call__(self, x):
        # Calling the model applies the affine transform element-wise.
        return self.W * x + self.b
@tf.function
def loss(y, y_target):
    """Mean squared error between predictions y and targets y_target."""
    return tf.reduce_mean(tf.square(y - y_target))
def train(model, inputs, outputs, learning_rate):
    """Run one gradient-descent step, updating model.W and model.b in place."""
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    # Gradients are taken after the tape context closes.
    dW, db = t.gradient(current_loss, [model.W, model.b])
    model.W.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)
model = Model()
# Train model and collect accuracy for plotting
epochs = range(num_epochs)
for epoch in epochs:
    # Loss is evaluated before the step, so the printed value lags one update.
    current_loss = loss(model(data.x), data.y)
    # NOTE(review): learning_rate=0.1 here overrides the module-level
    # learning_rate = 0.001 declared above — confirm which rate is intended.
    train(model, data.x, data.y, learning_rate=0.1)
    if(epoch % display_step == 0):
        print('Epoch %2d: training loss=%2.5f' % (epoch, current_loss))
# Plot Results
plt.plot(data.x, model.W * data.x + model.b, 'r')
plt.show()
|
import pexpect
# Upload the known_ppl directory to the remote face_database via scp,
# answering the password prompt interactively.
# NOTE(review): the password is hard-coded in plaintext — prefer SSH keys or
# an environment variable.
child = pexpect.spawn('scp -rp ./known_ppl face@ge0rges.com:/var/www/ge0rges.com/html/face_database')
child.expect("Password:")
child.sendline("face")
# Create index file
# Write to index file at ./index
child = pexpect.spawn('scp -rp ./index face@ge0rges.com:/var/www/ge0rges.com/html/face_database/index')
child.expect("Password:")
child.sendline("face")
|
#! python3
# -*- coding: utf-8 -*-
"""
@author: Qian Pan
@e-mail: qianpan_93@163.com
"""
from configure import *
class EffiCost:
    """
    References
    ----------
    [1] Latora, Vito, and Massimo Marchiori.
    "Efficient behavior of small-world Networks."
    *Physical Review Letters* 87.19 (2001): 198701.
    <http://dx.doi.org/10.1103/PhysRevLett.87.198701>
    [2] Latora, Vito, & Marchiori, Marchiori.
    "Economic small-world behavior in weighted Networks."
    The European Physical Journal Z-Condensed Matter and Complex Systems, (2003) 32, 249-263.
    """
    def __init__(self, distance_col):
        # Name of the edge-attribute column used as the distance weight.
        self.WEIGHT = distance_col
    def global_efficiency(self, graph):
        """Mean of 1/shortest-path-length over all unordered node pairs."""
        sum_eff = 0
        n = len(graph)
        denom = n * (n - 1) / 2
        '''
        nx.all_pairs_dijkstra_path_length:
        Compute shortest path lengths between all nodes in a weighted graph.
        '''
        length = dict(nx.all_pairs_dijkstra_path_length(graph, weight=self.WEIGHT))
        if denom != 0:
            for key in length:
                for subkey in length[key].keys():
                    if key < subkey:  # count each unordered pair once
                        eff = 1 / length[key][subkey]
                        if eff != 0:
                            sum_eff += eff
            g_eff = sum_eff / denom
        else:
            g_eff = 0
        return g_eff
    def local_efficiency(self, graph, v):
        """Efficiency of v's neighbourhood: global efficiency of the ego
        network around v with v itself excluded."""
        egoNet = nx.ego_graph(graph, v, center=False, undirected=True)
        GE_ego_real = self.global_efficiency(egoNet)
        return GE_ego_real
    def complete_graph(self, graph, v, dict_dis):
        """Ideal local efficiency: global efficiency of the complete graph
        over v's neighbours, weighted with the known pairwise distances."""
        list_source = []
        list_target = []
        list_dis = []
        list_neighbors = list(graph.neighbors(v))
        # Enumerate every unordered neighbour pair and look up its distance.
        for i, node_i in enumerate(list_neighbors[:-1]):
            for j, node_j in enumerate(list_neighbors[i + 1:]):
                list_source.append(node_i)
                list_target.append(node_j)
                edge = str(node_i) + str('--') + str(node_j)
                list_dis.append(dict_dis.get(edge))
        data = pd.DataFrame()
        data['Source'] = list_source
        data['Target'] = list_target
        data[self.WEIGHT] = list_dis
        ego_graph_dis = nx.from_pandas_edgelist(data, 'Source', 'Target',
                                                edge_attr=self.WEIGHT, create_using=nx.Graph())
        ge_ego_ideal = self.global_efficiency(ego_graph_dis)
        return ge_ego_ideal
    def effi_cost(self):
        """Compute and print the normalised global efficiency, local
        efficiency and cost of the weighted network, per refs [1] and [2]."""
        all_dis = pd.read_csv('../data/Other data/Distance_SR_GC_' + YEAR + '.csv')
        # Edge label ("source--target") -> distance lookup table.
        dict_dis = dict(zip(all_dis['Edge'], all_dis[self.WEIGHT]))
        edgedata = Edges.copy()
        edgedata['Edge'] = edgedata['source'].astype(str) + str('--') + edgedata['target'].astype(str)
        edgedata[self.WEIGHT] = edgedata['Edge'].apply(dict_dis.get)
        weighted_G = nx.from_pandas_edgelist(edgedata, 'source', 'target', edge_attr=self.WEIGHT, create_using=nx.Graph())
        # Cost: total distance of realised edges relative to all possible edges.
        cost_all = all_dis[self.WEIGHT].sum() / 2
        cost = edgedata[self.WEIGHT].sum() / cost_all
        GE_dis = self.global_efficiency(weighted_G)
        GE_dis_ideal = sum(1 / all_dis[self.WEIGHT]) / len(all_dis)
        GE = GE_dis / GE_dis_ideal
        portslist = list(nx.nodes(weighted_G))
        list_LE_real = []
        list_LE_ideal = []
        for port in portslist:
            LE_real = self.local_efficiency(weighted_G, port)
            list_LE_real.append(LE_real)
            LE_ideal = self.complete_graph(weighted_G, port, dict_dis)
            list_LE_ideal.append(LE_ideal)
        df_LE = pd.DataFrame()
        df_LE['LE_real'] = list_LE_real
        df_LE['LE_ideal'] = list_LE_ideal
        df_LE = df_LE[df_LE['LE_ideal'] > 0]
        # NOTE(review): divides by G.number_of_nodes() (a global from
        # configure) rather than weighted_G.number_of_nodes() — confirm both
        # graphs have the same node count.
        LE = sum(df_LE['LE_real'] / df_LE['LE_ideal']) / G.number_of_nodes()
        print()
        print('The in-text result:')
        print()
        if self.WEIGHT == 'Distance(SR,unit:km)':
            print('(1) Calculation based on the real nautical distance between ports')
            print()
            print('"To validate the reliability of our results for the economic small-world properties of the GLSN that '
                  'are obtained by the adoption of real nautical distance between ports—a high global efficiency of '
                  '{:.3f}, a high local efficiency of {:.3f} and a low cost of {:.3f}, we introduce a configuration null '
                  'mode where links in the real network topology are randomly rewired with nodes’ '
                  'degree sequence being preserved."'.format(GE, LE, cost))
        if self.WEIGHT == 'Distance(GC,unit:km)':
            print('(2) Calculation based on the great-circle distance between ports')
            print()
            print('"To validate the reliability of our results for the economic small-world properties of the GLSN that '
                  'are obtained by the adoption of great-circle distance between ports—a high global efficiency of '
                  '{:.3f}, a high local efficiency of {:.3f} and a low cost of {:.3f}, we introduce a configuration null '
                  'mode where links in the real network topology are randomly rewired with nodes’ '
                  'degree sequence being preserved."'.format(GE, LE, cost))
        print()
def startup(distance_col):
    """Build an EffiCost for *distance_col* and run the efficiency/cost report."""
    EffiCost(distance_col).effi_cost()
|
from django.http.response import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import authenticate
from .forms import Blog_Form,Busqueda_Blog_Form
from .models import Blog,Productos_Relacionados,Rel_Blog_Blog,ContenidoBlog
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.forms.models import inlineformset_factory
from inventario.models import Img_Producto
def alta_edicion_blog(request,id_blog=None):
    """Create (id_blog is None) or edit an existing Blog together with its
    three inline formsets: related products, related blogs and content
    paragraphs. Requires an authenticated user."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('seguridad:login'))
    if id_blog:
        blog=Blog.objects.get(id=id_blog)
    else:
        blog=Blog()
    # One inline formset class per child table, all keyed by the Blog FK.
    Productos_Relacionados_Formset=inlineformset_factory(Blog,Productos_Relacionados,fields=["id_producto_relacionado",],fk_name="id_blog",extra=1,can_delete=True)
    Contenido_Blog_Formset=inlineformset_factory(Blog,ContenidoBlog,fields=["contenido_blog"],fk_name="id_blog",extra=1,can_delete=True)
    Blog_Relacionados=inlineformset_factory(Blog,Rel_Blog_Blog,fields=["id_blog","id_blog_relacionado",],fk_name="id_blog",extra=1,can_delete=True)
    if request.method=="POST":
        form=Blog_Form(request.POST,instance=blog)
        productos_relacionados_formset=Productos_Relacionados_Formset(request.POST,instance=blog)
        blog_relacionados=Blog_Relacionados(request.POST,instance=blog)
        contenido_blog_formset=Contenido_Blog_Formset(request.POST,instance=blog)
        # Save only when the main form and all three formsets validate.
        if form.is_valid() and productos_relacionados_formset.is_valid() and blog_relacionados.is_valid() and contenido_blog_formset.is_valid():
            form.save()
            productos_relacionados_formset.save()
            blog_relacionados.save()
            contenido_blog_formset.save()
            return HttpResponseRedirect(reverse('blog:busqueda_blog'))
    else:
        form=Blog_Form(instance=blog)
        productos_relacionados_formset=Productos_Relacionados_Formset(instance=blog)
        blog_relacionados=Blog_Relacionados(instance=blog)
        contenido_blog_formset=Contenido_Blog_Formset(instance=blog)
    # locals() hands the form and all formsets to the template.
    return render(request,'blog/alta_blog.html',locals())
def busqueda_blog(request):
    """Blog search view.

    POST filters by an optional date range (fecha_inicial/fecha_final) and
    an optional status (id_estatus); GET shows the empty search form with
    every blog.
    """
    if request.method=="POST":
        fecha_i=request.POST.get("fecha_inicial")
        fecha_f=request.POST.get("fecha_final")
        if request.POST.get("id_estatus"):
            id_estatus=int(request.POST.get("id_estatus"))
        else:
            id_estatus=0
        # Default to every blog. This also covers a partially-filled date
        # range, which previously left `blog` unassigned (NameError).
        blog=Blog.objects.all()
        # Bug fix: the original tested fecha_i twice instead of fecha_f.
        if fecha_i!="" and fecha_f!="":
            if id_estatus>0:
                blog=Blog.objects.filter(fecha__range=(fecha_i,fecha_f),id_estatus=id_estatus)
            else:
                blog=Blog.objects.filter(fecha__range=(fecha_i,fecha_f))
        elif id_estatus>0:
            blog=Blog.objects.filter(id_estatus=id_estatus)
        form=Busqueda_Blog_Form(request.POST)
    else:
        form=Busqueda_Blog_Form()
        blog=Blog.objects.all()
    print(blog)
    return render(request,'blog/busca_blog.html',locals())
@api_view(["GET"])
def api_consulta_blogs(request):
    """Return id, name and image for every active blog (id_estatus == 1)."""
    blogs = []
    try:
        # Only blogs with the "active" status are exposed.
        for entry in Blog.objects.filter(id_estatus=1):
            blogs.append({
                "id_blog": entry.id,
                "nombre_blog": entry.nombre_blog,
                "imagen_blog": entry.imagen_blog,
            })
    except Exception as e:
        print(e)
    return Response(blogs)
# Receives id_blog as a GET parameter.
@api_view(["GET"])
def api_consulta_detalle_blog(request):
    """Return the full detail of one blog: first paragraph, remaining
    paragraphs, related products (each with its order-1 image) and
    related blogs. Any failure is printed and an empty list is returned."""
    detalle_blog=[]
    contenido_blog=[]
    b_r=[]
    p_r=[]
    primer_parrafo=""
    try:
        b=Blog.objects.get(id=int(request.GET.get("id_blog")))
        # NOTE(review): this queryset assignment is immediately overwritten
        # by the loop below, which sets primer_parrafo to the first
        # paragraph's text — confirm it can be removed.
        primer_parrafo=ContenidoBlog.objects.filter(id_blog=b)[:1]
        c_b=ContenidoBlog.objects.filter(id_blog=b)
        cont=0
        # First paragraph is reported separately; the rest are collected.
        for x in c_b:
            if cont==0:
                primer_parrafo=x.contenido_blog
                cont=1
            else:
                contenido_blog.append({"parrafo":x.contenido_blog})
                cont=1
        #********************************************************************************************
        # Fetch the related products.
        prod_r=Productos_Relacionados.objects.filter(id_blog=b)
        if prod_r.exists():
            for p in prod_r:
                # Fetch the related image with order 1;
                # the order-1 image should be the product's main image.
                try:
                    img_r=Img_Producto.objects.get(id_producto=p.id_producto_relacionado,orden=1)
                    p_r.append({'id_producto_relacionado':p.id_producto_relacionado.id,'nombre_producto':p.id_producto_relacionado.nombre,'img_producto_rel':img_r.nom_img,'orden':img_r.orden,'precio':p.id_producto_relacionado.precio})
                except Exception as e:
                    print("el producto no tiene productos relacionados con el orden valor=1")
                    print(e)
                    img_r=[]
        #********************************************************************************************
        # Fetch the related blogs.
        blog_r=Rel_Blog_Blog.objects.filter(id_blog=b)
        if blog_r.exists():
            for br in blog_r:
                b_r.append({"id_blog":br.id_blog_relacionado.id,"nombre_blog":br.id_blog_relacionado.nombre_blog,"imagen_blog":br.id_blog_relacionado.imagen_blog})
        detalle_blog.append({"autor":b.autor,"puesto_autor":b.puesto_autor,"id_blog":b.id,"nombre_blog":b.nombre_blog,"imagen_blog":b.imagen_blog,"fecha":b.fecha,"contenido":contenido_blog,"primer_parrafo":primer_parrafo,'prod_relacionado':p_r,'blog_relacionados':b_r})
    except Exception as e:
        print(e)
    return Response(detalle_blog)
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Cropping2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.advanced_activations import ELU
from keras.regularizers import l2, activity_l2
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from os import getcwd
import csv
import tensorflow as tf
from keras.utils.visualize_util import plot
def network (input_shape, crop_shape):
    """Build an NVIDIA-style steering CNN (Keras 1.x Sequential API).

    input_shape: full input image shape, e.g. [160, 320, 3].
    crop_shape: ((top, bottom), (left, right)) pixels removed by Cropping2D.
    Returns the uncompiled Sequential model with a single regression output.
    """
    model = Sequential()
    #Crop image
    model.add(Cropping2D(crop_shape, input_shape = input_shape, name= 'Crop'))
    # Normalize
    model.add(BatchNormalization(axis=1, name="Normalize"))
    # Add three 5x5 convolution layers (output depth 24,36,48) each with 2x2 stride
    model.add(Convolution2D(24, 5, 5, subsample =(2,2), border_mode ='valid',
                            W_regularizer = l2(0.001), name = 'Convolution2D1'))
    model.add(ELU())
    model.add(Convolution2D(36, 5, 5, subsample = (2,2), border_mode = 'valid',
                            W_regularizer = l2(0.001), name = 'Convolution2D2'))
    model.add(ELU())
    model.add(Convolution2D(48, 5, 5, subsample = (2, 2), border_mode = 'valid',
                            W_regularizer = l2(0.001), name = 'Convolution2D3'))
    # NOTE(review): unlike every other conv layer, Convolution2D3 is not
    # followed by an ELU activation — confirm whether that is intentional.
    # Add two 3x3 convolution layers (output depth 64, and 64)
    model.add(Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l2(0.001),
                            name = 'Convolution2D4'))
    model.add(ELU())
    model.add(Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l2(0.001),
                            name = 'Convolution2D5'))
    model.add(ELU())
    # Add a flatten layer
    model.add(Flatten(name = 'Flatten'))
    # Add three fully connected layers (depth 100, 50, 10)
    model.add(Dense(100, W_regularizer=l2(0.001), name = 'FC2'))
    model.add(ELU())
    model.add(Dense(50, W_regularizer=l2(0.001), name = 'FC3'))
    model.add(ELU())
    model.add(Dense(10, W_regularizer=l2(0.001), name = 'FC4'))
    model.add(ELU())
    # Add a fully connected output layer
    model.add(Dense(1, name = 'Readout'))
    return model
# 160x320 RGB input; crop 50 px off the top and 20 px off the bottom.
model = network([160, 320, 3], ((50,20),(0,0)))
model.summary()
#plot(model, to_file="model.png", show_shapes=True)
|
from .custom_permissions import CustomPermissionsUser
from .custom_user import CustomUser, ExtensionUser
from .invalid_models import CustomUserNonUniqueUsername
from .is_active import IsActiveTestUser1
from .uuid_pk import UUIDUser
from .with_foreign_key import CustomUserWithFK, Email
# Public API of this package: the custom user models re-exported above.
__all__ = (
    'CustomUser', 'CustomPermissionsUser', 'CustomUserWithFK', 'Email',
    'ExtensionUser', 'IsActiveTestUser1', 'UUIDUser',
    'CustomUserNonUniqueUsername',
)
|
# =============================================================================================
# Class <int>
# =============================================================================================
# Integers of arbitrary magnitude
# =============================================================================================
x = 100
print(x)
print(type(x))
x = int("-100") # Parse from a string
print("valor {0} e tipo {1}".format(x, type(x)))
x = int(5600.50) # Parse from a float (truncates toward zero)
print("valor {0} e tipo {1}".format(x, type(x)))
print("valor {0} e bits usados {1}".format(x, x.bit_length())) # bit_length: total number of bits used to store the value
x = int("010", 2) # Create an integer from a base-2 string
print("valor {0} e tipo {1}".format(x, type(x)))
x = int(1000000000000000000000000000) # Create a positive integer of large magnitude
print("valor {0} e tipo {1}".format(x, type(x)))
x = int(-1000000000000000000000000000) # Create a negative integer of large magnitude
print("valor {0} e tipo {1}".format(x, type(x)))
def bfs(begin, target, words, visited):
    """Breadth-first search over *words*, filling *visited* in place with the
    number of one-letter transformation steps needed to reach each word from
    *begin* (0 = unreached). Returns None; stops once *target* is dequeued.

    Adjacency test: remove each of word's letters (as a multiset) from the
    current word's letters; exactly one leftover letter means the two words
    differ in a single position. visited[] is indexed via words.index(...),
    preserving the original first-occurrence semantics for duplicates.
    """
    from collections import deque  # O(1) popleft vs list.pop(0)'s O(n) shift

    queue = deque([begin])
    while queue:
        now = queue.popleft()
        if now == target:
            return
        for word in words:
            # Multiset difference: letters of `now` not matched by `word`.
            leftover = list(now)
            for ch in word:
                if ch in leftover:
                    leftover.remove(ch)
            # One leftover letter == adjacent; only enqueue unvisited words.
            if len(leftover) == 1 and visited[words.index(word)] == 0:
                queue.append(word)
                if now == begin:
                    visited[words.index(word)] = 1
                else:
                    visited[words.index(word)] = visited[words.index(now)] + 1
def solution(begin, target, words):
    """Return the minimum number of one-letter transformations needed to turn
    *begin* into *target* using only entries of *words*; 0 when impossible."""
    if target not in words:
        return 0
    steps = [0] * len(words)
    bfs(begin, target, words, steps)
    return steps[words.index(target)]
def user_recommendation():
    """Interactive user recommender: TF-IDF over users' short bios from the
    local MySQL `lucid` database, cosine similarity between all pairs, then
    prints the top matches for a username read from stdin."""
    #import libraries
    import pandas as pd
    import numpy as np
    import sqlalchemy
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import linear_kernel
    try:
        # Create the database engine
        # NOTE(review): connection string has an empty password and a
        # hard-coded host — confirm/secure before deployment.
        engine = sqlalchemy.create_engine('mysql://root:@localhost:3306/lucid')
        #reading the users table
        ds = pd.read_sql_table('users', engine)
        #renaming the empty rows with space
        ds = ds.fillna(' ')
        #analyzing the words in the column and removing common stop words, calculating the cosine similarities
        tf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='english')
        tfidf_matrix = tf.fit_transform(ds['short_bio'])
        cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
        results = {}
        # For each user, keep the 99 most similar users (first hit is the
        # user itself, dropped by the [1:] slice below).
        for idx, row in ds.iterrows():
            similar_indices = cosine_similarities[idx].argsort()[:-100:-1]
            similar_items = [(cosine_similarities[idx][i], ds['username'][i]) for i in similar_indices]
            results[row['username']] = similar_items[1:]
        def item(username):
            # First segment of the user's short bio, used as a display label.
            return ds.loc[ds['username'] == username]['short_bio'].tolist()[0].split(' - ')[0]
        # a function that reads the results out of the column and the amount of results wanted.
        #the username that the recommendation would acted upon
        uu= str(input('Input the username: '))
        #num = int(input('Input the amount of people to be recommmended: '))
        def recommended(item_username, num):
            # Print the top `num` matches with their similarity scores.
            print("Recommending " + " people similar to " + item(item_username) + "...")
            print("-------")
            recs = results[item_username][:num]
            for rec in recs:
                print("Recommended: " + item(rec[1]) + " (score:" + str(rec[0]) + ")")
                gg =(ds.loc[ds['short_bio'] == item(rec[1])])
                nn = gg['name']
                username = gg['username']
                print(nn + ", username " +username)
        recommended(item_username=uu, num=3)
    except KeyError:
        print("We don't have any Username of such in our database!")
if __name__=='__main__':
    user_recommendation()
|
# -*- coding: utf-8 -*-
'''
Created on 2021/2/25
Author Andy Huang
'''
import socket
from fan_detect.settings import SOCKET_PORT, SOCKET_HOST, MEDIA_ROOT
from handler import *
import cv2
# from multiprocessing import process
# import datetime
# import re
#
# s = datetime.datetime.now()
# # img_path = handle_path(MEDIA_ROOT,"test5.jpg")
#
img = "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAFZAcwDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDxPHPtS4x0oA9c0voD0q7iANliKUdsjIpeAwxQBk8ZoFcFyOvFPww5xmkAJqYL60AIpwc9PapFGTmkAyRxU0aEHjvU3GiSOPcuP1qZLc44xxV22s2kj3cYFOSB9zDBxSvcCmYdpOBzUTjIyRwK0pACNvOahjt/NBU8DvkUxlGJ2WTippJpApHXvViW12AbWzn9KdHBkYoA0fD+pCFlRxnmpNVuy10231rPhtmE3yjBpZo2M3I57ikJs3otYlNolurZ2+lRKjSyDfnk96s6VpSND5nQn1q01uEvo0yME0XJ2NKG2SK3XAx8tZocxzsRw2eorXvpfIjwOgFYCSb5Cx9aEJnS23iXUNPCeXKWA/vVowfE64jYpLbE47hq52aMSwqBxha59h++fmk4JlJs9Fn+K0wBEdoc+7cVzOqeP9a1BSgl8hDx+761zUhwxH86Yckc9aORBdjnleaRpHYu56ljkmjbnrx9KRMdalH1qthDCBtxjJNMAwcGpqQj060XAhdCVzTduO2TVkc4yKaR+FO4WK2GUHrUbMx521eIBppAx2p81gZmlWcgY/KlkfGEFaG0Dpx+FRFE5LD8aSkFjPbdxiomYgkEGtTYuTxUTRLup8wJGWXYJ/SkUkg7ugrQMa4JwOKhMa073HYqq5yOKeZFJxUsgBGPT0qsUVRkUXFYe7kc5yKiMxVuvFK3C47VWY/epXYy2tweKDOQW/SqWSAOaQyNnsAKdwsX4rsqcY4qU3WRWX5hz1pzORginfQVjSW5Jbg8+9Ted0JbpWOrEnOeatkFY1bFJjL4n+bgnFOE4Oe9Zu5jgDpT1YqevND8hF8XG0VesJf9IVm+tYofIq3Zv+/T0qSjsftKvZunqK1vD0pjhwehU9K41blo3254NdLod5tg2Yz6mla5BkOitfXI3c7jisvUrfcVOK17hEXUZW3fK3OKrX4HkJIBkBqRRxd6PLkII+YVV3D0Faes7TcngisvC1aHYpbvrQJFyOlSGJ+m2k+yuRnH4UwE3bjnP0pVI6k00wuP4aVonLEgHihgTIQG9Kn3DH3aqCN89OKd861IF6IKOetWI+WGP1rLEjds1OLhlxkZzSsB0ENzsj2LxT1vMDaMZ9awo7rHarcNwjckUJAbdiiST/OvbvUl1aqjkoetULe8j
jI2vz6Zq2bxHXl6BXKsVu8jYq2sDQY3rn3qxaFOdpFOvA8wG0CgLsTTLY3F9tH3T3pt1bhbhl6ENzW3oUSxQ7mI3Cs68RzeuSnBakgY9dQNtbhFPFQ2N99ov0LMTzUraTLOmQOvvVSC0+waggPHNJMDpNTl+TnGMVkwcyhferGpyKGXBzxVW0bdOpHXNUI646cqWJlY87K4cnFw1egXlyF03Z/s4rz1lP2hqEBHM3zkcVCH3DHTFPmH7w0wgBeSaY2OQ5NTD1z+FQLzjFTjOKkQoP50E88c0DAoIBFCGNJx3NJup2AM5pu0EcUwFBPSmhhijbimnj60gHM2eaaee1Jzjilzxg96YyNiM9+Ka3v071Lsye9DLuGPSgCBhnNQsnHA5q75ZIFHlgmhbie5nFDUToT2rV8gFfpQ0CBR60wZjGIn3FRCIOea2zbJzTPsq+lCYMxngxwAKiNs5PSt42yN7U02oNIZhfZmPIHNPW2c9RW4LdR6U77OvpVcwjHjt9rA4qV42kXB6e1aRiULxSFVBqeYDPS3xyOTTvs5Iz0NXtgPtmmsBt64+tMCkImVuf0qWBsTL9ac446n8ajUYkQg9GpMDRlPz9K19Ju/JhJbORWRKpbBHUip7Z/KgcN1FAMvXEm9hJ61RuZy1qV3cA0PcZsSx6Kay/PDW7qevamFmZ2onM+d2ePSs8qM8tzVicln5JqLd/nFNMo6lp7RJmQ6efLWQRGXYm3ccY4znuO1SobF7qW3FsgaMZJMYwemceuMjP1qe30ZfNmvRaCVmk3ed5OSnAGN3+etVI9MSGSOYXLicMzO5ckNuznCk4HOD+ArxdWtLng3utL/AIjUnsZLd7hrDZEsZkDtEpDL7Yz+RwaiuYIrq2/d2LQOrAn92OQQehXIP51LFZ+VI8jXkKyMhXdHEEyTjlhkhjx7dTVvSWXTJppvNtXMihSqpsQY743Hn3rSm+Wabv8Aia05cs1J3/EwRZYbDIQPcU8acjAkCuubV45ODDZn8/8AGoWvIHGPs9r+AP8AjXZ9Zh2f3Ha8XDs/uOQfT1DY25FV5rXY3QYrsZDbPw0MA9uf8aryWFrKB8mPTaaPrUOqf3FfW4dn9xySwEc9qnSE46cV0iaRCfuLIfp/+qn/ANigc+XMMe3/ANaj61Don9wfW6fZ/cc75PHHFM8sgYFdMdKUDlJB+H/1qZ/ZUQ6+aP8AP0oWJj2f3C+tw7P7jnwrqQNxB+tTJcXMZwsjH61s/wBlw8jMn5j/AApf7NiXvIPy/wAKf1qHZ/cL61Ds/uMyPUruIAiVhinPrl10Yhj2q41ha5+aZh/wIf4VCdN0/B/0g/8Afa/4UfWYdn9wLFQ7P7hi+I7xFwGxSDWZLibfImWqO+0uGOz8+3lZtpGcsCCOnGB61QtSFPc1rTnGorxNqdWNRXib51aN8CZTuq3Z3cJdTuAGefasBV3ks3FOD4b5fypt9C7HpU0kdxpxdHDfL0rkpE/fE1UstQmh/d78A9qsMxPzY60JgQSAiQ5/KmHknH0qRsbiM8+lNU+1NsYIBjmpwu0CmqMKOMU/HrQAmMnpS44peBRQIbgZ60hHHFOPSkoAaRwRTfLJX0qQLg+1OIzQMhCncc0FT9akKkr0pyoTSGQfwUo6dKnKfLwKiCYJyKdhCDgdKcozyaXbzTgucUAIBg0hXB4qYJS+X60rXArlRzimFD+Aq3s4ppj9AabC5V2YHSkK8cA1aKYpNnbHFCBlbHNKE98VOIu1LspbgVvLyenFMMfGRVop7Uxl70wKrIScU1gAas7Mnp+NNMJ+7QmCKjJkHjNV2TDfStExYHT8aqSLluhzQMtqSYwfakeQLbsD1NNiP7vio58hSPzoCwgYGwlU/WsmJsyhf0rXtBvjlQjgisZYyko4OQ3SgSY67g27cA81V8r1IzWvfoPsyP8AhWT5gXjIqr6AtTs5b+WBDAHIjb5iB6/5FZj3I3EBsU3WXdJRtOBs/qa5w3kgk
3bi2DXPh/4UTnwy/dROjG5zx0o2oo96TTybuAOMdKqy6vb21w6OhOw4Y1rdnQXI493QcVL5TbcDpSW3iPSTxICv1FWP7W0mX7k6fnRbqFiCGBp5PL3Y+tWpNDmjOVy1ILi0BEiTJn611Gm3C3ECspHpQLY46W3ntPviRR9aiF7LGwxKxHfLV6Te6fHfaaw8sFgOteX38H2S4ZHGCOKENal+PVHX+M8981cju3l5EmTXMeaU4Hf07VZt7x0YbaYWOhNw+7HBr0H4feH7PUne81SJJET/AFaN936mvMoL2OQDd96vbfAD29z4fVVwHHXmpk7DSNPW/B2g6pbjbaQQuowGiUL/ACryPxf8Or7QbZ7+2Uy2mecdU/8ArV65c218LhUR28vPUUvjG8gh8Kz28zjdMmzBNZQqNyKatqfOj7v+EdPHPTH/AAOs2BNi7jXXT6ek1w1nCQyNyOfbdUU/hmW0UvPwKmg/i9WceGs+f/EzAPOMd6esbPyBmtF4IIhyw9KrtdwQjjntXSlc6biRWrtzyK6/SfDjar4eubuNwZoATtz1xzXESamx4QYrovB/iC7sZ50UsYpV5GeKLWAz9uB0oVM1PLGGmfb93dxTlixjNAIiC47VKBxUgipRGQKAIdmaXZx1qwie1SCIMOKOoFAj1pdhq0YvWpY7bIGV5pAygVxShcDirctue1RNGy9qdwuRKpbjBqSJMN/jT/NEMZIHNPhZp0Hy/NTshDGX0xmmGLeu7HSrBjKvtYc1YjiPTHWgLmX5dSpETVx7Mq1WYbUkDigCgsJx0qVYQT0rSFodvSnLZt1wfpSsFzLeAEcZB96Z5AOMVsvaYHC0yOzJbpSuHoZJt8fw1E0BBFdAbA9duKhFn82cUw3MYQN6GmtCwPQ104shjpVeW0G7aF4NSK5z/l8U0wH0rpjpgZOF+tQDS3Eh+XincaZz/wBlYDOOKaIuflWurXT12Hehp6abCzkbcd6B3OSMGQeKpXEOPeu2urGNIztQcVy1+gV24oBGfGvy8dabMhKj0zUicKeMGgjIyelUFyrbtsnYZPIqnPGDcE/rVn/l8HbJxTZIwlweuD3p2uGwl4QdPi74NYj7dx5rbuv+PMqORmsKQLvPFAka/iGN3u02k48sdPqaxvsDt613n9iTal++jQsB8n9f61Xm0GeFG3Jx9Kxw9lSiY4b+FE5S3mu7FCkTcGqQtvMnzMThmySa6qTT3CkFefpVSXTWPIXp2rbQ2Oh8NfDm38Qx7rUqcDnLEiunHwScjH7ofnXK+EbzxHoOpCbSstET+9ikPysP8a+g9E1+HUrJHlxDNj50J6GjQhtnkUvwTuUz5flnPoxFRR6Fe+G7lbW6Qqp4Q9Qa94WaNuBIp+hrhfiM0ZtrPHLiXt9KLXE5MwNPmXlD6VyPi7RhOZGC+/FbtnMscoJ496g1q4SVScjBrNKzNEeQSSyW0xjccZxirUQ8xd6mp/EFoqzNKvFY1vdtbMMnIrTVlGsszREHJrt/BfjiTQZtkil7djzj+GuHhliulJwMmmrm2kz0U1DV9xbH0WnxL0OW2B81Q/oVOa4PxR4lOt3H7nIgXp2zXIWsyzwrt+9irablQ7utKEEtQbLmlSsL2CQDkE8H8as+Kb+W5UFW2oqgYHFUtJJaeI98t/WofEF2io8f8VZYbVy9WcuHXx/4mYLHew3MST3ps0ARd3X0piSZNOJY9W+ldSOkiVMNyP0rc0NcSsfasdDhutbWi/6xu/FDQFyMEzP9asbcnpTbUA3LjPetLygPxqHoBQC+1SeVnHpVoR/5xTguR93mpuMjjtsrwKkW1OeOlW4kwtTqg60wM42h9Ks29nle35VaCZbpxVpVVU46+9ArmDcw+XLjpTTatIpxWrdR+YRnqKSMLGp7+tId7GP/AGazxj5c1asbLyG3OOhrXt1EnPY1MLJXc5Jx6U0IzPsKyzbiOM1afT0V1AHWtRLURp0wKZI2z6UDZlG0XcVqxFZ9M
Dg1eS3jPzk8mrEUQC4FBJRForD1ojgC5rVESlTxxUPl4U9KLsCjJBkcU2O0bdV7bxihG2vjikMgNtlMEcVF9gG+rpmJyaZ52X5HFMSGC0KrjtUUlr+8Hy8GrZnBbk0NIoA7kUgIRbkLmmbBuxjFSeadxPammT5s0bjHHaYiOKhjj28mleRTTQ/vgUaAiG9VWjOBj1Fcfqa4c9jXYT/6o+4rldUQ56cU0xmIoyCKYBzj9Kei4c46elAUh+RVIRQf5LoN/tU+55lDAGkuQPN/+tSSHlDmmBXuF/dNn64NZEiMz5zj2rbnBKPxnisSR8uTzTBHtHhKSNNMlD4z5x4P+6tbNxHBMmNoBo+H/h+y1bw7PNcK3mLdMgZWxxsQ/wBaz/FrReGbyGCC581pOkWcke9c9CP7qNjlw8v3UbmPq1jBboXJFYivbEEgZNTLa6p4l1ZLeBSF7k9F961dc8J2vh6zj/flrhv4Setao3vYy4L64gGLZhGfXANWYm1ueEypcy7R1G6sj5wcg1Yh1C9jjKLMwX0FHKVcmj1bUI2/4+ZFcH+9U97c6/Mi3M8E88K/x9cVlktv3k811mjeODYQra3tsssHTI61V30Foc1Dq/mDC8P0IqKa6Z87jWz4luNFvbuO405NkjffAXFYc6pt4PzUrX3HdGDq3zo2RXKzqSxwa6jUXK5DDNc95Rnuo4RyXcLge5xTRRDbSy2soJB2mt5Sl1FwRkCvTfEfwkX/AIRKO609We6jXc8eMFhjnFeO2jyWN35UmR82DkU7EqSZsaZdm2uRG5xk45rp3IeIuPTtXJXy4KTpW/p92J9N684oasPcvacwjZGHON39aLmBJ2aWRQcnPNR6e3yRk+/9asTSZhIrkw/2vVnNh/t/4mZMtpC5+QbapSW2zgZNaBOckDFROe1dSZ1GbnqCDWxoh/fMR6VlSrtzyK1NC/1jcdqbYtDXsBuuJDWqMEYOaybPi4kAP8RrTB4I71DDoSgCn4GRjpUCk5/pUqnB6dqVgLA+VRzT0cE1AGGRzTlPNL0EW9xyAKVnwuM1Gvy/MTTmJIwcYosCYKRnk5qCXO7A4zS8qeKd98gEVQy7ahFwAxqwsypJyfxrO5BwpxQuehpCRqSXispANV2kZwN3T0qsOCKl3L0oGWt+EFSwuwXI61WjYY5qwrAL/WhiLHnMF7D1qATdTSO6nJzVcvuyBQwLAkH978qQcMST+FVWfBGDgUgkO80Bctlwy/4UbwMVULHGRTRISKBlx8H61GM5GeKgaZwf8aY1x270wuWWkUKcnmoWlAGKheQEbm61AsmW9aBFtn2g5phk+X6+lQNMDld2D2pokyD61I0iaSRtmCc1j6nHuj/rV55wQATVS4cSLyaaQHPbT5hHSk/jFSyr++PNR42tQC1My9GHz3pBzDuxT75fm5qOEbosZ4qr6FCOcqw7kdKyXyGI4rXXl8YHpWbNEwlYY70aiPWNM8cQeDvAVy3D3s94wgiB5J2J19q4zTJ7rVrqTVNUmMtzMc7mPA9hXO+Io2m1a3XJKhAduehLHn9BXSW9uEsVUccY4rOg7UY2ObCxvTTZ0VlrsmjytLbhCxHcVjalqVzql691cyFnb8hVZOVILUw/Kcda0ibWJkmxnPJoR9zYPFQhsGjkn5abFYtPHxkmmKgPWovMkBw3NOV+PelYpDmhCkMTiuwuvBdtf+Fv7QsJc3CLvAHfjpXGMxbqa67wl4lOmwPZzKXjboKHsBwupWJu9Ie4j4kjGcevqKq/DrRf7c8aWadYoW81+M9On613OsRwGOZ4UCiQ5x9a4rwPrz+EvGiNKmLd38tz6c8H+lNBe6PqxYl+zrGR8u3GK8C+LPw+NvrEeqacgSC4b96B/C3r+Ne6/wBpWy2YuXlVYyucntXlPjPxhHqshtbYBoFb75H3qauR10PM7rSDDpoBJLAfnVXRgUhdTwOlbsp3qQec1kRx+W8wxhevFBakatnhY02nIycfmaiup
QgIPQ0mn/JYxe2f5mq+pHGD1rlw32792YYb7f8AiYgO5eBTD3BqO3ukHD0ss8QB2kZrpOnqQXKgjr9av6Cf3r4PSsiSYs3pn3rW0A5lc4xxSYGvak/aHPua0Vc1kiTZI5z3p63TetIRreZ0NPE+frWQLls1MtwMgk4oaA0xJkH1pVk5xWe90vXP4UqXQ3VIjWV8nbTjLg4FZ63IGDmke4wwOcUxo0kYMakDANms6O8UcVMLhS4waAZbJ+bj8zTkOfrVA3mDgU9LoEj5uaBF5W+bHSpOM+tU/tK+tMN6ob6Uxmor4XHFL5wCkDrWYt8CM9KQ3qKCc8UBYvNPx1qMTkAmqX2xOemKge7U59KEhehqeeGUUhmBbiscXY3HH50Nc8E5pdQNR7sKcUhuxjisQ3ZOaX7YMelVYDXkvk4ycVCbpGbcD+FZDy+Y3WmM+O9KwzRl1HYxFRpqCkkk1kySEnFRFzinYNjZkvVyaha+baazQxan9RSsDJnvHxjPFM+1Ntx2qMpx1qGVSoxgmmCJFkDyHFKTg9KrWxzJVp844pDKNyodqhiKquOOanulJFZ4Yov40AywpCzgVFPEWlYg8GlVt7fdO6rX2dqLjK2q2bz6hHKoOFQfzNdBayQvaop5PtUahSj5Aziq1orsREvLBqyoa0onNhv4USSSHDkocc9KhIYnnpW5f6XcabbRz3CHY/fFYztubjpWydzUgc/Nwc1PZypHMvmD5aglVh0qEOd3I5p3Huad/LC8+bfp3+tVgxBqFTmplHc0CSHEZrU0TT5tRv0ggOHNZpjO3NbvgzUodJ1vzrltq7cAmk9BjdYtbvRNYjguTvX7315qO5udJldZfs/70DqEGfzqXxnqiatrn2mB90SptFc+N+Mkc0J9wSua93rt1dQiDzHWFfuqTWaZPfpUYVj1pspCLzRzDUUiaP8AfTKoNV72PymYHg4pdMuFF3vfhfSl1WZJZSycgrxT3IfxC2AAsox25/maq6oduOuKs2GRYx98Z/nVLV2PAzxXLh/terMsN9v/ABMzmcEdKjJyaEKnPNPK/Lmuk6SM5wPWtvQWAL1iYyK2tDGN5p3AuMcuxzxmgZqORx5jD3pVbFK99hbEn408MfWog4JpV+tFhkobnrS7sGogacCKAJVcml80nvUQPFKO/NAmTCQg5zUizsKrZyacG5oAlMrE06OUrzmoc0Z9KLAWhcmkMpJNV8/lS5Ap2sFyfz8DrTfN3d6hxn0oxU7gSq+O9G84qPPFKM+tNdgHH2ppLGnY7UoxgUAMAOOaCPSl3cUDBoBCjpikK5x70E4NG/JzQHUjZRg1WdcA1cYg9DVeVwBSvqBCp2mrKHdVFj83BzUsUvA55oGi6EyKjdcqRSC5AXk003CHjdzRYCCNdk2McVYlOB0yMVWkmQHrUu4SRAihgkRS/NHWfKpCtnGK02XMdUJVD71B5ov0AijHG4HpVtLghQKpxqwGKY8zKxFHKDOih/13OMEd6fJF9knWZOmc1Epw4wKratetZwLvQqrdCayw/wDCic2Hf7qJ2t1r1pe+Hjb3RwwXj61xiMD3qhBqH2hdhNbuiaYl/dGKSQID3NbLQ22M525IpgCDoAK6LVPDVxYMZEPmxeorGMSyLSXkG5Cu0kFeRUmKlgs5CoCROfoKJo3gfa6FT71T1GiexhnuphBChkc+lO1fTrjTJVW4XaWFdD4Y1iw0m1eWWMGb9a5jxJrz6peGaX5eyRjtUoVuhVR/Mk2Zq0y9qr2EO1A79T61ZZ1A5qW9S7WIiozWZqVysUZG7Bqxd3scKnJyRXK6hfNcTFeq+1UhmgspFizhscE1PbBjZ7t2SRWczTCBY9hxxWhbgracjAxTQjXsObCP8f51mawfmxmtKxONPjP1/nWTq7DzB61z4ZfH6s5sN9v/ABMzwRj39Kl8zA4quM5/pUi8D610M6QbBPWtzRTlXxWEcbq29EwFkpASTOBIx96TzFABzTLskOc+tVi3U
dqpak7l3zKeJcDmqSnIGetLuwRg0wuXTN/kUonGKqE55oDYBANAIviQFc5oMi7utURIce1L5hpAy+JAaeHzVAP6U9ZT3OKLAXC9Krj1qj5jeuacJKLDuXwwpC/aqP2gjg0vm7gKdriLokFHmY71SMpzjr9KTzCeKQ7l4SDpSmXAqnuYU1pGKnvT2EXfOpfMqkHyODTlajcCyZDu60of1qqTnnNOEmR70B0JXkJNN8w4zkVATk0u6kCJGkyetRGT5aQ9aQgselFgIXZtwx09KB79akYHaeM1FhvpTt2AUvngHNMLnrnFIN3JNNfdjnrSDqIXyuDyauafNvjZG4KniqI6euaS0keK8BLYB4pMo3Nu6P3rNIxM3zdK10Hb1qG6sijrIOc1PNrYEZkZImwap3Kjz261olNlx/XFULkhpiciqEdZpdmdQ1OG2A4c8/SvRPHPhaxm8B3KtGqSRx71cDlWXkV5eus/2Fcw323cI2G4eorf8b/EmLXvDqabpQLz3I2uFGNq9+9Z4X+FE46N/ZxsePQTzJyCcitux8RtCVDsc+opo0eePT5HZDuUelc+uCwArTQ7EenR+J5Liz8s3Ice/WqUJxIrE5Gea421RgQVJBrWiM/HzNijYLHqNl4jtre1VFtsuBycCuf1a9/tC4LhAo9BXOxPIMb3OKu206xyDPQ0raCsiK6u/s8JIOCK5RtRZrwysxODwK7Z/Dl34hvI7ezdVDH589veut/4UJbvZK39oTLcd8Y5prUOZLc8uTxC6pjbnFRy667jAJrtNU+CWqafayTx6gsgTnb5fJrz600W9vdW/s+3HmTF9mRT5LDUosguLx5c8123w48BXPiTU4b27hYafE27kY8wj+ldv4c+BsELw3Gqzmboxi/hr0XW9S03wb4fdlEcKxpiNF4JPYCqSInPseU/FO20yxlstOskVJ1O5tv8K+lcQV224X1FTXl7c6xqc+oXWWmmbPP8I7Co5htt8/pRMIJpal22AGnoB/nmsbU1LMK2bT5tOT8f51kaiQJO/HSuPD/a9WZYb7f+JmY6kMPSjcMgUjNnnrSZA6V1bHSTJz2z9a2tFG1XFYIcr0rb0YlkejoA68GXx1yagwaszDLnOc1Gq/KeeR60JE7EafL70H9amEfyjnmkMeTzyaoZGM8ipVPGDjNPSPafmGaNhz0ouK5A2d1OB6GpGi79aekfPK/jSAh4zxUm4Hr1pWiz3oWPHWmJCEHHFJnGKnWI7c1G0RPNJlIYDnin/eHFHl5apEixSERnkdBn1o3c/wBalEWDSiGgRGSQopN3GMYJqfyeOtCw880xkJOemacue1WDEpHSjyzRcCud2OKBxVoR0vlAikBWAJoK5/CrSxKKUxAmgexURckZ5qQxZORxU6xAU8KKGIp+SSfam+SV4HNXQvXijaM80XCxSWL1GKjlh74rQ24FIVz1pbDMgx4+tVRlbjH45rZkt8k4rPvLdonVuSKYF2K8+4D1Fb1qqXNs/OTt71yQPIOcVtaddvEMo3UYqJJAmRXEe1j61jTofOPA/Kt2ZleUt6nNZk0X7000+40Ta9DC2hzTSybNpwvueOMVkeFN8N75gjDIRg5qfxHDPO8QUkxIMhR/e9az7C/msVZAuCfWs6GlGJz4VfukepzanoWmeG7u5uHQ3LKSIx1JPRRXiKtmTOQCTkgVdujLMXd3LbvmwelUtjKckfpW9+htGDWpr2J3ECt6JAMccetcvay+WwOTW/bXaOMbualll7Zu570xlZT0zUocN3qVQCtIRa0fWbnSblJoHw6+vNevaD8RtPvYQl4WilA5zyDXickPdeTToWYNgcEU0yZRudp8SPiDd32NG0FJf3vElwvQD0X3962vhX4DGlQDUr2Mm4cZXd1HvWB4T0O9vNQhuJLbFupBZ3GM16jrfi3TPDdgGnkXeRhIx1Y/StNXsZO0dDW1TVrPSLJ7m7mWKNBkk189eKfEl34t1RpAHNnGT5aY7f3j6
VNrXiDUvGOpbJS0dtn5IR/NvU17F4at9G0PwzCsVqg+TMmEyzn3JqXJRKjC7uz59DMsgXbwaS7P7kGvYJvCem67dTERrB1cLH1X6V5d4p0mbQdWn02Zg2zDxuP40PQ+x7H6VPNc1sRWf/IOjz7/AM6xdQkzKRzmtm2O3TF/H+dYF626ZuxNc2G+16s5sN9v/EyoxOOOlJxj3pSpzwRijb1ya6jpDcNvWt7RG/ctxXPkba3NFJ8l/cdKBliXnJ96ZnI9aLiUjA71BvzT3IZZD49BTsjB459aqb8YpwlIbPFAFsP0yecUof5sCqgl3MDn61IZApyO9MLFjv1x7UoOO/FVhKM9TSmX8qEhFjrThiq3mcetOEwBPNICyMYpxAx9aqefn/dqVZsgUwZMq/SnfSqwmAOM0GXHf8aQFrApfYVW875evNAnAHOae47FrAxSjjiqyzZpXuPlyKTEixThjNUhcdzT/tAGTmkFy2WFGR1qj526nibimh7FvcB9KQOKpNKc4pBK2D/WgDQ3Ac5pN+49qz2kbGcmlSV0HrmkBdL4FAlBqk8zHpTdxB65pWAvNMoGKaZRiqDE4zmgsSRzTHYvpIpbHc1PPaedb9KyInIkA9+lb0DZh9+lIRzk6GN8dKIZmjztPNXtQiXJ6ZrOI+TpSTKNKJ/MiDZ5qtIfnPJqGC42HaxxjtRNKDJkYxihjRav7uCCcRykZK5/U1S+z2l8CYsA+1UfEtvLPqsYQHHkj89zUaVcRWBZJW9qww8l7KKM8NSf1eMjb0HwmdW1OO1eQIhOMmvQtZ+DdvLp+2znxN7r/hXn1vrttDMHimZGXuK6nT/iRc2u0Ndb1X+9XQmuo3zdDzPXfC+seGrlob+2fYpwJAPlNZkd06HIPSvcLz4iaZqdu0V5CjDoSOa467t/Dd1M0iJGm45xtxVNxFBy+0cjb6jMMcHGa27Ka5ucLHE7n0Rc1sWFroEEgZyjKPQbq6yy8RaHZqBDAn4gUtBuT6GNpnhTVtQK74hbIe8oOa6+z8LaJocIudSuEZgclpDx+VQS+M4Gj/cypB/uLyK47Ub37dMXaZ5PdzmqjyozfOzqtY+IIWN7XRLfYvTznXAHuBXAXck95ctcXk7zSt3Y9PpUpXc33hUht1fgmhy6DjBIt+GkV9QxkAV7LZWTmFoN4xjb0ydteJQGTT7lJ4TuKnlc9RXqOg+KtOuoYjJeKk0f8LPtYexz1Fc1W5tHuXLqKfStSAA28bgV6MK8t8e3Yu9UTcMyhcEn0zxXo3iLxxpVtbsgnSZ1HyxRsGZj/SvGNT1B9Rv5bqXALt91eij0p01oDepYjU/2WAOv/wBlWZJp+XLueO9bNqAbNB29/rUF8wU47Cow/wBv1Zy4b7f+JmPJZr2NUXQxyEEcVqsx/hqvPGJEORXQtDpM5jk8da29G+WJuKxCNrEHtW1o5IhYnpVbgPum3Hjiq2cCp7g5IAFQimSLn0pMkgDqadt5PFKBge9FwBV45604ZzzTgP7pyT2pCm3rQAhwRQM4xThzTlGO9FwEUkHOKQ7m6A4p/GD2NKg2n6+tFwGA4XG3mlGcdalAxknFNwCM8GgCPJ3d6mUFhQEzjinqmBigdtCLv7Uctx2qURjGakWPgUCK4DD1x60uTmrBXPpTlQFc4oArBSaMZ6VbCinbEJpBsVBET+FKIz0q3sXmlCjFGwFTym3cdacYicc1axg5pSM9KQFTynbjFKYiOlWhS9aVwsVBEehWnGPmrQwOaTILdKdwSKxtwwpj2xVuOlXeg6U0njFIZTjhYSgkYrXhIVCKqZqdWxxmkwRXuk3tis2WM7WxjitSQ5f61Vlj+Vx2oTGYTuUkxzTxOPSkuIyH+XAFRgsBjZTKLPiG68i7RFzkxg8fU1z5k8xmYjJrb8SLu1GPHXyR/M1kJGctjiuXDR/dpkUKjdCEeliHzCDxx9KeJCRnFPEOCTjmlEbKOR+VdNywBOMZ4q9FYvKu4TAVUC4GO
9KC4GMkfSi4h8qvC23fn6VF58g43HFGxmzuo8v5c4oAUXcmfvn86cb2bdxI351F5J6jml8nCntQx2H/ANo3S4xIfzpf7YvRnExzUXkMy0nlYPAzQBci1u83AFzUpv5pfvAGqHknNWEhxgk0gsWFlcgAAAe1W4Ic4x1NVo9i4Aq5aN+/QD7oNIRtxJshVQOlUL8bmXitFyBGT6CsJrsSPjAGDWOG+16s5cN9v/ExGWo2zUzEn3qvIwjU7sZrpOnQzZhiU/yFbOkY8huOaxZPnYmtrR+YD+tAxZl+boRURTb0qa6bbIPSoA2RVkjsk0hzu7UpfIoBA96TAljypzSyHedx/Kmb87cdKC4KnmgSH7MYOMijJz6Cm7xwM5FJvxzQMePcU/sf51H5mVzTlcDmjYLkmflA6Uo4525qMyruwO9OEnakIkDZHWlU+nNR7qUv+VNASk5FOB9Ki8xc/wBaXzBxSuFiYggU9Wwvv6VB54POaXzB2p7BYnGetAzmoBLzT1kHrSuBKM59qXvUPnAdDmgSgHFF76AWOlGearmXPrTg5waNQJScUgPOahaXtnmm+aV59ancZbz+VHGarmY9qRZiAOKYrFrNJkfjVZ5Se+KasnBPpUlalsAYpwPaqgnJapo2Yk8UAPJ5qG4B28VKx5qrczbR0+uKSZWxQmjZskCq6q4AGD+VXS3y5554qtJ981S1Ex+t2clxfI6glRGB+pqgNNmx8w4re/tVOf3Z/Ol/tSP+4fzrjpyqwio8n4nFTlXpwUfZ7eaMFdOlAxil+wS9MfpW8NTjJHyn86d/aMWcd6v2tX+T8Uae2r/8+/xRgDT5R1GKPsMme5zXQHUIsZx+tOF7Ge3NHtKv8n4oPa1/+ff4o5o2U2SNpFH2OXuDiuoFyh707zkzjNHtKv8AJ+KD2tf/AJ9/ijlWsnyMAimG3lyBsNdeZFHU0b1zzQqlX+T8UHtq3/Pv8Ucn5EpHCE00W8vZeorri4Hfmk8wYoVar/J+Ie1r/wDPv8UcgYZFb7tSiOQ4JU11RkUdTSectHtav8n4oPa1/wDn3+KOX8pz/DVi13rIMjpXQeenqKQ3MQ6sM0e1q/yfig9rX/59/iiOVyLBmHXbXJSs6yEkYNdnlJYzggg1iajp/BZRV4ZSinzK13ceHhKKk5K122Y63UqjrTGlaTljTXjZGIxmjNbnQL1B7VuaP8sJ4rC5rc0viDPWmBPcIryY70x4EGfalc5nyetSOQQeDQiCsqI+McVM9oFj3jpUMP3iBV24YrZjigaKqQh1pGgUHbUtvnoOlRzufOFMRLFYtIPlPSovs2GINaVi/wC7Y561QYs1w3pnpQIDalV3dKFtWYZ4qediIQKZbSEDGaBkTWzKeetSLZSlN3anzPhhitOJiLM8UNDuY/kPu6/nTxbNtPNSoxEhGO9TSOAnFICssDN70jQN9DVy3YbD60113NkE0BYqLA55pfJf0q8vT3pAP3nSgZS8tgOlJsYds1eZR5vakbGTQIrLH8oIzikCetWgegNSkKW5FIZREeDml5HGelXQFDdKawG7pTuKxTKgg9aCg9cVeMKccUixIzkFaQykPrmhQd2Sc/WrUluiHIFC2yFS1JsLFZgm6mkjHH5VYFqu4jNOa2RFzjOaVwI4YwW3GpQPmanR4xxTWbFBVkOPJrMu5NspHar+7j0rLuX3E8ZIpJWC4plGwZ7Gq7u247cYqN92c9qjMpU4xTuAvtn35pVYgEY57e1MB59Pel3e/SgVxwc56Uu47c9qjzg/e+tBNPYCRW6k96lR8cVBjPtT0OMd6WvQRcjkzj1qyjc+tUo2+Yc4q0rDr2oGTDnGc04/Wo1cHIp2ccHmkA7PzUFjnmm5HWk3Zp2AGbtVeR24walY8cmq7nC+lCAgZ3/vECq0rOTy1TSMOneq8jAqT0piNbTb3OEb9a1JUEieua4uO58uXjORXU6deJcQe4pMa2M68sQoLYNZEic9K66ZAy9Kwb21I
ZiDxTWoGTg4re0of6Pz1xWG6ENjnFbWmnbbn+dMRK3+u/wp7vhTmow+ZuP1p0wPPHvTTEMgOelXLri2WqNsfm/wq9dfNAq9aBENsflqKY4mxUsCFU96hl5m5oGX7U/u24qsmPPJPWrcGPs5xVKMAy5p7IW5ZuSvl1DAw9Me1S3IzGO1RW6475xSvcewSkLJzWnE+bfjpisy4A3CtGEf6JmmNlcD580si4TrTFbDYOalYApxUgLbcqc0NzJ7UlucZzTzyxoFZiKx6dKUHDCmjnvSn7w9KBjyMPTWNO6tTGouNCg8j1qc4qtnJWrJ7UbgNU/MaRyKaPvmhjxSAmzlRUatiWngZReaix+8oGSTE/Wljf8Ad9KZMemKIj+7PepAN2G6U6Y4WoifmFOm+7mgBkTZWoZHIfmnQHGRVed8ymlqxiyyYU85FZ0rZY81bkPy1nTnk45qkiRWctio2Vs8dKarAuMtkelTZHrTSGVd/A4yaGJziotxp2T3waQD8nuM07IOcmohkcZ6U4t2zTEyXO0AmjdlutRbsA0AkHJ5oAto/QZNW45OKzUPPHFXY2wO1IRaDbhnOKeD/wDrqvnHSnbs9M0hk+eOo9c0Fu+RUO72x/WgtQA53zVeR8U4vliagmbApjIZXy2RVWRiVp8pOf8ACoJG+UimKxWZ/mPb3q9pl4YLgAnis5ivTNNRirA0xo9AR1ljBHOaoXiZB4zVLRtQBXynPPYVqTLvjODip2EznZ4sNzmrtj8sJxSyQF3+agfuhimgHLkSE9qWaU7DzzTG5GAaiKnuMjNOxJPZnNXrpisa98+tZ9tlXA96v3fRKYWEgPyGqsobzM+9WYPuHmqz8z/WjcLF+IkQGqsWTL0FXUGLbPFU4uZu4pDSLNyf3QFRWuDmpLzARaZY/wARxTASbAkHGKvxN+4rPnOZfWrsQ/0f8KGFiEn0p+/cv3TmoycCmFju70nqOxZg5LdqcvUmkt2GcU896LXAj2nBpT1UUnGDzzSZ+cYpDJujVGWOSDxUueagcHeaBDgeeKsk4AqmDg+oqz1UUaANz81NbkUj8SUM2BR6jJlPyD0qJiRICKfGd0dRSn5h2pMY+Q/L1pITtTA6USH5ajg6YB4oEK8gWQCnzt+6zkVXuDh+KfNzAM+lIZHbvljVO6kxMR0qWBjvbBqpdtmXpQgHSsdo+lUnf3qw7/L9aqNgg4Ge2KYEW75hxVgPx0qg7gNxU4mbFPcCMe3bvS845rQ8+zGf3ePpSie1IOY+RRcm5ngbR/Sgtu+8DV8T2g+9HSiaxOMJzRcDOJwtIGB71piSxJ4Tin77H+509qVxmdGTuq7G3AznnipwbHGdgp/nWQUcUxWI1YKKUOdw9KnE9oecdKFns2PFICIHafTNNLjI7irG+zHegS2RG3cKQimzqByaglkCqc81p7LaU4Tkmonhs1fazYahFGM7CoHPy+n1rca1tCud4GKpz20J+5zmqEYLn5qTeavSWYGcc1GbPnjmmFxkFwYZQ46iuusLn7XAuOtcqlnk9q27G4FnHtXr70mM1rjy4IssPmNZG4sc+9JLdSXM3zH6VGWKseePWjYRKz4PSmhsnimFscDoabuwRQBOpPmA4/Or90fkT0rPi/eSDPars7YVRTQPYmgIEZNVJD+99KsxMfLz+dQSffwBx60AaMf/AB78ZqrEMSD1zVtP+Pf8KqR58wHrzzQBNd8rTLQHc3PFSXIyBUdqMuSTQCQsy/NnP5VahAFv71Wm+9U8HEHSgZCThaGySKc4yP1oIzjBoAW24kPWrB7jpVaD75qzng9aBEOc8UHllpCPXmmlskDHSgosntUB5fAPSpx05qs/EtKwDiGK9MVOn3BVXLEZ/nViP7oJxQJCS43A1ESCTk4p0vJFQsfmpbjLMTZjyKjlbDANxTbVsIwplxyVIPGetMCaVvk9qjtX5bmiST92B0qK2YF2pAMvZMTAipnfdbbuxFU79gGBzTpJv9DXBxSAj
SQKzEGqV1L+96mmNPiTFVrqZSTgc1aQi6soaMGqskwBbmoEmbYMHBqFm3Hr+FGwDTIWY1ZTLKDkVLa6bNL86KGHpWpE1lHGEmhIccEYo9QMsrxnOBTwMKecnrmnAgqD/KpGX5elKwtyAqACc84qID0q06gr0zTcArjpQMj2888U9RjqfqKdtG3v9aeqZwf4vSmIeAPLOMColCmrBTCZPeoiuPxpANB2qfemqD1HWpAQVxt5zSbNgpgOJAjOeuOtVsc5qxJyg9PSoTgL05oAeszR4dThqYJfMcu7ZJ9Ka7DGcUgOBn+dAExO1AMkim78KM012GAM5qPfjjrQBK7jA6/U0iI8jYRctRBLFHIHbJ9q0nuIrS3NxtHNPQCsNMnJ7CnnTpwRyPpWRN4gunkLKdo7CkTXLxmA396Vxmz/AGfcbs8U+S0kjQbyOlQ/2hceWjbsZpWu5JVwxzRfuIiOQcUY4/xpVXNSeWNvFFwH2+0dufarEjbwOelQQ4Xg08j5uvFAy5bnEZqFxls4xUkX+rIz0701yNwoA0YwPIA7YqttUTAL0qymfJH0qEKBIMn8KVxCXJ7dqZZcMasTjcoqCBSGb1qrjFmxuOOtWIAfJHaoJjirEJ/dUtwIZFz3ppBypzipZfu5qPd0NIBIyfOx2q0M4PGaqod0oqyeV64pAVt/zEUjEcZNIcEletI+FxmmBbX7oFQyL+8pYzkA4psuS2O9IBuQQeakRvkA/nVUvtJ/nUkQ3xgg9KYyV2Axmon5BK9Kjm+ZuvalU4QhSaLALbHlgaSd/mxTIdwZyM81FPkuPUUWFcsvzHjAqC3fbIwNOZv3IOc1SaQrJn1p2AbqUgD8Hk1CZibYd6iuTuk3Enb70rSDycLQJlZn+bPQ1HtMh9qUIZZAq8+1aEOmySNtC00FymLGZlzH0FWNNt7aS4CXJKt0zW/bafLbgZGB6Ypl5psd0Pu7W9RS6iuK2nXemD7TaP50PUoPSrUd7pt2gllRFc8EMORWbY6pc6TJ5F1l4T3IrY/snTtR/wBJQptek9QKPirQ20DxRfacUIWOU+X7qeV/nWV5WW9B7V678b9FMd9putRJ8soNtKR2I+ZT+W78q8nCEkY5z1ouNEDR+g4qPZzVqfcrBeAKr7c98UJjAoRtGeKeQVUAcnNLs5GDxVsqpHBBNPQkrSE7B2qMAmp5Rlc4qEll7UhjXYKAelNckrTpDu6ikPKgACmMidhgU0sMdyO1PdQOoppHegREV3UbSDxz2p54HNIWP/16BkchI7fjULNzUrNuznmoAMsc9KBAjZcc1a1ST/QkQk4zVVfv596n1RHa2iCoTzQBiH2qe1heSVdqnANLFZzSSBTG2O/FbTSRadCOF3e9CGTyxZRAOwpiLjjviq8OqJNJtI4PrWgqZ+ZVz9KVgCJSU+7+NSqM+tOiJWPbSLuxQAKoB96fj5qUDvSHLN6CgCzEo20OAXXjvUkS/LTynPqKQFkD91iqwX96CT9KtqP3YqFkxMDTQD5B8ucVBDzI3NWXBK1FCv70kdKYIjkHzHPpU8GDBmo5Uy1TQj93ikDGSAbeahIHFWXAK4NV2G1uOtADQP3wOORU59z+FRY+brzU2O3agCo3+sODSPyAKfINrZFQluKAJYsoo3HNErYYYpFPyg0yU9ApzQAyT5j1Gakgx5fFVySvJp8Tny+KBjZWxJgGoxL5TfMfpTJ32sCOtKqArvY0ATQXAYsSMCq15dbWPH0pBKihh/KoJLWRyHGSDTWpJIl0AmT3qvJclz8tWk0uV1wRxVyLSuh20agc9I0jjGCaSKCdsLtIFdZHpTswCR5arUWl7ZlVwp55wKAuc/YWSxSAyda9K8H6LYXRM1yQQPuqayj4djlYMhwevNTW1ncWR4kYL7VV+hB1F7oNrPI5hC7O2K5K/wBJe2ZhtwB7V1Wl6mq7Uc8etaN1aQ3kOFUHNQCPLLu0jnjZJBnisE2V1bsY4mOzORzXoOraKYWYq
K514yrEEc09xpnvfxC0hda8E6hBtzJGnnx/7yc/4j8a+bcDbuU9q+tSFurNlIysiYx9RXypqFkbPUru1PHlTOmPYHio3RVrMpOBJg+lMW2eRzsXcKmaLAyTVzTRtQ85NUK5UTTpuTsAFPNvKB93mtzzECdfxqjcXiAMF60AUWtZG6jAqCa1kjUMavxSvKnJz61HdyMsYVgaLjMqRS3OeemKRVPT86fOcsMfpSA468mgCKTp61Ey8ev0qaTn3FNK4XimkBXPrTWNSsowcU0Lg5IoQEJzUbjHIqyU79arOfmNDAYgBdT71sXN4tvBF8gYn1rGh4lGSeDVnUJd+zjhaQyzFqqu+3ygM1Fqlo86h1HviqBdF+dOtaNpqG8BGXIpoDMtLSYzqdpUA10cbFY1TjgYpQyMAVHX2pQuGNK4DsZHI/GpEXPfpSAE9MVKnOPTsaQxQuAKcI0qQjoSKese49KaAkhTCADpTmT0qSNNqgU8rzR0ECj5etNZfmU4p3aheTQAOOKjjX94amNNUYb3oFcY64PcU5F/d05xmhD8tAxjAgVGR+dSt0qJvWgBhXnpUueBio8EHFO6c4oEV5zyTVdnxz0qeYetVJSMYNAyVXJU+g9KhMmHK00nCnniqzON9OwFiRwRtpInwpAqs0wK56UxJx696EImlyzZFMmMqxYHNaWmQ290fvYJ7V1CeGk8hZsA96LC5rHHabo807qzgnPauws/DMojDuu1fcV0uk6XY20AmfDOKfeXZlO2MYQU9ib3MA6ZHEuAN1J9mhiUu2AK0jjGMZrMks57lt7EKh5A9BU3GitJOZn2wrtHqO9XorJwgc/NJ61Na2UcKdefXFXVTaMYHtRYHqT2RRYsTD5h0NPkhSZGAHPtVcB2GP8AJqSJmt2y3IosCM2WGS3YdsVqaZqbqdjHIPHNWHgS4jLFc1UstNBvTk/LQBPq12gj5AribmaNp2Kjg1v+JriGD92rgmuODF8tuHWlcpI+otNydPgz18sfyr5z8aIkfjHVVGP9eTX0jarsto19FFfNXjCTz/F+pyrypnbFKOiKnrK5z5PzkdRV2yI2tngCqjKScgVVaV1yAcLVEmuXMzbF6+1CWboGV0znuaxrW7e1lBHzduavz61MygbAPpSCxaWJLVx8/wBaZqrp5Ixzmslpnlk3ux9eKSYvNx1A9ae4WGSgl88Y9qcp68YoVSOGqYJt7E5oGVZDz6etMDEKR1zUzJmTr+FRlQD07UAiNlxSAqo5yabK3qKiBJzjmmJkrYZcdAKiMBYZqZWxxgc1OowmOtAFCOHyvmPJ96eUEh5FSsuSRz9KeImApWGVBYKzBs8VZjtVQ8CpkjIGKk2kfWnsIco2jAqZeMnrUSr83HFTIpH4+lKwx4XC5zz71LGCflPGOtI20qBSxt60rWC5cWMHB6CpwoA9agDjbx+VTB+OtAD14apSPzqENz1qRWzVEhtpoB3Gpe1MK8Ug9RQOMU0cninZ5pueaAWgrdKAvy0E5FKOlJDGMKibjoKkcUhXOKLAQYNB+7xxUrIAOelQygrTC5Vue3pVORhjuatzqW9qzZflxzyelMBWYqtVWfDZPanSP8vPWq3DP81MQ9jk88VWdsk7e1TOSBgVXKnJ560hokgnlhkDIxDDniu58PeKndVtrkgf1rh4oZZiRGhfHXHakzJDKOMMtFgep7Ny0XmRMMfWjeGXA4PeuQ8M+JlfbbXDAduTxXYFMrvjOVbnigmw0NxtI/Kqtxdusypj5e9W1Az7024tlcMehoQbD1AaMMM4qZflXJ6elZ1vcNbr5b4J9avxsGj3d/TNAiRTuUkYqN9+3qMU4MNp559Khmu41iOWUtjgetA0aenTR+U8bsd+M1WuLnyVcg4xVDTp5DPwm4vxwKTX43tIdzvhmHAoGkcP4h1PfcuNxzmsL+0H7DijUZjLcsdxPNUSvNOxVj7O1i+TS9Hu7xyAIYmb8e1fMFxcNc3E1w/Jkcufxr6C+
JP/ACJN39V/nXzmfumpRL3GM56AcD0qB5OSCMgVIKgf+KgaKxPzDgipUXJ6HHvUZ+/+FTp1/CpTGx8YHcVKGRSahj/rSt0amIV2G7gcnvS7j2z9ajP3qkH+sFMGQNzIeaCp9/xpX/1w+hpz/c/CgEZ05Bf29KYvA461K/3mpvr9apDJIOWUN+tXmjwnB4qpD/rlq6n8VJuwupWC/OT1qyq8GoovvmrkfX8KTGyDy8eox60pQn3qY96O1JMRHGCDirKx8VCv8NWF+6frRcGIR8vTFNjj3N1p0nUfWpYOjUxvYcBtOM9KepGaY3Wn9qCWSq2RzT84qt/H+NTr0H0ouMlVqXdnimCnUxLUXOM00nmhejUlIQuacp4pvY04fd/CkV0EPPenKtIO9SJ1FN7ASpatJHuA6VXe1Z93HStqz/1bVAOsn+9STEjm7mAqMVj3XGR6V011/Wubvv8AVt/vU+o0Ucbhx1qKVHXBwasR9R9KST71NsZVLnuK3vC/hHV/Ft00em2jyQx/62c/Ki+2719qwZPumvpz4L/8ky0//fk/9CouDPOH8MRaFG1rcxeXNGPmU/z968/14Qpd4i6969w+Kv8Ax9p/1yrwK9/4+pP96lcS2K8JZGBU4IORXd+GfEn3ba5PsCe9cKv3x9a0LP8A18X+/T6Az1vCvh4z8vbFKVwvNVtL/wCPRPpVmT7v5VKIRDJarNzgA1QR5LaQiT7ue/pWwv3D9KydV+6aaC+pNJeoseVOc+hqGGPzn8yQEj09azR/qxW1b/6tf92n1H0NfTIYkfzm4Cj8q5Lxdq/2q4kC52rx9K6mP/jzk/3a841z7031pdRo5O5mKOxqmZAxyQc1Jeffb6iq9BR//9k="
s1 = "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAFZAcwDASIAAhEBAxEB/8QAHgAAAQQDAQEBAAAAAAAAAAAABQMEBgcCCAkBAAr/xABMEAABAwIFAgQEAwUFBgQEBgMBAgMRAAQFBhIhMQdBEyJRYQgycYEUkaEVI0KxwTM0UnLRCSQl4fDxFjVighdDU3MYNjc4dHWSwtL/xAAbAQADAQEBAQEAAAAAAAAAAAABAgMABAUGB//EAC4RAAICAgIBBAEDAgcBAAAAAAABAhEhMQMSQQQiUWETBTJxBiMUM0JygZGhYv/aAAwDAQACEQMRAD8A6oV9X1fU5I+rW345MmHH+mtpi6ZKsJuAtSQJ8i/KT/Ktkq06+LL4scHtLbHuneEWDl/iSx+Gu7xw6WWCYkDupQFK4uSpCSkorJrdZuJZy47pHmQDI9du1V3lgLVjjRkp88id6nlsCrLbyyggwVGD7VDsrI14w1pJkEmOaejJ3lgXPFqm9zaoFxaHEgAgU6w/CUHwi5dOAJ5VHNWsnpph+IodxN8lLoSAFD5jPPtTJOV7QkpSnYGI5gR6UJSvDFbfgj2JZZwDNmEos75SXUJAElBBH5UItPhkyLiSXN0oWpMDcgfbfap4cpoQvW06AIHApW3wx61JiT3G+5Fa0sI1zirRDMN+DHLOKH93eQ2jz7OL/nO9Pbr4EcEcZUpF8uUjaX4jb6fSrbyniqbZrwFqKTyZI/l+VSXE81W67JSWnAnykKMe3FDvKqsqpOjmd1EyB/4RzY7hDBXcFCvIfnJMwAI5qV5c+FjqnmCxRfWORMaurVyAl4MEJP0Brdrop8Ptn1k6nMY0w9btW+EupuLgvoDhJMgBIHfvM7V0Zs7RqxtWrdhCW2m0hKUpEAAVlJPCyUVUrOD938IfVaxTL+QsbbkkavwpP8jQS6+HXqFbJKHclY2jupSrJfH5V+gGsVNpUIKQR7imtXY3ZI/PU/0gzbhydVxlvFWUpESu0XAH5U3tsv3lolxDtq60oHhxspIn61+hR3CLF4y5ZsLPu2KrvOnw5ZGzxcKuL3B2E3ChClobG9Op/JOVNnDROHuJBGkpI7GnFpg7qlwtOpJG31rsQ98CPS9+5S+bF5tY5S1oSk/bT/Wn/wD+CTpaFpUnB1JIEcgz+ldH5Yok4nIzCstrfZKwjUnttsaK2OWGispPzKIG+1dWsQ+CTprdWimmLBy0WRs42RI99gKqHN3+z3W3cKdwHF9bYOoJd2V9I/51NTi/pnJycUqwaKYvkl23tEutgJnfjtUYfyiLhlbjh84mduK3Xz/8JGb8Gy+863buXSWG9Sg2gGfWINaXZpxpWGvvWoJDjbi21jcQUkgg/cVeNfJOMGiqM44VdWC1BaSUTO1L5FucGtr1K8VhY1DSNMwTS+Y8UXcsqU6RpIPPeotltLdxjNsp2PDSvWUCeB61GVp02ejw21RsrnTEbM5VQGwQhQhI7Djt24qK9TG46b2T60wCtASQO8Hj7b07xB0Zjwp1NsYYt25IAPNJZkSvMfTq3w9l9sutlCokmYkf1rxuGV8sopn2v6n6blj6Hh5Zx8GPQvMyLW3dtnvMEnyj09IFbYdGupT2D4rDaiC4Dxx+VaIZPZvMCxgNnT6Aj+dbHdML924xltQUQZ2g8V6b0kz42VaNv82dRb3G8v3FsrQNSI2PNAugedbjFWVZbeWSUunw0Hbbn/WgbLhesgEpKSpME96j3S27Vl3rLaLWShCnATO8id/ptU45TJNVo386bKXh6VYe4TGgKQD6UXz/AGpewBx1I87JCx9O9Q/CMz
2y852DbahLiSkxVnXtqm9s3mFiUuIKT9xUorDRa9MohrG2yfDISSTx60ScxBoMAgDcRVfXLT1hiVzbqnWy8pE/QxRM4kQkNlceXuPzrJXtE5O2GX7pPiKVAKjtxX1uGjBW0hUngpFBvHIghU9ppzb3SFJACvMN6rS2yfZ+Aw8y0sn92hKSIgJEUIcy3ZPvFS7RkkHc6B+tOVvkqO4gwYHFfMuaHCQskcEHfegknkZO8HwwTDmSFm1aX7aBtSy8FwtKitFoyhZ2BSgA0qlRebkJA7bVgytLCklQBBMTFZYG0hhcZJwe6larFlRmSSmlmcj4KhGn8A0duNMUXQ8paSUkAEzvTlu4bQjzAah3ihL+QXQAY6b4AbtTxw5oKAgmO1PkdOMupQf+Gsmd5g0Sbvg6ggeUTtTxqQ3BVINLtGbbAKum2WlNAHDWiP8AKP8ASsrbpVltvzIw9DZgxHaeaOrfS3EEQORThOKJ0aoBj0rNXgZYZFbjohlG7IU5hzalepEk0uOh+VV2QtXLJLjAjyKSCBHpttUjRigWNkEd96dN4ulLZ23mRtzRyG7eDlp8bfTJrJfUKcMw9xuwU0pQWhvyJO3oIrX3LORsdznet2+F2Tr51HUpKCQPY+ldqczZVwDN4KcWsGrqR/GkGhWXemeU8qOLVh2FsW5UdWpKAN6aM1xqtsLy8nF7PWX8a6f4ivDMQSWLjSF6e0TA+2xqDOIfu3kFR1KVJPt/yrrF8Qvwe4D1hzM3iv49dlc6ChSmVASNjuCCPWqbxr/Z12iGWU2GMuoWAdRQQQobRyCfWrxmpZlsduK8mk2CY6vAEHw4W4VASRRk5/u/w2paCQNhHJrZHP8A8B97g+CsOYVfhy7bI1JJ2WO8+9Vxd/CTm+zwG4u9SS+hBKWwJCjQfSSdMDrBUeG40i9xcXd0kpiRv3NKYxmBtV6S1GmBwKkeF9Es13fjtrsC2UDzKVPPeovd9PMfw+6et3MKuSptRSdKCRttzVU4rAE3bP0YV9X1fVzCHivlNceuqt4cX6yZuuCsFDuLvgeuyyn+ldgLx0M2jzh4Sgq/IVxivrxOJZvxO7kkv3rzsn/1OKP9aeG2c3LJJosO7WbPLTgKQT4Z2J/WovkUf8RccMntt2ovmbEFW2WENaQCpIJnn23oPkNKlreuJ8rSCqQdzQbpe4pH5LavsWFjgjiPEClKcAj08oI/mKYWLn4pobgLO5PvVD5v632lhiT9i7cafDcJKYmNvaiWB/EPgyGUNqdRr7JVIJ/T7VJZVlKLyWsIaiBJO0UvbnWAkmSBAPrVX2PWrL+ILATdNFU8ahI/WpXh2e8GfSkN3gKVD2n7b8U2XsyrNEiVZLafChBJPI3ppi9s/wDhnSmd0qJim9rmrDnFg/i07qjzSIp1jeYrE4Y6UPoLoSSEzO8etZLrgVxbVjL4NuqFn096yY2/mHE3LLD7lnwWSlClNhWoEhQHeODW7+LfE7lnD1hTTwft1nyOQRI9YrlnY9QMuYbjlwLlSi+hzUQlAJ2Mz6VLLvrXlvF2mmpcb8NMwtPE99qjLj68lxeGXbwjo5ZfFJlq8SCBM9kqmjOG/EFlzEiA34oUe0VzUw/qzl9u3Uhq5HiiAE6Fg/YxFWR0nzIjFroKtVu3CpnQk6tvpW5G4RtCpWzoJb9TMNu0gtNuq9TpkCnDeP2OLuJSHvCUeN4qA5HzdguF4S21d2q2nFjlbZEnjuKi+a8WN1iyBhStBJJISRA32rmhzTk6aKOKRsSyjQylIWVQPmJ5pQCO81VeWsYx63s0JdfSsAcqIJo61mfEEKh0BQ9QK6+7SyiVJ6ZN6gGeesmEZExVqwu2nXXVp1koGwFFv2xf3ABbbVHokTVVdTMsO5huvxT9utS0iAYMxQXJbSoPUP4x8R+WWMv3d3ClqQg/uSdySOK4yZ0cdxHMuL4g8x4Tt3ePXCmx/CVuKUY9t6316lWgwHCLxa2HUNyUypETWpl7Z2WYm3A2dK
pMGN67YTUdEJQdmuublaGkpSNO5G4qN4ZcGzKnAYM8jerNz9lw2qFat0pB5ESarG41NQygDaJIMVpO42dHFcWbLdAMJOY8qYqp6CdZlJMHTp5j7GqgzFiS7DFH2WVuNBKzpAXHB5q/vhhwNbmUsQW2pRCtyRP+EiP5/nWvWfGEWOaL9hYUXGlxB2Jr5b0kmvXTjeD9i/WuNv8Ap/hnLYby/cKv0F1agXE8nvVw9IMSLWNtEgmCIAqhsp3IRcraMlRE+gq4OmN+bLMVqko0oKwSK+kwfjUr2beNvFlIIACRVX41mJWD9Q7a8KoShWwmPsan9virS7dK0yowCQozVD9T8ZQczN+GorKZO3pNDbwTUbd/Jup0kzf+3844S8HdQKoMDmtzAZSK5z/CjeOX+Y8KAUpcrCuRA/62rown5R9KRv3FUmlTNauohTZ53xhsbHxir0iQD/WgjF0gNnURPqf5Vn1jxUJ6h4ugQrStER66EyKi4xIKAJOxE8/0pVJVkLirJQbqVgJAkAzFO7N5JJJO4/KoUvFgwtEKAnaKeMY83rnXBPvzTX4RJyrRMi6FTJ2H869RcFC9u/O9RZ/HClJKT5CO1IIzL4ThSD8259TR8WZVonbeIpDZVMIHG9Yv36XVjzGB6HmoW3mRIaIU4kAeuxpo/mBQUQFJAHqaTt8AUkT9OLi3OkT96VdxEOoBUoA9oNVknM0FWo6t5ieKXOZYY8yoB4107TasL+iyW8ZSEAbhUgQO9L/thRTIXCf1iqpczg2Buvzp4TP60orOx0HSrVJjc8GKTejSxRZl5i40hSV6iY253r1jH9lQR5RMVTrmc3wtQS4CSBvq2FYJzmstT4mkkbwa1WFS8Mt1/M7iTqTpPYJJrxrNvk8yxx9Jql3c83DnKgjTyPakmc7Qf3pVAncnY0Wm0N2SwXO9m9pC91n7d6H3OeUqcSlK4JqnbrOqC7qQ4EieJoe9msICUpcAnfc96EYOsiuXuLovswWigCLk6+Sn1oW5mhsKPh3BAFU5dZnUhB8N0hR/w7/ehX7dWpadThjVzMCPpTVWjSWLLdxDMqPElb+oQTqJmgq802jK/wB64NCtiknyx9KrC6xxSnT5pgeUzzTBWKJS+HPE1E7QpVFL5M25LCLJexvD2StTSG9+wA2FRy5ucIeeUtduhajypKRB96hl5jCUqPn1d4Tv9qaqxpQjSIHpNFRzk12snW2vq+r6sMAc/YgMJyPmC9JgW9g+7I9m1GuNmCJL2INE+XUQdhxNdkOoWXHM4ZFx/A2XfAdxGyetUuf4StBTP61yyzf0Zx3pVj6bDGbVTLiDCXADocSOCk8GmjV5OecbkgPn24DGAoSQCQlKZHYdqGZFzTa2eHXIJlakkT7fSker+Ips8ACS2CYGw23+9UIxnpywdWlHlSrygxT9VWSsIqnQXxLJ7WP5ixO6SVKS68opIHImvP8A4WanAUq2j3kVnhWd3bcOOJsnHSRuUDn7U7X1bFtJdwu5bQk6SrTE7dqV9n4NbqhS06VsBSVFwpgRIE1LMHyHb2mgIecB9jH5VFE9b8LbkKacQuJjTNF8L63YOmHXAtsJAJCmyR+lDN3JBi35LItMmL0IWl10CflVNWA70ytxk925Fy546kjk7A/96qSy+I7L4aAUUqSgyToV/Qc0dd+JzALvCHGG7ttECQmfMT7ClrthCu7tEh6AfBanq11BfXjH41OAtulLzrQKQfUauJ/1rc9f+zj6LG0LKcIvmzEeIL1eqox8EfXBjFOnpZesF6EuKWl5B3IJJMittcPzDh+JMJeZuUaVDhRgipLkbbUjpbpYNRMX/wBmBkG6Wo4fjeKWIPAUoORUu6JfBPbdF8wrxC3zE5irKgB4Vw0ARH0rZtL7S/lcSr6KFZhQPBBppJSVNmU/kjuPZOtsYtQ0G0NqH8QoC/0qw9ljWgkOpHzDmpxcYlb2qtK1+aJgb03s8wWF/cqt27hPjp5aVsr7etTjGMcRYH
krG0wW+s8UShdy6u2kjRE/SpTh10zbYyi3dbLiVAQrSYBqZ+EiZ0Jn1iviy2VatCZ9YqjTZFRSdo8Qw20PKgJHtUExXqNgdnidzaXJQpbSyhUCdxU8eaDzakEkBQgkc1W2OdCcHxl95/xnG3nSSVepP0rUPsqP4mM85axDpviLFs2hx9xohEJiCRzXNG8Zey/dpebXDQPmA4ro18RHw1YinIN9dYLdB9VugrW2qZKfp3rSTBejmZM44kvClENq2lQRv+tbsk6MovyQvGcGt86YSQgoLnZSTwapjNGXLfArgWw2dUeFCf1rZDM/S/GejV8g3ilP261QsbSB67dqb4n0tw7qE5Y3jC2wtKwToiT7VpvpFs7fTcf5OeHG/LD3RRP/AIZ6burWrw1La1SkBA9fsPrWquenFYtnLEHUnxFuPn5RsR9a236muNZSyi3htmpIfWghaAY0pAgTG/etZ7TL6/xan1jzLUVH23r5X9Ig+T1HJzyP2H+s+Xi9N+nen9HxvNAnL2DO2lwi4KSFDc+kelWblB5V1jbWs6Jjen+F4Ay5ZKc0hR08kVjglobXFEkeUD0719erej8Sk7WC8TjTeHWKt4hPlg8mKo3H7lWJ42paVFSQQnb671K8bxoosktJJK+AEnmgmG4eEL1ObuEz9ftWaaWAR/g26+CfCl3ObLNOhQQwnUfauhC1BCCo8ATWpXwJ5NdtsCuscuEwXTob27CRWxnVDNLeTsj4ribitBaZVpPvG1c97bZR7SNO+pOa27/PuOXBVCTdKCDM7Db+lR8Y9LaFFaQmTzVbv5kXeXVxcKWFKWsqUVK7kmlP28pTJQVAbbGeatCNxqRFv3E4ex8JUDqlJPE14MdUp5KtYPsKrd7GHYnV5e014cbWgeU+ogU7VYRndFqXOZE/hVJ8WB6cUJGall3SpUxt9aga8dWAnUoidt+9JKxWHQew5nmg4qtk3gsNvNSlOHWqU+lJ3WZ1pUoJVI7mYM1Xa8ZTMEwe4B714rGNQUZ0pJ5B71lHNMaqRN05nUoKhyR2pZWa3HWUoLolO0jvVbu4iUqI1DfcKmsxiSi2kJIUYqiikgE2VmXSoSvUDImazZzRLR/eQs7QTPaq+exIJAAUdXccmk1YkdOyiBH6moUkG3VE3czMUkkErXxArBGaHUpPnCQR333qBfjQ0qNW6RzuB9fel04hzBSrf6g1Sk1aQGm6bJfdZjIBhWojf60gnMq1hKBskj5jsKh7l9Gsjcnkzx61gi7BlQkngzRTxRsyXuJVd46hpyCskjYRTP8AbfiKJUSR6+tR+4uzqMHVJ7GaTTdEIjUOe43pLSyho5JO9iLjgBLiQkjbfimS8SUVwpZCRwAeKDF8oJVJBP5V9+I1K3UJ5ntRk3Vgat0gqq8UXZKyJEUg9cKWuQuCBGo9qZm5geUyRzSK7rUo7CTtNa3bbRlseuP+EIWoOnmRxNNlXK1nUSN+KZuPk6U6oPG1Jruk6iCobbc1teTUrtnaSvq+r6lHPiYEnYVo78VnxF5Uzy29lXBLVeJXVnc6HMRKQGkqSYUEHlW8ia3ffbDzLjZMBSSmR71yr6tZHX0/zXimFIZ3tbpaQpXK0Eyk/kaFpPKJyIdmvJj2ebJ1KVKSoJn8vStQ86YM9lfMVzZuHzNL8qvY1vhlptK8IuXFSlwNQZOxrTPri0FZvvni4tSQ58xHlJgSR95q8ZKTyaNRdIj+GZ9ucJbQ0hpDiR67H7U9d6mqvGfBdsG0JWnc6jNQR3SEz5pO071glKUhKikpKeadqEnbK9fgI3T/AIzxeAKUj0HFLM3OsK3BTHHehDawFeWN5mfT0inbDsiQmN+1a/FGpsfMKRKgkiD2mnDLoQsqKdzzG80NL2hSdII9NtuKUTdFUq1cjgjep6zQ3R3hm2vwu9Vb/L1m9asXxbYkpCT8o2roL0psrvNuWmb/APaS23FAEoCtkmK5A9OsdvcMvNDCS6ViSmYjetlun3VvPeHp8H
C7i4aQjbQTIH0Nebz8fb9rplY/aOi7+Wsy2ST+FxZawngLWTPtSDT2dbRakl8OJAmRWouAfEf1PsnEJdR+J3j97JJ9Kt/LnX7NL9sld9YpCo30gyf0rlk+WObTDXyiWYj1KzHl7ElouUJUZ3Cp96UwfNreYMaZdcfLFyDIIMGap3NvWK1v8YWq/dDCzsUHfueakOQ1YJjbqbhGIobSVSB4mkgkV0KfaPvQibjlG3uFZoAska30uqA5VyaK4Xjab8mSgATwaoBvLhlBs8bWEclKXpH5UpaW+ZWb/wDDWOJFXl3CgD35/KnylakZLszY/wAROmdQj616FBXBqmr1nN9rhBUt1ttwCSY5P2NCMOztmfC1IDymnhEqAmsuVrY8eGUlhF345hDWPYTdYe8pSGrhGhRTzFa4ufDTjOU84vYvhV2jErFwz4R8jiPb0NWphvU9SLMv34QwhPzEnYRuZNS3A844XmBlK7W6bXqEiFVvzQk6bpjfg5IxtxwaP9fMCcxW1ew5y1Uq9ST5FCNJ9TP1rVU2mLdJ75BeWpdo4SvQARpB9NuB/WuwObsg4Lna1LeI2iHHNJCH07LTPof6VrL1P+EFSsIxK8TdO4wpCVKaRoGtCeQkAc/aufnl6hTiuNe3yfQ/psf0yXpuT/FTa5PBqxaZEsut9ily1vdNxp5kSD2kVBsxfD/mvJV0k3GGuX1nqkXDBB29xz+lRC0x3MXT7O1+rAXXrN61ehTZER2gg1aqfi7xq7sPw+OYa2t1KdJdbGjUfevR4+KEFUFR896j1HNzS/uycq1ZGWMHetrZaSy4DHylMGo5cE2b4ASJnmiOPdXkY46pTTJRPCYNRlzGFXCg4tBKTwBxNVtw+zkWXoekp1F92fYE1Kel+VLrPWb7HDrZlS1PPAHSJIHdX5VFcDy7iWb8XtrGxtnbm5fUEtMMiSoz+g966YfCb8MbfS7CW8XxlpC8auEgxz4Y7JFI5Ur8ldl4dNMnMZGyhYYWygJ8Nsaz6mOa1j+O7qmi0w+0ytaP/vXjrfSk8JHr+lbO9SM9WPT7Kt7it88lpDTZIkxJiuUHU3Pd11BzjiOOXairx1kNonZKBwK5lmXVG+xk1e+XmKcHEHDBUvYDYetAmboHnYcCKcB4JTXopJo5/wDcFvx/joSFEJ9u5pRq6IkgiQIAoQLhAUCdvUEVmLr+FJA+lZYwmZt/6VgevXBd5k7xHpXgckgFW+0kU3W/pQNJgnmkVXYQoyQfY1nS2ah8p394RsY70mu4lcqTsBxQ83iJTvxtM1h+MSFiSJpYxcmNXmwiLnk6dyeTXyrhTK/KqJG8b0xXfNqTAIEGd+KZLxRtKijX50idqdx8MRoNOOp0Ekk7cmmq7kmR+XpQd/GEhI84FJHFW0oJUsAK3HrU1CmjOkrYUeuVNmOPc18b8JTEbH0NAX8ZadVo1hSjwTTN3ECkFQd2TEBJqqXZ+4eMsUSdF+dR1bj3rIX6FI1THpNQpeKqU4T4g3HBT3pdrEyCSpzcHimSTQkm4+SULxQSBq/OvmsSK9plJ9DUOvMUTrSASlUgH+tN28ZWFFCVg99zSOKk7QIxJ+L8EHUdXvXq70K3kwe1Q9rEl+GpZV5uwB4NJv42VqAQ5BO8TWcbVhd6Jkb8A7bAe1Y/tFBJ3A77nmoc1jSkBR1FXbnekE4uVhRV/wC0DsKS5eRqvBM3cTSpJAB9d6bKuUrMyFe4qNIxYlJUpXlHM1j+1R/CpEe5oUvI6zlHeOvq+r6pgPq1A+NHIyLbGMOzE2yC1do8B4kba07pJ+38q2/qHdWenVt1QyVe4G+vwXFgOMPR/ZuD5T9Ox9jSSV6Ac22LiMMukkgApPHFaidVki7vL0hs6i6TKuK3IxfLd7li9xjAsQ0tXtm8WHdBkHeQR9qmvTr4RumWecvt3WZbpp25eAWoC6ca8x5GyogfSmi1FZ2IksnKcsLa1JSCBJgczSbwWmNimdzvz712Stv9nL0Dv7hOla
koMShjFVJk/nTPPv8Asw+hzeV8QusMdvrG8at1Fu4cxLWlECZgjeqLkUkWWDjqAdSeZI+aazQFNKJJBEwCr0opm/BBlvM9/hyipwWjym0e4HBoS4srQCjaNjtTVNZY99sjtCyrsTq4ntWXihpJ21K9OQKYNPqQkpAJ0j04969/FKUkpUmFxsRSpu8gTySfLmZl5fvkXKBqSny6eZ+lWrlv4kH8BuVLbtdA5JmqH1rCZBICoJ33r1DZAUTJB7gyaEuOPI6ZmzbvBfi7t1FHioT4gIBOgVP8K+KnCcST4anQj3mK0HSjQQko3PYCnlmdK0rRqSEDYDsa5n6aD8D9jbDMObrXG8XduWngUq3gHgdqVwbMN9aLAsr123kzCFkCtccLxt1pA0vKMneDualNhjzyWkn8QtpQMlSVQat+NdRMmzuCZ5zG0R4V664TzDhBP6+/61PunHX7HMlZpYuMRduXLY+VTaiVJAkHitcMq5xNuwkJvPc6ooziPUBLikBwJUU8FH5b1F8cZKh+zjk336jfFZg2KZVUnD3nGn3EBIKUlKgTVNWvX662SvFVOCJJUEn7cbVrejqjauWoYcbcAndZTt9OaxTm7B7lwAOAGATKSK4+T0ceRp2e56D9VfoYuH41K/k2Ezp1PxPM+Cu2LF9pbWCFBoRqnnjtvTLolm7MdpmlFgm/fWyIT4S1GABx9KgeUlNKuUrQZA7K7VKOnOIotuoi1J3lXNc7/S4du7k3R63L/VHK/Tv08OKKT+jefAeq37HabaxJco2BWo1Z2C5jsMftkPWlwh1KhMA1ot1mzWG8vOFD3hORCTNVP0q+IHNeUsWbQLl2+tAqCFbkD616HWXGva/+D4y++Wb/APVL4Z8h9WFrusTwhFpiyhH7SsQGnj/mMQr7g1rDmn/Z7YpYXq7jCb+2xe2CpS2v907HvOxq8+n3xQYXjKGGMRdQy+oQQswZ+9XPhubcMxRtK2blBChPNKuSDecMd9ls5p4p8HWfFYuptrKV2tobBepEfnqqX5R/2f2bsauGlYku1y/Z7FXiL8V2PYDafvXRBN4wsAh1BB4M0lcYtaWqSpx9CR/mqveKX7hKrwVd0Z+GTJ3Ri2C8NtPxeJqH7y/uoU4T3A/wj2FWLmXNFhlTDHb2+eQy02kqOoxxVb9TviWyp05snF3F+2t5OwbQZUT6RXP3rn8UGO9Yrt21ZcXh+ClZHhDlwf8Aq9qW3LEP+zfyHfij+JK66r467heGPFGB27hSdPDqgY/KqAfvClveYHM8GsFIKxyd+azTbeK2SuZ4G+31rphCHGqEk3I+avVeENiSCe8jmlf2mofwmPemLbZYMQSTsRFfLCwAYMDeuhO39E5RxZm/i7pVIMSYBFKsYisJ1Fcd9+aY6CjzJT7ifesHA4hGoT7mmqPjYUmwheYw4zp3KUg9+9CH8xOKlMlJG0nvSNzbPOqIU3qB3BHYUgMJffc06YSraVUU01oCh9iib11LZXrJnt3NNbjFX2yjUtRSrt6VnfW9wlTaUoIQnZQnj6UgbF5wo/d894od6dIZr5E14w9qSoPKQEj7GmbmKOulRU4dR2ICtqcXWFL1kBKtvSmwwFxClq+We5PIoynWBUuqMBibkqBJKxBBJ7V6cXcWvSZCYjbmsBhK21zpHH50q1aJDK/ECtQHY/rUt/tD1W2YuXfhOCF6p2rNV7qUFFXOwA4oXd2qkqQQoiQSSd6zDi2WQFiQDG9FSybqnkzVfFpxMqMFXCjtSTeLOBRBcIQSQSVUOfuSUKABAB21cg0OQtbrnCpPEnaqUtgpskq8UC1KPm0jkEyB7j0pq3erIJSuBq2Emdv50E8colAKgR3HrSS7xYBlUJAJmP1optoLWcEqbxlxJTqWSDsPN8tJqxjwzGtO5nV3mo1auySgyT826e/t+dZrGp5MSjtt+tB1eUCvBJBipIhKiSo8Tz618vE/DVpTsonfam+GW4WtK1EpgccGKbXocLqyjUfQAb
ilpeQ/CQUViZWxp3JggDVtNfHE4iFlAjiKZMWrhQoq+Qd+wpVFqFCe/BkUjtYsaCR+hqRX1BFW2KIgN3Vur3cYO/5KFff8ZQSP9zcEbbrR/wD9VGpfBzflQbr6g6rnFkAH8Iws9wm5P9W6AZxzTeYPl66uH7K4twE7rbW0rT77qFZW3VDPkijQ/wCIW5aPU7O7iLkK/wB/UkLb9QhO31G4rW7/AI6ysLtcwXzDZJ/dN3KkpmfQGrA6uYo+ljGLqT4r7ztwVLI1Ekk79p4rUe/zzjaHl6LpxtsSRvTVJaGisGwFtiObbR0OsZlv0L7KNwZ9KIX3UTqM3hr9q5my9urNYOph1QIP3jV+ta0W3U/HWhtcatpk7inB6r4s4nwjcAp/ikbk9/t7UFCUlZbSyAc+KcdzDcOvKC3HSVrV3kncio0lWhPlnSk/nRDGcWXjFwX3WoKdhHA3pqtZKSRG35GqqLSoKwrG6B4jilJOn2nmsm0EkQsmREHaKcMoKQpRTJ9BEV822B4hBAnbftReFVi5bdGCNSkQoEn5YHpTlgBhSNJAEfLzNYKbKGtjG1LNIIQFR5iNiexqOR9DttsKSDJSSPWl0WyGzIH1ikkoUoBQAEidPNO7Z0rUlJTB7xRphpjy2tAB5UbciKIBpamwQYMRHJrPDWdTqQNRCjsIqQWmDAObjn1PNJJvwAB2pfbcQErImeTT5/EH2ilR1GOQT2ouvB0stlRRCeSTTNeHuXYWEgCdojtWtPYUI2GNBZUF+Y87U+bx5DDrWlJIJAO+9MLbBXWXtCmynVsTEU8/ZAWFBSSghUggUXXyaqL2yHma1NogqWFEo5J/OkLHPqcFzabvdOlW0Ht6VW2X7Z60ZCUa9huQacWtm5f4hK5JJG5rRk37BdOyzc2dQLnNupKiS36TU/6QZPbvMLcu3GwtwCZUOKre3wxjDsL1bKURvvV9dEbhtWVXlASoJhSvf/TilnEF4wQTOLf7PxlkN/u1Bf8ACY2271ZiM343gGXGX7K7cSsAABSiY2qo8+YipeawQDpDgkT71alxcOP5YZCUnSEyQBNK+OMssGYkdd+JrO1ncIYRdtjeFHSZ+29RXOnX3O+LamXcaUygjllOlX5zQDHrFNvjyUFJRqEgelR7M6ShUAAGJHpSrh4k7oK5G8WR3EcRucQui7d3Lly4r5nHVFRJ+pprboIXOrY9jXqngkqlJn9KSZd8wgEn2FdDVA/kKJSlIVG/1rJLew77QNuKSQsLI1H077UuhYGw22rW2CMrTMPBSVSZVHrXuhPJHHFfK2Ox+tJl4AbpP+btRV+TbRkQmOdh25FZJSnT20/Sm7q1QBG3E1gHnAAkGRSu1oZfQupKUpkjnaPSk06G0kDdZ4nsa8cuYbERv+dN/FUCDAVA796LFWMnjjYWZIBA3n3pIwkxsI/w8UsQFCCSUneDSLqCrU3qneQfShdIzd6EXihxP+GTx6030lDZU4QVdgrcU7UlMJ1CTMAjtXy0KUqCkbCdqPnAzwgQ4wtokk6jElI7fSmRZeIlSJT6yJFH1W6lhSgD7AisVYaV+YJEdxQjh7FdsjDlqpayShSSPU8U0XbJeb1GT5uO/wBambOFIdCgtMCJIJpL9itKWqNWn+dYyvRAHLFw6v3YMGNuTWTeHLaBBJ1qAMRxU6GCBOvuDwYilGsHt0iTJPPmFNGdvJRlcrwVcSpBKvX1pNGBKVqJQpQIirJODtIIV4Wx2+lZNYey4jUlEKAgAikcndIWmivmMuOiFAEEbD6U+Zywo6VhJT6k96nAsxuQkA+ppQsICUkQQSd4ouTCRVnAClZJBAAkSJkn+lOEZeb1KUqPMYhQqSKtSojT8vbbg0mq2OriVT+dK29+QUBU4U2iYTB+VQSdq9XZN7AgSBHFEENKU/O5QOfWklrUhRBCp/8ATFNFfLD/AAd44ryB6V8J717WJHhAqjfivzHa4T0rxpAukNXRaCUo1wVSeAOavB0wg1zV+K
XMl1iPVTMjHjvKtWlobDSlyAQN4EwKpBN5Ry8zyomnvVXqhc32OXGFHUGwBqVJ77xVSX5Dq1DVySPqKeZ/uivNmILgjS6RtvEDihS1agHCADG5prt0zpisAZ5Ja2AJBVz2pstJCllOqRuCRx9ad3ohQBVpmSIpFJSRPialFO4HBHpW65pFkYoKVgJSVJJ5HNLDROxJjgCsUlBE7gjgAxWAUkkmCkDYEd6W6eRXekOGmS5qX/GkyEnuKU8JKUmAQon0pup9CgIO/qDFL27qTpSSD6kmKyynQVhWOEIBkfxD3p3bsICU6RJ42pu14TiSor5G877URtblkfMN/XgUXFrLNdiqW1BEJAn0I3NPLeyShYkkTE7Um2+044CDMDgjmiTV026AQpKdP8J2ipt2ZEqynhKLh5BcKUInYRvUqxHBG7NYWF7GNIFQrB8YS0hK0qEg8z/Sjz2P/j9CdYjjc0tLyNfgJWGFLxa5DKVAoJ+9PXsBcwZ4jSDt3FI4HdeBcNrSQlPOx4+lSa5vhiKkiQTETNN0qLpG7EIeQ9eOyhAPuBE0Tt2ktolxIChHKd6lWD4EXUuKQlJIGwKhTLEcuX7uINpRbnQTEJUN/wBalb8g7OxxlTBl4hcLaCPJ617cYT+zMVdCUElJggjirSytkq+tMMQ6i3SXCmVBKwSKEoyvfLzE4q6twhBUN9aSI/OlSV3YL+CsMfxm5ZYKW0kQKvjobi5TlJZCiVxv6Hb0qNZyyFaW+DuXGpvWQTAV+lD+lOLIwdt61eUUpKYG/ftTyb/ahcZMc43qlZmKtASQsbx+tbLZSwZm4ybbvOLCypIJjbetV8yv+LmCUplsqG/rWy+V8YjJzCCIKUAesbbUIqsAdFHdTnRa5l/dCEiY71X2PvrfSVAkEiZPrU6zwg3OPOKWCIJ2IqC5jR4KYRumN+0CqLVUbHgixnUqT5VbxXyQQFQuNvlrIJhUnzDtWDXnWCRpPFHs2qGWNhC3MhMAinSIUCR5TPFJWjRSnny8RTpIlMECaDTFtVY2BWEqmFT3rIALRxBAgiOaWDMk7COCO1eqalMbfYUufI3YZga07jTA596TKNJCiY24NOlITMqj2pIoBIhJKI2iivhmVMaqbOoKBEevasQkqXuIjt60/atIRqUNuwG0V4WQVJI5Ige1N4pGtsYBlbaQFkk+kdq+aYVrUSrV+kURUjUnSATG3FfJtStHYRSVbyZUkMfD1H1CeRSiASQJBT7jelAwpCjHM80oGCBM7HgU3Wngzd4ZglsgbDbukDcUuGBKVR24jk/WskoUdJVBPGwpw00qQmKEgJjdTIKflE+21YIbAEEb+oFEU2qtEEQPeskWoUkEjehl6NfyCl2ytUg/pWJtTIJSCTtsKNJtOO57ACvUWO0wY7UUlEybWwELZzgx5d4ikl2qg4FAFI9Ox96kSrMAkEbHY+1JmzEjvU022NfgCNMGduBzShtgpQgiBymjIsBE7CsBZpO43HpTWlsHkFfh1pSdJEcARTV1lSJ32BCgQO9SE2gTwmfakXbDyTG/IEUaTkCKcdgQNjwwQgyTTX8Iok7jbbiakX4EiPKZ5ivhYkbQaVpFv5O2VfUmF1mCaocqdid1/YrPsa5QdeMQ/FdS83PhYKRerT5TOwEf0rqvi7nhYe+sjZKCSPtXILqTfJuswZiuUiEuX1wSY4hZH9K6ONLq7OXkzyI1EzY4XsVvXVOaleIefrQVkqWyUnmZ5ifpRvEALl+4W2J1rJKiIH1FR6FW1ytJIImAmNgKR/uO2OhtcslxxQbSSSST/Wm6rVwEpCSFRyI29qn2TenGP50xJm1wbDX8Qu17Bm3aUsmfpxUxzB8OOc8s3ATjWD3OELWPL+LYU2F+oBIg/b1ppVbyVspNFu4SlKgdhsfrWarfYpHPAHrVv2vRXE3SUANADjn/AEoqz0ExVaQuGVbAHkUtYJt3sohFmoo2QSod6Ut7dUEadQO2nTNXsnoLiiDpDTSlGZ
AUft2p5b/DzjqglSbRlweoXv8AyoJVhjWUKww4rYNke/ECnzODXTzZVqKRz61fdv8ADrmF1Ki3YNL9QHN/yinqOhWZrJkD9mJWUgeYOJgbVpNPTCvo19/Y903CwFEfXas0Wty3ElRAEzzH/KtgXujGY3m9sLIIPOoRzQTFOkGYGWSFYYUKJ2BWkTSKWKezWvJUDD7yHCEkjtFH8PN0pCUnf3FSa16S5g1kqwxahMTINSTD+m2JWyEqVZOIB4KiN6DfgzIo1eXzDJUhQ0pG4I5p7bY/foUkDkj5inepvb5LvUWhSu0USoEcbGh7eTb0LJNovbeTH+tNBuxWkC7fO+N4eEFtSeNtSOaftdUMeUlMNMcjzFI2NPrrLN04GyLU+UcADfasrfKbmnUu3WiPmGmQPrWlElrYUw3rzmmztizrtSBsJa5HvvTDFOtWYrnX4qmQfVDUGhl5hYZKgpASfQ7UMfwpAagqCVQdjQUUjJ+T7EurGO4gotv3GtExpnn7VlYZ2vUI8VtKUkcAn9aiF/YhlzUFhR5B9ad2TTrqESAABufWmaeykaJcz1Dvrm4BWykgESUnerNyp16vrBpu2fQFNJGkTuYqmkWpQglEAq7CvG2VoUCrYau+01NtsNIvDGM0s4/eC4bB35J5NAsWuQ6kahx61GMqLfeu22AFL1mE+5qXZkwW7wgt/iGVta06kFQ5rKS2Tpp0yJKQkKUkDSmZrxLCEqAgz2BNLrYB2O006trYpAkagf0rNYsdM9YSpAAn6g05CQEyeKzZZ19gAaXVajYJTv60UrVoTTyNwNY2rxQ8v86ItWRKNtz6Vg9aFPtHas3Q6rwDCjUIjavUtiDtpA9KfCyKthJNLHCnQ3q0k+m1ZYCgYEbFPbivkNaSJE08XaqSogjcbb181aKW7EQTtQabdmbGaWQ4v+dPE2wLewMnj3p49ZNMrS3rBI4E14pBSSPSi8oDdDBdoNUcSIpNVqUqG59BRAJk/U04at/HKU6eT+dDwBfyDGGFFQBTRK2w0rAVG/FFbHBipwQmTMCpFaYEpWnaSfShb8mXu0RZnDDOkgEz60/tsLbhQ8MDtuPzqWDAtAC4Orn7U7ssvrSUqKCUK51CinSwL9kKODhCCrRCU/nXtvhaSmYJE7RU+uMAUtQluB39KUs8uh1ZUEiNo2pY5yw3RAbnB9ZKg3IIlUb0ijLqlkgNnfeZFWk/llKWVEDttHFMbfA/36ClBHcgDenMlSIMMrEpISmCAI96ZO5XW07oIMnjaKuhnAkqTugJHPvQ69y4UvpIEE1KesDZ0VgjKD60ARAiZ2pu5lxYc06ZMwSN4/Krts8vI8JOpAUIMzWX/he1UA4GhO/I3G3apq0wpFS2eSg82A6stg9459p9ad/+AmWydRUVEyduKuS1y5aFqCEkniT3oVeYV4VytIIgH3oXfgbqpLJ0JJNZhUUnI5rJJPNdZxpgXOV8myy5fvqOkIZUok8cVx5zQ6bmxv7hR0qcLjhIPJJJ/rXWPrXiq8H6dY7cpA1N2jq0yY3CT3rkXmt9LOXLiTv4RMz966FcePBzp9uR2a+XDGl11IAIO30FSfo/0sHUXOtpZrH7laxqPAAqIPXJbcUlKjt6HtVx/D7mRvK+Zba9WRpI0hQjbf1pJzcYtrZ6MadOjoxkDppl/oRhllc2tg06/pCVJHIB53qx7prAevmG/sG7YatS60fKlUrSB7Gojl5LGdsNtcSQ6LtlaErAG6Rtwff617jLLvT+8GYbJX4VxlKilJjVuP5V82+XlfL9F+sOtvZQWeOg6uk3Ue8wJ7ETd2y2EXdo+tAClNrJEK9wUke9Vhn5y7y9iARb3gU1xAQkwfrFZdZOuWJ9Rs+O4q7cqBZbFq0dW+lJPpxuo8VV+PZqW4pLl26V7HcmYr6CLSgr2c7TvDJKvP8AidsAFXKFKjYlCea+b6z4xZGEm3XAMamf+dVgvM4vbjwmjqJ5I2ova2nkC1mTPpvS9ryzRV
YZZeFdeMfQr+72uk9ijvRxrrzjiwPFtLQ8bhszP51W+F4cbgjQlJ9jzRlWDPMtk6QYHE8UrrbC1m0TC468X6HCE4XbOHsNJH35oPd9bcRfXquMHYSCNoSeajTFqtq7SXEDTM71ZeGZUwvGrVLob0Ejf+tEzV4IUOsbzMKOFsmTv5qwPV9u4XCrFhEq4BmPrUlz90bYTgZvLGQ4kaoA5rXjEFKsbhTK5bdRvB5op5BSRd7XUFm8P91YEA7AmsV5hSo602TUHtqPFUYzjy7VzSFK3IUd6kuCZ1A0tukRG/rRbsXo3kujI7ZzjmC1w/8ACBDLigFlBMkT2966a9PsByjlXJVhhVhh1naW5YAdt/DBUskeYrJ3UT71zf8AhuzPh7WfrPxVo0O+XWYOn/r+tdH73LTWYsJZdwx9FspxAKbhIOsD6jvXHzcnRWsstx5wyk+qXweZZ6huXb2FaMPdSDDjAEokTED+Vc9uqXSrGOnOacUwO+QHH7J0ta2gdKxAUlQ+qSD7TXXa3u7fp7glw9eOtNr2UXN1FcCO9aGdQOruB5j6jZhvLxpxaLi4KAU7iEpCR+UVvTczkraGmvCNInMKuHbpS3QpKAfl3+lSXCsvuutDQkgfeDWyWE5LwPOOJvPaIZRHzAeaeP5VCM8YlhGWr1Vnbtha0GCEJ2BFdTnKeETtMrdnLzu4I0gjk07GGWlumbgp2EQab4jj93fEi3b0J2AMb/Wo1iCLp9cPvmSf4juaZR6umC0SwZus8Curd62AW804lxA7SDP9Kv7qznbCOoeQ8Ju7O3Sy+1DpSOUynzJ/OtTbWxHif/UcO81c1u2G8sMiNIO5A27VnH/onJKwLbWKlOTPl9DRZnDTO43mN6c4W3rZkGSNzRNFkRJUdJPFDeGOsAs2BQoQANu1OmLKI8vl5JVT9Fpq2PHMzzRGysEmJSCCZEmh2aQzyhrbWsN+VMBW2wia9dw9LiQSCd6lLWGBKU+WSRPtS6MOSpKVQJmNUUt0sIVP6IjYYWPHBCAlHZVE7/B/wtrrI1JPeKkTOENpXJAM9jT/ABPBvFsVIQUhQGwitbfgP3ZVDtgFyQRHYDmh6mF2zmwJP0qSO4eu3vFoI1AmCB2NPF4c2Up1pCu5BETRjcV7kM0iDizW7deIsGTtuOKkK8LSrDkr0DWdtxUkZy+062HGkBEDgbisBgtw9MQlMRBoybkroHtIzhuAm4UdUQOZp/h2ChvEEgkKCTAkUcNsmwISW4TxNK4Lh/7QxLQk6VncT6Vk8CtfAXssBSsIWEgk8TxUhtMD/wAUpQmPyo7hmBItGEgqAVG9Gm7ZGgAkEjbii3g2ERVzA2ijSARBkBI43p9bYcLdEDuIM81IHGGi0YAkTG24pu2lMKMggUsZLwHK8AZ3DQoj+KTsBTqwwJtsjcAU/QpKCJMqAr21vdDigQNMbxU+6cqFbxo+dwhDZSEgQRvJrFjDLdDihoG/E1l+IVrUSdh61gV6la2/UTWbawhuqlsIPWNvpCgI7TTZ3DEPqQohIjgjcivVXCk8mUj32B9abrxBxKTEJH1pXjLM2qwPCbdhRBSII/6NJXPhHSUAQO9MlvKUrzkBZ337UgHySQo9tiDQavWzZq7CTi0+HCCAfmgesUxN61PnCdXeaZLvHCVFJiNhSP4xDxKiQDO4VzWcb2NfydAI+9eisO+1KA7V1nDHJTnxV4mcL6NZmUCE6rNaNRAI8239a5N9SLsWuWLgEwVIKQSK6g/GxiQs+jOJtRBuFNtaiowJcT2rlj1dfSnLjoJ0gkcn33rqr2I5+Nr8kjX6+u4WVKIBPGrirW6YYO7i9joQpR1eh/KqXxNWl0RGqTA5EVenS3FV4ThaFoTqSrSVbbpMVCUV4PSd7RfuQ+qubej9ooIZ/aNukeVtxwoIE/Qj9KCdR/iYzh1Gs12bq04TYOHSpDThU4oemraPsPvUYxrPNxidr4Z7Jjc/w1FrXC7jHbiGh5BB1Tsa448Ee3
ahlJvDEWLhakhIBDYPNRPOmN/h0qQl1QJkQDJA96nmZMG/ZtgUpdU2uDMDn2AqtF5UfvHCtSlLCjvO5irxTTbSGxojuC5hcZxFCRq0k8n0qxM1Zvaw/AUOWy9bpSANRmTQBGSPCQeVEH0ptdZXdUrSoKj0UJH1p6ay8AxY5yrnXOd+P+GM+Iid1LTO1WTgrfVPFkbWbS1ESdLC1D6bA006EZ6wvpvmawXjtkVYYlY8V4I16B66f+uK7G9FcdyXnzKdre5fFlctaRqcZbA1GI1GAOaH4+ytkeTl6uqs5VMdOep92gLVgr0GDqTbL5+pqyMjt4tgyEYfjFs7bvqEp8Vson1j1rqw3g9k2DpYbEmflFa3/GDlTC7LK7GMMMNMXrdy2nWlIBIJMj7zRjCLwSfLKLTawUzgDbWJWDlq750qBSQqK1V6/wDTNWHXT11ZtlK0yQZ5FbE5XxnwrhUmEmCd+KAdWnrbE7ZwIKd+/ekjLqzpw2aHMY8pm8Db/wDCYV60ft0KdT4rSgv3HpQ7qdl02OKuutlSQo7QNqjGEZqewo6XVmODtz9Kq92G8YLQwLN2IZXxFi5t3Sy+woKQqJBPoR3Fbu9I/j9uMKwtu1xCxe1pSBKEhwH9f51ojhLttmO2Cm1w4Ezv/OKe4DiT+CX4YuPlPB7GocnCuV3oKnWzeDq18ZL+f7B6xwqxdYW75VXFzCQkeyATJ+u3tWsmMOLauGV+IZKt1HknvSllaIu0JuUiSQCSDvTPMSVFbW8ALEVSHFHiXtE7uTwWxkC9cbtAsLgxBk7H0qtep4bZxJtwKSSvUSRsTvU2y3dCzy2VJUA4QUieQf8Ao1WeasBv8euvFU8QhHyoG0+u9C8hf0wVbX2gQoSgDtzQ68Wm4WFkQocbzTpnBbq2JCgSIgBXam7qVNxICO0V0pKrFp2L2gbQAY8x4j1qy7aV5dQv0AJB+lVS2pRAhRgGT9KtDC3S9ldMqPHHrtSTkupuuNh3LiA5a8c943oulIcVHYcT3oNlZM2eoqnYCR60bQkEjeCPU1z4aKYoVQgoRAA0d470RsygqEmI3AFMW0eSUzseKXswppRVPJ2B9KHmhL8khZWkJ8yp7fSlkOJUkgdj60HDxJ2kg788U6adniY9jWp3RnTyGrNSQpQ1AL2jelLi8SglJII7b80Kt1KcWVBRBGxJFYvPBIkyZP5UFfkzG2IYaFqW+gwSN1RMVGbm4cXcaBMAxUu/FpcbIkj0E0MVg/jPrcQoD1im+h0EcKuNKQ3HsRFP0PIRJnfsRQ5DK7RYAV/7hTtspehRlKxz6UqtiOKWhK/t1XxQpCAIO+3NGcEw9qzX4hACvfmkbNQKyAdpp2X4EEeYK2g701N4BbZIHMSKQAkbjnvT+2unNKVCYPbtQC0dCkbg9qINPeGg+aB2kzSyk1hhxYWdxFxbB0gHaI70wF+pLSj8pPB9Kau3pEme+x9BTJT4dcW3JCY5oVkbIXZvm3AhROo++1Ym/b8YDTt6ig7riEL0IVKQd/Q1gm9KVhWkQdtvSp0oPAc0G1YhsZP35r44iGdKeUqG/vQZV0lrUCoEd6TTeBcRBA7zVKzaEla0SD9rJCCeRPApFV40uTqCVH1NAvFLiN1EKT/Ee9D7i7LRJ2JBmi42g0k7JM/ea/7JQSsiZ5nehl3iK21fKomOQdge9CVYmGxrCgAImd96ZP4424YKkgz671OKrIWkSBFzIBUoNkbBST2pspxnV5lpJ9aAP4sjwVSvb27UwONN6iC4JBiZ5rOVaGzR1LKieBWWrt3rBIj0r2uqjzk2jVT4+8QUz06srZKxNxfNJ0knYCVf/wCv61zA62XGnC225jUsJk9xXRz4/cQ/4fgdptpcuyrST5tkHce2/Nc0+t75/wB3QSkpSSYHc8V1O1RPgdt18lH3Z/3oBShGrbarlyrotcFagQNk794HNU9coQq5YSjzpBEpA96t1pKmMAZUhIlUQEnmklFLbPTTtEhs1O
YxfM2SSYWQklPatgsF6XLwbLiHkpSXlJB0ESeP51UfR3Brdi9F1euJSoqB1HhMf1rYNPUPDGIb8dKtOwRXNK79rEbTx5I1hPRG4zLdhy9X5CZiO3cGrAsvhcwdLJ/eJSqJhCdgfr3/ACqTZJbxrMlol7CsKdeZIK9Ugbbb7keu1SpaMx4Ygl7CHggDzKRHl29OaVqSVg7pYbKFzR8PbeGBamYdSBz7/lVYYh0/RbvLbcSnWonaIrZfOHUuwwvD7gXCii4QCFNuJKVA/Q1QlszmPPt5c4jY4c6MPSojxSICvp6/ajxy7ZkCyN2WR8Lt7pLj7KSQZOoTFXd0w6znpgQnALS7uVI+dvSEtH3/AOhVQ3a7jD7txm4TDyTBnc0ay5m5vCCpLlqXQrfymKPJ2bX42aMoy/cbCO/HjmpNwU/sthoJ2IUSfpUN6ofEhmHqXhiEXuGFqxSsLV4baiJB2OreKprFsQViV8q68NKNRnQBtV69J+tOVMLwlOGY7ZeEpQCVOlAUhX1FFSaiK4w2kVN/44StubfUlwDg7TQXFszOXrbgWogzv/3qddZ15NvMXbvMvIaYdWs6m2NkqHrFVbiTbS0nSSlfJrKnsaNJ2Vd1MSLlC+52kk8VUDGEu4vi7WH27ZceeVoSj1Mdvyq1s8LLaVtr8pO8qHP0p/8ACRlNOcfiJyxZKbD7SFuPOpdTKdIQU7x6lQH3q8SjfVORXr+Wce6Z4hbLvbd62acBUlKxMp7we9Tty3azTgwubVQL4TqH+ldcevHwk5X6v9MHMHVaN2V8y3rtrpCPO0uOR6/TvXI64yZj3R7qLiOTsaYUi5YWQkJBKXUfwrQY3SRVP9ujlU/ybVMNdO8yOOrXZPnzpMCZqQZpWpu4YBEjWDA371X10h3AcxtPoBSh5SZA7Gpbjt/+Jatl7zq/Wuadq6OhZeCyMHKFYUgqVCSDsKY4+pKW2w0QkDeQOaywQ+Jgu5laYlP86GYvcwkBRg9p3mpeAu7ywa4+twkbEe1B8VYSsBSRBj0mDRB56NOlJJOxpi+kr1BMqn3qibWgJaojz+ptYIG52M1ZOW9TmWxqBmJEmq0xFZacAAIA7GrGyc4pzLyp2Pb6UX7o5ArTZIctOabeNyJ4Ao2lxTh06hsZ27VHMBeCUKGoo9KMi6CHTK+TuoCp0N2t0F2HlKSUgjVO005ZV4a5WQQN9qCN36Q8d9hwadDEA6rSIJ4iKZxzaYmwx4viSsDSI3Bpyw/pSlKd0xBNAjiTY8ij5jx6Vkxig3Snt6UnkbfgkibgJWCPlH60o6tC9jBMeu00Ebu+5VO8fSsDiSAtQSogc+s0yTNlMLpTIWSCDwKc2aQkEpMj/CdqGsXyNEFWr3FKtX6QVd5O54rRu8jWx8t8HyhEz70rbuIBCTIA4neg/wC0mQTCwqPtWdviTayFJkAf4TSpq/s1vwH7ckuKIMg9htSy1Qd/XsaDDEm0agCUnTPakHcyNskpUdSpg7xFETZL0Ogsc6THI71kbwt2sBcqB/i7Cok3mFoCEr8vvXxzE1tqUAAe5ogJCq+gKkme3cGmysRLYWVHSPzqLrzSz4xUlwER3nb3ppfZkRpIC/fUB2pUuzY9aJcMW1IAA8225r57GUtpSFAc8g81Xa8yqDykqV5TuPrWLmZUkbHcSAoUVBt0wpk1vMwJYVuTKvLzTYY8rQYMzuKru7xpxxxUqk+s1gnHnk7FSYgAE7ml6NDN0T67zS7bWoXJ3JhI9I7mhLecHC4QVFQ7z/SokrFnLgFClHTsdzSP4otzG+3NN1cdmq9ErxLMhcb8vkWZHFRxzHVpV8/mJ96HPXZUhSlKJNM1LIIO8HaIpcfwbqkG3MyOFGy1DkT60OVmFxSiQQN+9C3lGJgesGm6HG0p3SZO/FN1TwkaKR3A2KZ4FfEiOJpMAJ5gCslHymqUeX2waH/HpfB3NeAsTsEOrH6Df8q509bXD+Ntkx5BMzW+Xxt4iLzqhZs69ZYtT5
I+WVe30rn51gufxGMBCjIbRIEfpXTKVSUWU9NXUrdoH9qNBSQQlc7cEDer2sMHF9hVkEp8ukGOI2qicJCXcYY7lSu9bi5CyoMSw+2WpGkeGk8eomtfZ0zq5JdFZWWdcecyphqWbUQ4EgRMb+tWj8G3RnHeteb0Xd6txeHMhK3te3lkEJ+/8hVVdVsq3GKZ/wALwxtOoLUQoesH0+tdcvhA6PW3S7pdhyPACL65bDz6ymDqIG0RPpQcUsvwQfKuvt2y38rZQw7K+EMWFlatssoSAUpSN6bZ6zDg+Tcu3WJ4p4TVqwgqUVAD9TsKkVy6LdlazwkTXL740/iMuepnVdvp5hDh/Y+FEOYgpCpD7syEbbQkEE++1JG3c5aRzNK+i2xLO+L2XV3OFzi7TRYwZTkNhY8ziJmTU/zJ1ay9kDICMMwK3aexFaNDKQnZGx8yq1/YzBc4Yym1bMIAA9IFCsSfW88VrVKlb1P/ADXaOv8AH0ioiv7UXiF05cXStTq1FS1K7n2rJTyC/DfY9u1DAoE7kfnWa3NwpBg+3ancUgtBdy0WlqSPL6g0glrVtG52kc00OJPkBKoiO/NZM3JbM6t/rSRTXko034HiLBlm7YU8v90SNRjgTWyWM/DphGaemBxTCVJVibbYeZU2JCxE6TWrtw6p4AKNXX0j613eW8Gbwd8l23SClHsDtvQnaaE91X5Ncer2TX05VvHigt3dknxoKdygfMPrE/lVo/7LPCMOvurmIYjcPtpvUNhhpJIkJjUTB9TH5VI854KjqFcNYbYlsXF2tSFGRskgzP61WuUegufvhn6nYBmqwuWzhq3CHAle7rYPmQU7HuCD7VaHd5YZu49Xs7SAJ8P2rRT478vZWuMewnEQ2wnHW5QVogOeH6e4mpXmz40WsMyTptmkuYu6nSkK7DuSBWmOas4YhmvE7jEcRuFXF04oklR4+npQj/bshT5GsVRC89YM3fModZQAWoKdt5Heotiy1NWTABJgySORVjMI/HMFEagTpqA4/YusMolshCHCnxANuSIpZftLw9sqJnlS6WcNRPEfmKC5nuHWbtA2SnmZ70RyujRhrSQrzJTEdqCZwBKgTsQee1TT/wBJdpWObG4bWzp17xyRNMb66Zt0/ONzzNRxd2tkrhwgRtH8qHrfW4o61n0g7A03Rr+AWvA7vb4OGQmVcb9/pVk5Ac1YEsa9wO/cVULhS7qE6tParQ6dOp/YZCRBoSyZhRN+bZS9J4596VGKuLAJPPahTypeXMIgnzdjXjailWxgHmaZLAsVGg0ziyiqN5G1O2cX8JQ8351HEODXrTPvPalg6koUNW8cUqdo1ZoNu4oXF6h//jxWbOLLQqdUTQRCyEyDvHFZKXoE9+4rOqwb6JMMcKhGoggcg03cxhSlyDB/lQJL0J1GN+3evvFnjj1Nbt5YyVMkTONuI+VcgdqdN48oaiVHcVGEORwZ70p4gAIUoD3mmTjdsF1hBN3GFgkhR825pe2xt0FJKiAD8s0DC/Lzt61kHQIg88UOqtsLwSMY28NatUap70yXiTjp+bf1FCi+rcathzXmvfc70vn6AqathhGILQr5oEUjcYmsggr+00MU6VbTzvWPO439qdVeMAaW2EBdKKVEnaIia8/Fr8I+g2G9MPECTHt3rIKJHMH0FSuhllCy7grkwSo96RW+r0UCfSvtekRyOPUV9ASjcT7mjHKuxbjdGGhRPcRvSnzNmBqg+lYOrgkivA8CngH0orCpjVbs+SsgfLpjue1e+KrSSCZrxS0k/T3pou7KVKBE6ePehmL+QeR2nzc9/WvCjS2VJTsCB6701TckoJMwBIpJGK6ZCkkbxQrzQ1ji4bSUzwaHLOlRmT9qTfvC47CZiZgdhTZzEihahKee5imS8ICa0juaQAJj7UncqKLdxQE6UkwKV1Gd5M02xF78PYvOFUBKST7CnW0eVLTOZXxZ4kb7rDiIQslLTKAN5A2mtEup5U/mF4pVISJ/1r
cn4j8SVf8AVvM7kg6XAiRxMVpXn14O47c/MkSUzNX5L7HV6WLUEyO5eaC8cYbVABV2EzXQPpRh6UYA2sN7aQQT7CP6VoVki0Q7mG31OatKgZHPNdBukjYdwFQTtpTqk9tqWP7t5G9R+y2D+j3TUZ6+IvCnHbdLrFpL60qGwAP+pH5V1HVcWuB4YFvuIYYZT5lcAQK0p+F1djlzMON4/eLQ2pIDQS5EnaSRJ96oH4wPj3xnO2I3+VMj3bllhbSiw/iLC4KyPmDZHInafrFWlxp50ebCTb6xWTYP4rP9oBlvJWH32XMsvDFcfWlSAm3IIa2gFZ7fTnaudnSu7vMXzTfYjiCi9eXLvivPcSVGTUTwLADid0gq+ZRlSlmVEnvJ5q37DLzOA2iHLdASoAHbk1zTna6rR6XHxR4/dLLJljttrlTStwBz60CW8v8Ajg+5o/ljCb3M6gLNkueh9T6fWm2NWysNdcZfbCHUfMkjealB9XR0zrr7gI6QIV+VeWbraLhJcEoKtyfSsbjUtEgaR3CqF+MttwhI8vEdqq3btEo1ZNMdvLBdk2LdIS77cUB8UztCjPftQ9pSzAUmCfeiFsNSO0Hk+tLGPyO15FFr1AbxVpdBMio6g5kdtSr92ygFQHqf+1VuMN/EJ8hB25NTbo51AX0wxi4f0ki4GlRHtxQkrVoW7DvVrCXOjPUqxTYvqLob8fQo7Aao/KhGdeumN5wabQ+hhCWkwkoG31+tCurWc19Ss6KxZwQUtBpPuJP671Gf2UAhIMERwf60E3FW2aKi0uyG79+7eK8R1anFqPKu1IOOFvzE/c0STZhABUN+9AMy3SbW3kdz9Kipe7DKk1yJYNX9q68pSUjxQNzxtVf51xD8NdXdslAA8VxvVyCAox96Yqzjc4Lh0MCNahO3JoRj1y5crRcOCVqVJB9TyatacaJqLU+z0TTKTyVWSQCnb0oF1BuN20yABJNGcqrH4WJlUSBUb6gqU06FHyydgPp/3pOzosRwKGgEKkq7e9JusqCEuAwfU02afBQdiNW2/enCLxIYKFfSD296pbUUydv4G7vm31DxZirM6dkt4UtKlTAmO9Vep0lQCxPeB3FWP05WF2jwJJjYSZo1i6Cxy/dFNy4DurUf+VY/i0hIg7nkD1plihUi8dCp+Y87Ugm5SVoOwUNvWgsqvJNu2GA/KkAHnufSlEuhU8DSTFAVXRBEKBk96WF2gKEqAHYA09KOysQyl7UeY++9LC4B+YSkcGo+LtSzqAn1k8CsvxxSlPmKZ/IfSg0kqSEll2SIP+QEQARtWLrw2APfkUIZvw61pBE+9IO3eoQlYMcDg0iXgCpskJeHzA88b14m41bDyj1oG3e+UKJk/wCE0um5BWECNJ4PINFpvwUtJ5C3jkIO0iY3rNt7UVHYgHaKBqvS2oahwYKldqzRe6iTzPb1pqtgutBtSpUrSe1JKf0hIVBoP+N88qUdJG4HavWLnxJSqQkfLPpQcWldCtpBg3ABMxHNfG4SkeU7e1BV3kJjV3isRclS0nfjt3p1FdU0gTyFRdlZQQZ33AFKi5JCpHy+lCLZ0qXqCjtzXqrsEKgwRM+tTpVYd4CCcRlwgAlPsKXReeIQCoAUFZuSF6QSJG5pRL+lwbyO20UetqkZ/KCrj4MpH1+tYsLCdwqQfam3iJWkAKB70g/dFjTpVB5iea0rWwU3Q/ff0bpO/wClMBcSoazKjO/Irxy6L4AIMK7J3rxSEp4BJ52NL/8ASGutigdKtgQhAHJFNfER4m41HcAisidHA1COfSvWEpWop07kRvT5lkzkkIxrXIUpROwFJEFJMTHsZp6u2DTjZVsTtt3H1pF1oJVG32op9XoZKzuUfKQEjVvvvxTHHbo22EXSyJIbMSYk/WnyPKoDkmgWd7lLGXLxSlloBtXm1aY25/OKMV7keLN+1s5PdaL38TnvNL2ogG6WJPqK05zlca8WvlE6iXDG3HtW0fUK/wDxWIY9cqMqVc
OrUVDk6jWqeYHEu3VwuBpKyZ9N6HJ27PB6Xp8QSHXTtpT+PCFAbDiPWt9ekF04zl+4UslSC3PHYitGOk7CV42DwAoQe8g1vB0exJpyxurJyAVN6YJjn/uakp9HaQ/LDtHBXHVrrLf5TwjGMMw26Uyu5H4fUiNQ1Dzbx6GtZMPYOptRBUYn671N+ulnd2+fMQt31nwkrC2t9iIj+YNQ/CnCyjSkgkbCN6vKUpbJcHHGKtbJtlxfgutlBhW3b9Kt7DLRV9YzqJUlMmR2qpsr2viPNLUjURztt9qt/AnHkLabaRKleVKR3PpUqemXdbRY3Q3N1plHGPwt8n9w58iidpJpl1wvsJv8zJusP0pLg84QdqqvNmZFZbxdq1u0G3cWSU6tp+lJ2jox++aWp6VFQSVEyO1FJRfuEaU6Y8fV5DAkRsBTJbcy4oQPerzs+hdvi+W2ry2vvCuQnUEq4UPSqzxrLysFvXbO65EiUmspJ5Q1qLojbRbchInV6D0p+20oJSkJIFSDLPT/ABHMFxOH2pcA5WRtFEM05DxHK7QXdgIUQDAFFuN7yI5JOhXpxkPEs7Ywi1tk6bcH94sjgUR615CtenKLeXgt92SlEydon+dJ9NOqdxkVDpbbKjO2kxq+tVP1a6q3WZ80O3F8/wCK84ohLaVbNpnj9aS3bKpXhBLBNV9eawmUgyRUndbCVqB3jsRUbyxitpaWCFKcQCsTzxS2KZwtGEK/ejUATsd/zrnk3LSHSoeXt0lhpSlbBPaKpvPObBcXzNq04YKzIHf70rm/qYXEratySD5ZnYUz6DdJcU699SWMKb1osmz4l1cpB/dp9j6k1Ti42qQZyUVb0PsRx21ewy1YABWVDSdPmCh3r7FX0/h20r2Vq2M1bXxFdDcP6RYhgNvbO6n7gEqtivUABHmqp8bt0o8JMKJ1SSrc10Sg+OOSPHNcmY6JTk9wJaMyVRzUc6guj8ZOwH8yKKZYWWCrfmgue0+M6qTsTtHIrnRfyRRTxSlKNIhXvxSqXSE6RAHfbem4QWUKUQSrYhJHNYodKZERO/M1VO0RrOBdcJlaRJ/Op90zfhm4AVtt9qry2UVSCTvxH8qnXTk6HbkGQk/lRSe0NSiO8dWTeuKJO6twaGJeSvYAiBRDHW4vHdRJE8nehgbXtuRAnbg1SKxfkm1fgXtFqUgkkqmR7GszCdvlJ2kifrSTRiAN5GxFYiVqVsTBiKKzhhv4HKFtu6oiY2IpZtBda9I23P60zYYWVKQGwADz3inSEuMyk77+valUs5RmvkxU7pkAhMbEprxolSQDG888xXryFKVPCe+1ZW7CiDCknkmmpPIrbo+1QtQUQCBEHvTltwBJkwojtTZ5ta3IVtIiQKySVnygT6mg6bpBWNijjmvzaSrc6fasAdPqAeYNKNJ8xBSVekeleONuI1aQABwKPVozbMVeZRJJ07RB3rOPKSCfLvHrSakqPEieAacW6FaSkTq501O7xRjEKiDpkRvWJVrIIK0n1HanKrQp3k7c7c14uyKlI77bGi7SoDpnjK0ttAeJIPoa+XpSD5glPrGxJ4E06asCEglMGdoHP1rNzDitwJPCux7Ub8G2MEq1SkmCkRvWbTSlqGokz6nvRe1w9nw1pUneZnua9asAFAK29IpYtJsZpSQwW1EjUQsCCeKw8LxCCJOnkGiabLVsZUk7wd6cJtEttJjYfnFaot5Ar8sDIZKkkpMDjYfnSzNmp8bJ0z7miwt0wSQJHtS/hAQqN/0pW4pUamgB+DLR8MJ8p50jmlbeyX5VAQobbelHPDSZgAGskNJB2E+lBPAzyCnbFTigFcTXqrFKzKkgniZoqqO/m9qxCUp2ITPtTSGVeTs9rTICSZO+9Qvq9cOWWRsTfCyAGlaiEBXljf7EVNFeJJ2ISNjPf3FVZ8R2LKwrpfjLqTxaOpBPOrQYBBG81Xj/AHI8Pl/bk5M5vvP+H3Tydw4pS9zPJJrWHFXC4XCd9Sjwf0rYzP
jyrfAlgAzoJ2P/AFNa4Yo4EqEGQe/vU5dnJtns8K9qwTbo5bF7E5bTp0qglQmdu1bDYPjruX8Ut3iogA+cD0qk+gWHO3OIubajyBP2rYrHun76cOFyVQYmI/51NN3RWeMnufen9r1Jw5u8tG0uXyEFCtJMqHYxWsl1gbmX8WetH2ihxtZQtKhuCPWtpeiWbE5bzjb4biLiDbXCg1reMJCpkT7dvvU7+J/4e2cTWM4YFbk2rqEruWmkSWzxqB7g/eumMb1s4XP8c+r0zWTJlmG0pd0wI2JPfvW0Hwp5QsM0dRWzfBtxNukFtp0+VSjM7fb9aoXCcIFgEo32AB9z60+wjrk50M6g4JjKU+JZqeS1dII4QZhQMHcbn7Gppq9jzuSpGzP+0x6OYVhvSxrMViyzaXuGuJcSWgElc7EHb37ek9q5x5e6j32Aqh4FbcRW3nxbfFS/8R+VrDLWWLVxxtxxK7x4KBQG0qBCRsJJgbc7GtSeoOTbnLuDpddtyhJUBrCYHpVOV2k08k/SRlCLUtF15D+I1+3S22m+LKIgtObp+lPsYzYzmO/Nw5cJUvfdJ9a1bwlvxlggwRsYGxqxsv2T5Z8rqiTHfiuZJxeDtfVu6NpenvVhrKFh4aGW316YCivTA9/WmOcuot1nd5IeU0lsDZLSf0mqbw+wuW0qUSQR68UWtrxVs4lU6VDmBt9KVR91g6rwh5mnFG8CsSta9GoQk+prX3HrvFcbxe4ubOyubpKCApTLKlRzB2+lbPYRgGGdQrqzw++UlLDrgSoKMEb8+1dIOhfw+ZByxk21ascJYccUP3i3PMpRFUUbzYnJyLjXtRw4Ga8UtghC/HbVGnQttSSKRvM03LhSha1awN9zP613uzj0b6dYfh79/fYBhbBSknxFMJHrufWOd65SZv6eYV14+I44Pkm1FzhNvcFly5tmoSshXmPvGwn8qquBSVxZGHqnKXVxKr6RfD5nrrjirdtgOGvKs1KAcvnUFLSBO59z7V1e6FfDxlz4Uellxd3hQq8S0Xry7eMlcCVREQOdvarj6PdKcH6Q5JssPtrVDBZaAXMEjbfetNPjl+JM5juHchYG+Rb7jEFoUCNP+AR6996rBqOEc/LKXK+prJ1o6ir6vdTcTx8pUiyKvAs2lH5WQdvz5qv8cHhLSYIkxsaMWrTaGw2oDbb3obmNvQ5soAA6gkiubldvJ6HFBccaHGXHC6sAEhI7HeaZZxPhOEmFGfyp5lVaG3DJEzzQ3PJCnzBIUe1SzGmV8kPuLjW7CVaQBtI70yW+WxKiAeOOa9fd8pk6iFd6b/2hkCe2/NUUvHgCjmx22+APKPMO9Tjps6Xrp8kmY49ar+QmFSCQd/SKm/Tt3wr9wgRI4FC/+g3ZIMXQReuAwBPPamoaCirQdzwQaIYkypy9cUpQA5A96GBOl0SqEirQUkSeRVKZMQB7elehsNwUgyd6xTdJUVJSI2jfiskXIICT96eSayDbHiG0IIWNiRuZ716UFx0avm9aaqfTA0/n2NKIdWkJKhB9Y7fWgnT+TYrA6XbIIA5B7k716ywi3HASpX3mkBdp1iT9qyW7rAUTCSJ37UqSWXsFXgVU2FqO30+leeAFPQHPD1bAdqQFySPLOk9ztS7bqdap3AP5VlclYdYHbSUtpgAK+/NelnWZVsfQCkRdtJGnYxzvxWaLtDijKpMwB6ijnyBUz1VugFJjj0pRtkJWVpAEgb0gboBMHmYIr78UlAV7cAUjrwwrGx4W0kwRIPpSjSdCpgEcCf4aGovlLUnWoaE8RtBpUYgErjfSO9DZqCqN0HVMDYgUnClkqQdMdz2oarFRBCTtzPrSzGKoDZUsxO2w3ovs1QaoK7J3EbV4l1KgTsVDuaDOYyQpUHykQIEUlb4tAUCSFe5EGhTVAS+Nkh8RAVEcdqwXdNoWQVAADaTzQJeManEjiRTc4iVLBKTMg7nYUWnRsxZKUPNrGxBn0rxVwjTsoEDsdt6jKMQGuE
EhQnaNj71i9f6V/Oqe57zUetZYabZIVX7aRyI7is04g0gFZXtBqLJdW8gKJISDBJrA3XjJISozMQa1NungarySUYohZMbb8GnAuUx801EvxRSsK0EK42r1dy4CNidvWjcUqbGteTumVDWQNtuZqg/jJxj8D0fxhtC0hTjWkpVyQfTufX02M1fJRoIECAJkb/pWrfx34gm36au28+d5xtIGvcJKgFCPod9/5V08dXZ89Nt4OZnUu58HAXkn5lIgD0JrXm+WFO6QmET3/nV5dXHyMLIG4CgBJiqLxJyHAVHyniNqjL7Pd4k0i3uiGIu4SsOkJCAYOnk1fuLdTHL3D0MqWEoQmAZgmteumaEWeFOubLE8n/WovnDqBdN37lvbrAQnYETvS3eaKtJqi93Ft3TmsGXCdRIPBrZvoH1vZu7FvK+ZtTxJ8Nh0gEOJP8Cv+jNc/enGdr26xPwHyVjbeZ2q9rJxSFpcalJG4UJn6imhNxkjn5OLvHqzZHqt0CetLs4nlpj8RZPLBXZhXmSSe39RuB69qgvxYdKsq5O6KOXt4lAxNSENNTsv8QeNO0kA7fc+lSbpb8TN3la1/B4yld620D4NygBS+PlUJEjYb+1az/FZ1Fxzq9mtm4c8UYTYg+CyoEAqPKyPXtHauy4fuWzz1x8vZQ8fIP8Ah5YOHNKevm0i1U6lZUOQO5/KrJ+K/qHkm56bLwfBUMu4tcLbSC2SQlCVJUomfpz6n61r7gWc7zDMKcsdHhqV5dQ5FRTFsOuHdQbbcfdcJjSCokz6Vwwm4NtnpT45cjSTGmDXCEXKJSSJ5FW/lRTTzCUqAOneZiqXFk5hl1ouW1srAkAgj+dTLLubEWzSUnzDatJuqRdx8Muy2SlxvYzO8GvX7BL6UnhQHA4qMYNmu2uGgS4ASN9+PapHa41brKP3idzxqHFQyg0qMbZLti6laVFBTsFDb3q7uk3xR5n6dqSyLly+syQSh1UkDvueKqdSWXUhQIiJk8UdyN0uzB1AxBLeC2DjrAVpcu1DSyj6qPJ9hV421gjNR6+4tTrX17zT1/wdvLmHOuYSxdqDbjtso+ItJ/gG/B71sp8JPwyYP0Qyq1iN0lo4gtAUt7/D7A/9b1FulvQ/LnR+3GM43dNXWItplTrykpbZ9QJME1V/Xr4yb3HLa5y9k5Xh2pJbXiHZAGxDY4J5E12JNLLPL7dpOHEizfiu+K4Wihk/Jj4ucauFBDjqFEJZE8qP9Kx6Pf7PfJOYsj2eO50xXE8ax/EkfiXnra6Uw2ypW8ADckdyr8hWo/SDBf2rmguvlV0+44FKUSVLUT6k+9dDbE3F5lQ4ehbyGVNpShphSklLgAkH67/Wa871Pqo8UaisHoem4adeTT/qD8Hl5huP4ixlF17EWrd1aWxdrSPEE7DXtB+u3uK1bznav2d4/aXrC7a6tnC0+y6nSttadilQ7EGuoeC5lTly4vcPutd2yrzaX06VIWO3rWg3xbPW+M9VsSxC3bQy5dpQXkoVA1plM/XSlM1Hj5/yo6Z8fWVMq7L5Qy6mQJgxvQrNTgdflUwTEnsaf4QfCb3VqWNp4pHFMuvYk54iwS1Mgg10K0kwO0QhVvJVOkD1IpJKAgKnzAbhQMVMnMrJSwqFzPY9qjF9h7ti6oLGpA/iHFVSQq3kHuwgDv7HbapZ08fLd+55yFRvO9RN1aVkALHA/wC1SPIT2jEdMgEp5++1KmndmZMsau1svKn/AL0DcuAsjc6h2J3oxjh1uFMBW0yKj6UBQ1K5G0jv9BVoytUTbTQ8FxJjeFDmk27lZ8pVEGZim4bJWnTI3rItlK1Abk8miotbBdDlN2UqKEgkbf8Aenr+I+M0kK2M/lTBhvw1SoglXFZugIKtO8zxvvSyTegNK0Lh9WgGYBMe4rMX6VpCN9vQ00QS3wkEqAPPFehkq+U77yaEfhjOtj1D5EQSRMjfasHl6VeYwfRJ5rBLIEaXNPbevFoTJC1TwdhtVZPIHo
US4S6RqJ95rwuqa1KBJAP0ivTaaVpcSoweRXqmdDiiOO5PBqaTk7AmtGQulK88H155r0PhY2Wo7d6RU2CZ7KOxT2pZhhTa4ABI7GtLDofbwZKUqCoKVxG3BrxNzMlRUPYEVmpopSkdyZivPwOo/MEEmTBn7ULTdILE/EClFOpR2mB3pVoqb2CtX/pVTliy1IjSN9pNL/gkKWfKQQQJFaKTA7WLB61LgpUIHOkd6y0nQkgQqNt5NEG7bxFwkJ24k06Nm2UiE7DvRdICwA0oVICgqCImOPpWTdutK4UFHSNh/WjLFk0oQtJUdue1L/hUqUIJ394pE5LJm8gY2hUARMkR5az/AAKikqSrUYgT+tGG7RKFqAEg8nvS6LVtHynY9qMk3kfxSAYw1xOkg6UhUmszhoQdKSZO2oJMT60cUynTpV83oDzXgQAg6YA7Qam6TwhgUrCf3RGomRvvSLmFqUqTqHoBR0OpBAIBEfnSalhRBKo27UOzDR2xS42HA2FSocxWmXx+4iP2Nh9psC5dpKiU7jSkkb/0962MwHqU2+ENX8aEcPASdu8VqR8emY2cRxPBLa3dD1uHFvAhYWCdMbQdvp/rXbx0rbPAcXKSRz76uXBS2lCgNOuP0NU3fFKQQrffYVaXVl7xLptEAqB/IVVV0kHUAqSfUcVyJ3lI96K+C0+mt63dYcq3UrSViPN61H839O7leJF20RCVnj3oblm8XaBKkHSUmZqc4Z1IZY8tw3OnaSKCl8garKF+m3TtzB3PxF2fOSN4qxsSzJaYGibpYbSOCPSoax1Zw9oJSESSOCDzULz5nFvMelDMhSjwBxQtBrtss9HVDBSJStJjYkelYr6iYFdnQ8tpRI0gK5M1STGHiwty885p2gn39Ir3D8DfxJ8XKQpKEbyRHeoS5Ywy3R2+m9JP1U+vErLuTk/DcfQm6tm0hJ2BRAP3rZT4TPh1yzmPGl3eYVC40gpSwuPlA5jv7jmtV8tdRbLLlk3auBRP+KOY71MsF+ItWBnXZ3Fwysb7eWR71SHKpZeUcvPw8nDNw00bv9df9n5knqJg2rBCnDrxKCtq4tiAUk/xEHYiudPUf4QOpPTS9da/BM4raoUoB20eTqgf4kmI+xq9MP8AjwzFhrQQjEHX0NpgBQ/p34HNJ438az2ZGtGIYcw6ViFOISQT9vWupcvHWjhhxc8JXeDV/B+nGcVPhk4RctL1QZKf9avPpt8OeccfWjx1s2LJAI1qK1AHsYED868d+JHDnHC8nDSFDvPH2o7ZfF7d4e2EWTSWBEaktgmPvU04Xo6J/krBsRkr4aMDypaN3+NE4k4ghZXeuBDCY9uDvRvNXxHYFkKw/Z2XsOFzcNkgItkBthKvUqHNanXvxUXWKM6L1T9wYkF1RMe29RO96z2l8/qdCwTzIH6VaPPFKkqOZ+llN3OVlk9QeqeZOpFwo4tfuJt1E6bRgkIj39aiDmGLW2AyNBB327VH3OrGHJCDpcBPbTWZ614ayABrBPEiYqL5b2VjxdPbEszpTi6MrZstXLj92h1YGo7ALB239D/QV0Nyhj1ljVi0/wCObZamxrbUnYKA2UI3+1co3OsuF3XlJUSdo0bTUiwb4o81ZbYDGEY2v8OB5W7ptLgSPQTuK831HFKauGTr42liR0Y6xZhwHBbK4vH71hV6ho+LcqAbSlI7mYmPU1zG6lZsYzbnC6u2NTlmV6GiqQVAbaiPczTTOHWfNHUAq/bOLO3jex8BIDbYPY6UxP3mozhjDtysu+h5J71uLjnBXILabJNg1ui5uGxoTo1bxzUtxZLVpYoAhCDtMUMyxgafw6XxOobmDIojmEk2IQDG0c12xk0SlEiVxdySlO6DzQ6/tGbpCkkDUoRA5p442SkEA/cUzWCiCSR7U6aexf4IHidmLK5KS0dMwBRbI6AvFvMO2ke1Y5mALqlkhJ2IJrHJ7ujFm9XPMkTtRqshqycY44UK7ciTPNAkw2SCSojc7fLR3F
29S9Q9JHehJQiAtKinVz71WLvZJ4eTBltJUIVok7+prNS9IIKSTO3qaTPlkyRvtFeqQoqStStII2pnpUwpUrY6bfGiNMmdgaeKsD4RcUQf60wZahaVTsdoPY08dc1RB2G1K3SoNNKxupQClJSNvc71mkhMiAnaZNeteH4hMDf+KPzr5RShwyR5jAmmSS2K6lg9K0uSkCSN4iK8UokAKB/9IAkRWKQVSoRKe/tXoEHUZO20bRU+yUrZRKkOGHB5QPlPPpXqgQspnSgDYHvvSWgEJ/hT6Cs1FK1ABUmN/aqrPuQqaFEqAJ1jSB60o0rUZSoahwDtTVCUACCSknkeopdttKZJVBjadj9ai8vRm29C7TwKJJ31RMVi4oLdhO4O0jmkwtBVJXsOBSjSkiEkkE7zRpLYLryPG39pCQsjvXwdIO6SJ2JHApLxUNJ3JM+lZm7ZUsgEcbhXJptNZCreR2hxsNEp8wG4k716ytQWtMbc0zQ+2CqIIEfesTcIKwkAAEyd9xQTSdgp+R8XynVpPmHB7AV6hWqVEyeNqQDyAlZAkD86wF42mRwCPzNK6Q6qggXIbiDPHB/Os9lN7EwdjQv9oIhSgqRG496wRiOuQSEoHPbeisKjP5C4ciEgDy8KFfJc0jySFAQZFB7e8UtyUydI3HrTp64AI1qhI5pE3boN3gcF4QdKwdJgCvm7pBQIWk/fimL1zJhJgHcSOaZ/j/DACdx7iaV2/AWk/J1WzpkzFcjLW44kv2BgJuWx5R6avQ1o/wBfcUViWa/OpZ8JJME8TWzXw8fG/YZqw5GAZ50L1o8JN2oDSraIWK1w+KJGXmeqV25lq6Rd4S6yHRpMhBMyn7RTJtIg+Op2jTXqe4peLJ1FW0gBMx7VW90taVrVJBHcVYXUS58bF1wmQn1PvUAeIW4ojv2TxQpUdMXaCGEXhVqAPt96UumlFZUSqefT7UMt1FpwdgOPai9sQ+kajO+4JpKV2N4GYeKVAFKiqfTil7J3VchxaSBEA80+Rh7SirnWTMiRUiyhlZeM37bKE6tatO3f3+1L2UbZTjg+WSgtsG2WDXGM3CVrbWtlvzBJG0+tO8TxVWFNqtrYKPZZSNgfarXzTlVrKOXUrSjwVAHRpTGrtP6VTa2S88taoKiSCfavI4Zr1nI5V7UfoXr+NfoHpIcPG/7k1b+hoC4o60IOtQEk/pNYPG4AHlgjcqow1ZJSkbwYn12pZNqktwkgnsmOK9ZyjFdUqPzucnOVvYIQ0VEEjcd/Wn9i2ltY8YaUkzzThNgAJSCduVcflWbViFpUJkjsaKb7YM1jIaevcHTagIZZS4pIBPh7io2/86lN+UcAE8U6NkGUA6SZ7HvXwa0qMoSFCmc3omlgErS8FgqJAmvT4rgJB0gbc0ZTbJhRKSe/lEVm3bNgalN+U94pVQ3kBaXHEDU4Tp9abpsi3J3XvPFSNVuykFfeYAJO9eOIQZ8kD+VGLrYKACLctEK0kTvROwYWEJTOr09qeOstQlKUnYbk96XY0pB8qkpSefWh+S8eA/Y7w6zIACjAnfvRm3dbYhSE/NE7cUFPmTtImDApZh4reAKIG2yRH3pVbRsbLoy8pBwtsjY6Z/SovmTH2WLpVutaST/D/rT7LuJoaw1A3UQIgcc1W+erh1eIuOgFCAYBjeaaLaFJH45eRAUkg7ACmb6iwdRMCN5qE22Yn7dA85WDv/zr66x5+82HBG5UadYZjLMl6Lh4JmRP3FKZT2xVAP8Ah+b0oTr85BOogb0UyyYxZsxAPYGiG00WRi4DbCFoEAp5PNRsvEqAMkTJA5FS19DZbTqhI08/60JRYtFnxVyVbkjtV4NtUyNp5BKlqlQSDEc164VRqSDMcxxT5xhpDaRJ0k/ce1PbG1afc0HzTExzS+aQHnKAyX1BSgPNtx6Uqh1SjKt9PAAone2bdteBrYpO4kRtSptGW0qgdqKpOhmBPGIgE9t6xF0d9lEjhJ4okw02+52VBMkjej
rmCWv7LS6QkkCdxyae08ATSZEEOLkpKiQRuB61n4y0qKSZ0+1G7Ji3eOpaY07AGk7tNsy+lvwhpmJ3O9I2pYA1eUB03CwNKdUkb1mHFoGpSVD1nvUlZt7RcBI0mBtPNMnvDTelJB9JmQTWaaNsFtKUQVJQNzMf6VmhxZMLJIO23arAbwW1ODh3SFqAmTzUetkMXDqm1DYcz3rKRur+SPpS9q1gFKBsPcVm2Vu+eSdiPLwakt400wyEpAIA23g08wCyZuWSsjc7Qe1Zpb8FNYZD1l0geZSSBxG5+tZW7bq/Mrj171JMUskM3SUpCQONhzT+0sG1MSRKh3jg0iecit2iGqbcSYCDxJIGxr3wHNHl+YfwzzUlQ0hN74a2wR21b07xbDbdDAWhMLImR60ZLNIF0Q+HGm9JCpV5gkCl2WlPJSQlZhOwVR23aC0AqEk9/avS0m2CnEpJNLrYWmAS0oNGAdc7CeKSTbLJkpOmNwmdjUvtWWLpouLQPEUN4pNptPnTp4NLKTGSVEftrV1sqLeokbxwJpRxl1Sp5HMGpGFtqCUFKeNzxSQbbduAkpG/rQcrYf4I6i1XqWSowNjWBw1SyTKhvxHFSxVm024Rp3UOR600LiGiUjSqPUUIzTdWBuvBEMJxJTbjKmXCFKqYrunr1lLjhEqTye+1SDrv8GmfPh0xf8WtH7dykXP3eNWiPK2PR1HKD78VFEvhnDtaTsUE7/SrJ4ZK+2Sn844Wq5xRz0SYAFRlWAGUwmAN5PJqXZgxQKxF+fmBkAmf+9AncTUNEH5tie1c+WXjhDZOWVPeHpgFQ3gd/WiGH5WcQ4PNEq3k80n+3FQhuAI4UkwT6ml0Y6pBCgrWDwTvTSUmsYGVBRGU3ApASFFSthEmT6VsT0S6YNWaUXTySfDSDuBOs/UcVVHTB5zM2PMNOJ1tteZZGw9q22FucvZXUtCFfuGVvr0iDJggT3/515P6hy/i4uq2z6X9A9L+f1alLUcsofrzYLxrHfwFslSbW0GnSkbE7E/eaqywyEVKAVJnbfapNf57fxK6ecfI1qcJUT6zSbeafCURpBP61X0fE+LiSiS/W/WP1vrJTu0sIbtdPWwOeN53/KnDWQm1Ln5RMkxTn/xQVqSBKgSYJ77d6VOZnmykRueyhXZVrLPn/I3TkJIc1KAKD6AcVkcjtW+sgAhRB3HengzS4JQYPlntE1ic0uLCdQgk7wZFM+y0wJ5oapyG2SkaQEp9BBpVHT5hZgtjV/iO5FOmszrCoVERP1p61jyykr8se3cUilNjYWwcz05YbJJUVKjjtNZr6dMqP05ijTWMLcSO/wD6hSoxBZUTJmNu1K+10De2R9PTm216oKldzxt3rIdOLQgEpE7TP9KkAxJaIkk7TJFZN3aiNUwTzTOT0jNVojv/AMO7MahpCgDICgNjXqOn1kmCpIWAdp7Cjn4pavk3neaxevVKAkkChlvRn9AtOR7QLJb+U8pJmsxkm0QDsAeaxexF5tKkiVE7apihGIZgumklGtURxwT9KybyZ7JVa4Pa27WhoAqHYGajWZ8Ebvf3S2gSd5OwNB8CzZcWuIRcqUEe5gVOng1jNml5MahuN6d2nbA0UPjuCvYRdKAQfCPBihraStB8sBO2wq3cYwhu7C0OgJM8q4NV7iuDLwxSgjztqOyo/SqRnaFSxTAQUkrJ5I7TzRPLxP7Ta/iIMwN6HXTSUAgAoCu6acZdUpOJo8xTvuoGmvCGRbl04BZjkkJ4/nTIOJFrpiCr+dLr1G3BB5EUMDh8LzAj03qybySuzx5wpQjV3O8Gn1kpaXmtImdpHpQ24eJbSFDSJE6dzNELGXHGlJMb8TW0raNdIUxVM4i3IgxG+9LXKCGfMEiRMjtSGLILd8hesAHlPFegLd1hStyJFBYWMB+xhaNkvrMSJ4B4qUrdQvAjqQAoiIjioo0tQfUAISOVe9SdKf8Ag51JAOnsdpoOkIsvIJwzU4oiBpnv2rHFWClxJi
Uz27V7hNwQfDIAGrmvselMEEpHuKLvaGSF8OQS1vsvn603UCu70mCCd5pfC3NbWkrGuJmNqYXq1IuUmY80xwKP8A85LGtVJGDKbBlOmOahjDCmbha0ETqMpUPSpLg6/wDg41GTH6VHiUrfhJGonzajQq9hqhTFCoW+swSOwFPssPKdSpI27/SsLxrVbEKKQQBMVjloFm4KVL1J5Cak6awhrbM8aSFPABMqnmadM3BbbAiAE70njgJeSsc8bGk7VBcZOsSIkj/WjWDLOzBTiReBw/LMUVxMeJZJ/wAPefSgVysBwRBg/lR5wh6wGwckbzW8ULJZVAq3cCGyPmEQe1fOK1qIBgcaSNpr4Mgo7agO3avSkoUZ8yeZp270h1Q9waAhwODz90p4FZqbb8dZSACQZ+nam2EElxQJJTPzTTp8+ZxKClUp3J7VFP6CmNoSUifXlNfOQ042EAnfmeKTR/ZTJWfyJpNxR8ukRJ7niikngCvyE3IMH+LgAmmLoAXsoD1p2tuWBqgKkSR3NMnmVKXI1xHaptOLwau2mdes34xa3WRcRR4CXmC0UvWToB2gyIOxFci85i1bvcVTaNfhrXxl+G2nhA1Hb6VvVmrrLh+Yck3LljdNpdDclOvS6mRtt6VoLm278Zm9dUoFbiioyPcmnhS2c3Hc5ZRR+OuKRfvOSBCoGng/ahTjmyjqKwDPO9PMQla3Dq8QBRKQBH23oe6sQRPzffT9aftTuJ11imZaVJWhUaFARJjcR2pa3c1kJICT/CfUU0cd1BJnjYA7cUtaq8zY0kLmkuw/8Gz3wv5XS60q8dAWXHCryjgJ/wC1bzYp01bw/oLi+PXrUOCydujtwIOkD7RWuPwt5ecewiwYabSovNoQEdvMd/f1ren4pmk5a+FLNyW4bLWFlsEDjYCvH5oLn5lnCPouL1EvQ+iuGJTf/hxVTdrVcHST5/MdqJ2anAdStjETtNBbVwqASRECIj0/6NEm3ioJEGR3r1kqPn3d2wmwtSVE6pVOw4ilXLonYggg7ntQ9u6gpCUyQfMSJ+lZm5JK/LtMSrakasTI88aCQUbnvM7U4Q6vSEqSG0ieDQ5Tob3Own86X/EoWqdUGfTYVVRaVtg/gINPQiSJUTJmnds8SQAufQGgi7pKRsdfaR3NesX4BCT9BQadDUyYW7ykGCYMduDRJt792oKOonzT6VGsOvwtMkgae3eijdyHFykjTHHrUWse4NBdBBbAJk+tehe5SEhJ9aYIdAIAUEkkfelEq1qGwJ9SdqCrawLQ5JMApVuCPeknVp3BJkjbekEOBuSdwOYNIvuoBlJKp2+lUpvRkI3bnII8vr/yoFiDqUEkiCBvRPELo6QTM8bd6juJXYCvNJSfzp0vAHbYFxt4269adiN96O5Dzp+/FrcK0pIAHsai2PLLqCuCEzMioy1fKZeStKtMbyO9CsUFM2MxXD27+38RtRKu2nvUMxHC/EbLKwQsHjaRT7IGbW8Vs0MOqKnQIg/ei+P4aVJLrQjaSZpLcX9BWincbwpyzdJPygGD70xwYhOIN+InXJqaYo346fCWnSudz2qLJsF2uIJABUAZntVmrz4F8FhqfUm0SEqSDp3Imaap89opSYBB3mkvxSlWaTEGIg+tN0um1th+7kK7TO/0p06QlNu2fPJIaEqIHoaJ2C0oDZkhUiTQK5utTZJGoe9FMLfCgjzBQ5gDce1NLOEzV8hLF2i6+yrZUASe5rJ0KS1IICf1pC9cWXUCTuZpR91SmoBEAwY70XJVTAog60WE3SokzuZ7VJTcE4OuCZNRe3aWLs6TKe9HwkuWCyCQAO/rQuog2McOcCnAFAhU8inWMp8XSoxI3OqmGGpIuSD3434NEMc8zIhUBI7iRNKm0wPCEsM58sE945pHFocc2QdfHtS+BKC5XumB2FN8YeW1cEKjUTsEjtP607q6sem6ZMssLDuEkLMrAhQPao3cJ8O6cUlX8Z47CjGWLoOWJABSQIJ9fe
gN65F84TsNRIHr70JW/aBNK0GkKT+D3lYAknmkcKWE3fcJnt2pszd/uiJ8ihuBTjDAk3ySk+QgbHYzQUrwjZaH+KrEp/QetNmFEBXJQRBE0RxpMBEDUJ5NDGHEoUdHA2VtSO92Ml5PnyEAgJSB60QYXqsdt/8AKaZXCC+EK0+VPMfxGnthoFsoEAJG5NKvljSX0MVvIQNJ1Ff8W3rSxWlCEJCfKdvcVg+iUqB4n6GkUuLCikAgHkkTv7Vot3l4A1dDuwUPxZA3G1PHiUumNjBBB7UOt4bu0jdJB3p9dqKiNjJ9eK022qQyasZNKQCQVeYbwkVi4lQZ1jz77e1JedklSU7naD2rNa/EQVAEjkitF4wCnoJ20qswJCo+1NHEOLVI8opxYuFdqoADUPSkn/EDncbTE1KUO7uxkvAdvcUuGb9Pneb12+jzj5o7SO+3eoBm24R+zHVETCVTvuNt6uvqrkB3Ljjt9bXa37NDpYUyZHhzJEexiqIzu+G7BaVSDoJ2q3JSZzQzoqJ5QS6tSlAI3MntQ15aCZSZCh5tNPLgtgzpO5n/AJU2KPGeCEt6lf4uw+9Kk1s6LGzjywSDpUjUCCefenlotJcbAURJmmly0LdSUAap3mePalrZwBaRq0qkbgijL4Gq6On3wb2aLljD3EpKtLjAj894rbD407Q3Xws57QkGU4eVCDxBBrTH4GsfbukYYhRggs6jq7jY7Tvv6Vvx8Q2BHM/QTO2HJGtT2EPwPUhBP9K87h/zpnp+sVen4q+zgwy6dKZME7lVOWrjymV7Gd5maFqLgEKCQsHffb0pRL6ZIUSFHcAV6GWeXVhMK0RuCZjynelg4PEg7piZNDGVEAKUokHbelW1LUoBXmB7jmmkngXT2ElPBZEHy+lKtqCkhOwIkmDQ4OAJUN/9a+SRoCmzG0ATFDWDaWB8/dEJWSkIPZQG0U1aulq0qMwNjsd6QfdWEkEgd6TZWsAqnYmDHH1rZQyoluHXKigKidQiQP1ozZ3GiIEg9wNqjGGyWtyZH8Qo5buSQgE8cA8+9QptG7LQXauSlRASVEbiN59aVac8MGQQOySaFggk+aSBAjvSgUvSNUyONqarx5D42EVP6Upgge8dqbqeKXdlAgjYn+dNXblb8AAFXoNpFJPvlKvL5gntNK319wDC9cUCfNKRvuOKjmJLSfMN99hFFLx3UpKSohcbgegqP36yUKAJO5kT+VVWGAGYk6FNwQOOO9RG6Ulh9SUrkbb80exNxS2lKM+WQPTiY/Woe46oKkoIUDP2psLJqJPl7G3MHu0OJcBSFSEg9qvnLuNs5gwxKgoa4gx2rWFD40iVaSN6nGRM3Lwa+abdWQ2vbc1OXyFWWPm3BF/2jWxBExQLB2kXCnFOpOoHYkRFWC2G8Zsw6iVoUPMewqJY241YLW0ymFRG3YU0G3sWqBK7nVcHSkeFMCeKa3Vw849oSZExA7Uow2VkjzJQrmN/+jWbrKSrUNgJEDtVY+UK028jFLTq4nYD370awZ3U8hOqP81MJCl+VJA7+n1r2y1i6CkDbgHiDTJ26B4DmLkofQkTqIBmllEhgSrUY7imeIvKLiAoGQNlTNOlIKrZCkp3jzH1oPDtmXwxilZRdgQZ4IqSsInD17kbbj1qLJJXdIlRmDAngVJmCDZrknShMkaue1K0G02CLJ0t3aZgQZmiGPHxWQUgwdxQ1oqVdkJGlIPmJMD86IYmj/dkjeCIgGjF08goTy+/4epBEe471hjjYFzsJSAI7xWOENJkEBUz8xNOMWTqUk7qQZk+1ZvOBk8BTKiwGXVphW2nSDvQnFfLdrAAGkzuaJ5bcbaS42lBPlkbUPxVwKuVuKkT2odnoTeUj5H72zCiqD2A7f619gtyRfNlc8wJ5BpRpaXWIEKUBwDIApO1IF4jyQoHg7VqTYb+CVYgoPJTtBiPWmYQnxYlIVEETvFP7hWm3SfaKGrRpUFJMkbR9aFDJ+Ge+GF/MpQAn7Uphb
wDZCyQYPlJpF59KdchWtOx270phaVQuQAgDn1otZ2Fnjjuy1GSAT9RSSHx4RIBV/rSr6DrcATse3/KmqHG221BxBJG4IG+9B40GjFbhDiT2I5Hr6UcTKmgFCDA+biaGeIhKUnSSBAG1P0uOOtBQHH8J7Ut4sZ+7CwMLz92SdWw2NJqTrQUlcRO/t/U1ndKKFEKRrP+HbemzoUpIOooPMGBQjbVxNpj3CwW9aQCkxwZgU5UpLkKUSfpQ/DFr/ELSSC2oTp9aWWFhUJUQBtvsa2euWbejaH4lsXwfHMp297gtyi+sngkpdQrzA/4FJ7KG4rSnqEot2ZQCd+xECPrVz3f/wCXsR/+4j+Rqn+oP9zP0H86pye1nFxYbSKeuWHCryjuTxxTTwFJVqUZSBPMQKNvcH7UOuflT/lH8qEXcTt+hg5qcKSoak9p2kU5as3XAlbaSmR3EzRf/wCW7/mH86JN/wDl4+lZO2F+3Rsj8GGbV4NizDCyqUKUEj131T+c12Ow0s5oyghKwFs3troUDuCFJg/zriZ8Ov8A5/Yf/fV/IV2d6P8A/wCnOA//AMRH8q879nqa+Uerze/0EZPw6/8ADhH1Q6fYjkzqLmTA12zmvD8QetwAknyhZj9CKjTWBX6VKHgK22HvW13xS/8A7ic9/wD9gv8AkKqFfzJ+g/nXdSbs8a8FbowDENBUGF+sHnilk4LiBTqSzv6RxVi//U/ymk0f2L31/rTydKzXggLeWcSd3KCBECd96VGWcQU1pDRnmVbGp3b/ACj6inr/AAKlCXeNjFbqylihCDo1EiJkV4jJeKJghBSSfX/qaslP9kfoKyT/AGX3NFuxc2RbDMtXzJMjgbT60Tt8v3YlZkKHoRRlH9o99BWSfkT9aTeDXQKbwK43BISQCZmaWbwi5EQ6NWw3PNEGPmV9T/Omrnb/ADCjdG+hBOX3XJOsR6+tN3cuPOLILiJ9RRJP9kf839awtf7dypufZtfAUgSvKr7i1qK0rJ2SZihFzkZ9xJ/f6RMQk71K3P7VH1FIXfyr/wAwqyfk2iuMdyo4wyP3pVpESD3qC3WCulyEkqGrv2q1sd/u/wD7v61EVf3p3/If50yD4IgrBnUJJMlQGxHBp7hmDLLyZJSRvJO0Ucc/+b9DXln8zX+b+lOlkFlgYfnNGCYMm1a8zmjbfio0u/exC5dWuTq3CgaFn+0P3pez5H1FFJRi8Ct+Qkw+AtKASEjkjesluq0kJXpBEj29KSa/tF/Skrr+yTW41UW0ZZHLb5UoEL29BTi0Shy4RKilQMzG1C2fmH1ozZ/2I+39KGlYHsfYgoBKCVEJPEd6c2tz4rKhqHr7kUzxDhP+Wl7L+xH1P8zTVasG1bEXoFwkoI1JECPSpHYHXbKIEyNwRUcR/aq+9SPBf7r9j/StNdKSCnSsCrR/vHl3Uo/KDRa7X/usAe3vQx3+9n70Uuf7qPvS3aJwbbB+HLi7gzM7E0+xZpKZMEqOwHcUzs//ADO3+gopjX8NKpYKoSy0VpKwoGCY37+9Y4qnTdrC0lcbwOKdZV/vz3+QU1xn/wAze+o/lTXlB8nlnoUwvSYWN9M7ke1IspKX0RsFE8ilsM/sf/eK8V86f85rTwrRiSPOqes0hO5A5HamsBKkhZkE8Urb/wBxR9P60xc+Rz/Kr+Rqd1HBll0KvrT5gqCI3I3r6wcK3YkBJ7Umj+4j/LSWGf3z7/0FMn7qGS8hG4bhZ77etNW3ElKp/hVAJPIp1d/I59DQ8cr+v9KVOhksWKqEx5PKPQ7GiDFwhbKgAU/zFC0/3BH/ANz+lL4d/ZK+v9KOwqNn164FK0qAVA2NJlAZIX5Vf+k1lcfJ/wC+vMQ/uyPqP5Gk7O6NJVkaNqjEEKbSUpVyDwaKquVJMKUUn6TTBv8AtWPp/SnLfyn6mnX2CLt0f//Z"
save_base64(s1, "test.jpg")
# # img_str = "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAEsAcIDASIAAhEBAxEB/8QAHgAAAQQDAQEBAAAAAAAAAAAABAMFBgcAAggBCQr/xABQEAABAwMDAQYDBAYFCgMGBwABAgMRAAQFBhIhMQcTIkFRYQhxgRQykaEJFSNCscEWUmLR8BckJTNDcoKSouE0U/Fjc5OjssImNURVg5Xi/8QAGgEAAwEBAQEAAAAAAAAAAAAAAAECAwQFBv/EADkRAAICAQMDAQUGAwcFAAAAAAABAhEhAxIxBEFRYQUTIjJxFFKBkaGxBhUzQlNigsHR8BYjQ1Ry/9oADAMBAAIRAxEAPwD6oVlZWVZZlZWVlAj4kfpBdRLvPiOzqWXEg21w+gwPF/rY/wDs/jXL17lri4Cmnie7PiiYE1dvxh3Pe/EFq14grQq6dXyeQVOuH+Yqgb0KKioEpM8kVWn6kKuBBTi94UASBPT+Nai5ShUQST1MdazjcEKMnnyn86QdjcSSQPWIM1puX9kqkxb7SlEDbzEciRSrb7TYUoI3K2kSDQ/7NSRyQroTWgEuQASk9J4q1SywoKLwdAISQkcGfWiTeOMeFpTjXQnYSnd6efNBISEgQRumD4o/Gsnu5USDHQhXX5UnTVhSHw6lyl7a21r9vuw2wFIZAfXtQlRkhKQeATyY61fGhs7jsRicPeasfeGELgbcDawHFiDwnd1PSubWn2AnxABRPUEjik8rlLjJG3Q5cu3DLCe7aClEhA9AD0qdrXDwZT04Psd4M9pnYHcFtIus7a8QFJShX8SaZ7vX+iTknv1Vlkfq0GGnbzwOmR5gcVw20hW6ADuHlNEC1WeSDJ8hWy/+jgl0abvcds4btb0xoVS303rmWddBSllhQIg+pVAFCL+LPTtqw5aJ0+0hIWVbvt0Hn+yBH51xim2dJJA5noRRKMetaTCTHTw8RUtxSUqdj+yqtrZ0xo74jbPR+eyl5bYvB3rV4VbGckjvO7kzxCgasX4cNCZH4nO245xu3asNL419q7yabKe5RsjawlR5lZTJHpuqhfhq+F7UvxFdolpp7EJNtZpAev79aCW7RiYKiehV5BPmfavqB20607P/AIBOwjH6Q0rbtIyryO7tWV+J+5dI8b7ygJJJ5J+QECBWHU9Q4Qwss69Do4qayUN+kq7Xn9Y21v2d2uRtGHCkXj1qHQkhpE7Z3HzI4Hsa+a+D0rkNT3rlpirVd9cJQpwttiVbU9T9KdNfagv9Zavyeey90u+yl+8XXXlEwD0AHoAAAB5AVtoTV17oHPpyti6Q+lCmzCoBChBBrDR03GF92depHZJpsCyegc7ibcvXOMuGmUnxLKOB/dTRj8Vd5O8TbWyC46s7QgHzqz8l256hvVEpXbbHOAhSN8/OagzH6xTlBf26FNPKWXEqZSQAZ8gK64uTXxEfQItuzXUTz6mRYqCwqNilpSSfqa2u9IZvS90bbIsmzeKQoJdUnkHoRTo8jUmXfNy8u9efVxvAUCfLyigb3EZp5SEXX2pxwDYgOFSjHoJ/hURbsExvu8LeYwsu3DzIbfG5K0r3A+oMdPlSSX/1NdMXdnkdjwM7mCpK2/4flTg1onNXu5Qs7lxPA2htR/Gkr/s7zVjZm7dsX22RMqUiBTSv1JxZZ+K+JLWqsQ3jsnd2WrcekbU2+asm7wAeg3p3J+hFIuam7PtSc5TSVhiLlXKlY1dwwmT18JU4PXpVQ4/FX9w5stWXlqBiEIJqYYfQmorwIV9lLaVde95n6TS2qPDKqySP6J7P7kFVpkHmJP3VOkx7ctimy
47L9PqWe4yqlA9PEnz6eQp8sey/N3RT/oq3eUeAruhx8yAIp8Y7CclcDccbYFXkO+dTtPyAqd1csFgr89lNkUrjI7+JJEQPxNDOdljbe4N3hWU9I2/zNWqv4fsshvvHMJbKSOqjdudfkR/KkMl2J3tipJewNsBEy3frH8vehTVYYUys09lyoMPnkck7SJ/Hitz2VMGD9vUFHiAlJ/8AuFT4dlECDp9D48h9uM/xFFM9iz10QlGj1LSBMovkk/mum9S8J0g5K6V2UWyE7l37qQJHFvIn6KNA3nZxbsAqOVUJE+O1UAatC77ELlpoF3Rd62BxKX23AD9HKj1z2dM2j3j07fpjqChRIPn0VRv8sdNOiBO9njigks3TTildATs5+tDXHZ7mrWSbbveJlDiT8vOrFGj8Wl39tgbpSRyolFwg/kqlThdJMtwrEZG3nkrRcPoCffxDmktR9iaspm8xd3ayh22cQR1BBoRCS0eRz5keVXDdYTTNwR9my2QYB5CXHwsA/IxFMD+j27glTOWt3tvXvUoP861UvI68FdwQoAEyRPBinHGahvsM26m2fUht5OxaN0hQPkRT7daJvZBQq0uOf9mSmP4U1P6YvmlEOWLhA/ebPWhtWFCjOeDqytaUNuK6qA4UPQj+dSfT13fWyu+xTv7WNxY5KVD5fzH5VBHrNTKoU26n0DiK9tLt/HOBxhcbT0mKHVeQLmVf4zWgTbX6V47KRLbkcz848Q/A/Ooln9K5PCOKuQtTSZ2ovGFHu3PY+h9jzWmJ1HY6jZUxk4tryITdCUyeg3gf/UPrUpsc/lNNqTa5Jv7bYLT4SEhRKY9ejg/Op+VWHqQj+mjriE22esW71pPR1I2uJ+v90Ufin8Z+uLK+sXvtyLZYc/V96sJUYM7QQP5fWpZm9DYzV9n9owz7bDp57hX+rJj16oPsaqrPabyWnb0ourd23cSOFEEA+4Pn9KpuMlQ6xgsa11FjcnkrvI6pdLLiiG2cZbNERz5HoY9Zq6WezbR17p9q9eW3ZuOtyhh9YbUAPMg+cciD5VyZaakukQ1c7blnpteAn6H++nBTthkgCLl21dAhCXiSkewJ4is3B1hkttHQV78PVnqBl5Fg6m4CU7tzCgpSQfOqR192fZLs/wAr9luELXbq8TTxEBYHX6irD7LO1/8AydWarTI4p2+s54urJ2SD7p8/xqzNVav0b246QuLIZywx92yhVww3kV9073oH3RugwenBI+cVmpuMslJpqpHK1rkXrJxLrS1JU34klJ6Vaei+2nNYv7OgXbqtqgsSqFJ+REc/jVZZ3T11gL+4snklK2l7SOp+YPmD5GgWbt1te9JKPlXS9skRXg69a+KXVaWkJTnrtKQkAAqUSB86yuV06lWEgSvgeX/rWVh7qI9jP0wVlZWUizK0eX3TK1/1Uk1vTbqa6FlpvK3BO0NWrq59IQTSYnwfAj4nb1V721alf4KQ6G4j2k/mo1UN0rcqPP6eVWv2u2Zy/aPm31rWgO3JKOPvDpxxUDz2FZxd022h3laNx3GB1IrWDpJowUb5Iu4nfBTPPpSSlQIUZJ496fEYEXSZTkrFuOgdfCQr5GOtNa7XbIC0kzG5KgRPtWmOUbPAOEHYTBCZ60sptCYlRPqIpVLZQ3O3cQeBwJPz9aR7pSk+R56AU8Cps1ctgndtPHpSSGkqUeJ9aPt7F5xO7u1EHmY6+9EoxTvPEmOY86TjbsOMjX3QUIUncOsg1uqzBZBAiT09qd3cSptKdoUox0ilf1W4tKOSfX5VS2VkBjaZUgpInnjj0p0tLd51AMSU+Yp+wWnV314ywGVrTugiP510lgex/TSHLRLVot0dynvCtZMK86hyjFIhs5XZtXiuFyQRwCn86mPZx2eZztG1bjNN4SxVe5LIOhm2aHQrPmo/upHUnyAJrrHKdjGj7aztUnGJ3PI3LIX40Geo+nzrr34GPhzwOicQ7rg45pOSvNzFi7ydjIMKUAem4j8BU
vWj4CMbySnR+mdE/AH8Odze5J9tx61a7/IXqUgO5C6P3W0DrEnalPkBJ8zXyZ7Qu0fU3xF9pWZ1jqRpZeu17bG2Src3btSQlpA9uCT5ma7b+NHW9j23am/U5uUXGmMQ8Wm295CHnuineDyByAfn61yp8NGg8bgdV32RzX7TB2OWU3culI/1aCSOT1kRXMql8TNO9HN+vcUrEFLgO3vHnGtoHI2QDz8zUWt3wuAoex5q0O026scza2zpe+y3BfubkMraUApLi5G0gRxHSq4sLVxxzahBcWTHhBNdWnO1aNNXRlpS2yQ6aYat15e0+1JWbYOJ3FJCSBNdGYZ60ymQbssJYruTuErCBtSPc1Q+DxCmstZtLbIK3UlSFDpyK780Na6X07pZhx9dpaOJSD4IKvy+lZa0lGjOKT7nmlNAWiMa2q8xtupwgFXh3K6+9OatH4rGNLcVj7VhQ6nukjj5+tNeV7ZLWzQWcTaBxfQPOenyqNEam1/cpJDhZMAgDakfyrizyVd4Q3661au1tn0YZlt91tJ2gpEfQ1znfJ1Xq3OBD1q7e+OfsyUkIEnr/wB67N012Q2CEoVkbkPPRKmk8CfSp7b6Q09jWkIRbNMoBALgI3ce/WrjqRVJIMcs5cwfZ3fY/CpfcxiLNAEqZEAj6Ac1KdF3+n8RcpbyuOQ6gmN4PKfXg1eGR0xaOtqDD6SgklIUByKrrWnYrZakM90pu4UJ76yd7pwcDkkdfrNS5xk8k/QmFqrSmordCbS/ZaTEJZkIUP8AhPNPuP0nj8XbjY2lzzlSQSZPWuS878OmtbNbn6k1NfvbTIRcLM/Lcmf4VDrrQPbniCe4uM6WxEfY71QT7cAj+FaqKfDBX4O6l4m1cChAQZEDoD+P8qZc7pXE3oDbgaSfJRVEfWuIzj+3QkOutaquFJkJUsPLI9YPv+dDOaS7YcgkJdxmoFb5JC23J+tL3TT+YZ14jRumcc+HHsm00QfDvcRE/jzRj+s9IafYhzPY9nuxM94kkfQVxRc9i/avkSS7gsw+mJ3Obj/E0Er4a+0h8wvTd6DHVRQNv4qqnpRjlyD4rOzb3tz7OrBKkXep8e4ocEN71/wTxUay/wAQXY66Vd9kGrlY4UGrNxYH1iuV1fDB2lHbv0vfAEcFW0fzrR/4XO0lojfpi+2E8xt/vqvdwaWQuSZd+oe3DsicQo2N9kd5G5KU2Kto9vEagV722aNC1d2u8cQDwfs4TP51CFfC/wBo4Sn/APC1/MTt2j++kB8L/aOkkHSOUnySGutaRULwwabHfK9rOkMiVh3H3Vxu4P7FAiPfdNQnM57Sd4lSrSzyFsoiUwUkA/Ld6U6OfDf2kIC50flBAEksHj60mfhx7RzuUdH5fw8Sm1Uefwq4yUeWG1pEDdvEhxXcOubOI3cGvUZ+/t+EXSwjyExFSXI9jWssO4U3unMhbFX/AJrCk/ypnudD5y3VtVjLpMHzaMTWqaeSc2eJ1pknEw6tD4HEODd/GlBqaxuEbbrENKWeq0eE/PigHdM5NsyqyfQn1LZ60C5buMuKChzMHypvbWB2SBNng74hVvdP2j0/dcAUkD2Ig1IcFkL3Fs/Z31M5fGqVyyV+JJ9Uzyk+9V4G1NqiCFH3rdlxxpwlK1oJmSDUNuWBluNWBk5DB3LyyiC63G19seYUj99PvTzjdbWuRa+x5q1ZftkgzvbK25PH3fvNn3TNU5aajyVjct3DVxtdbPhWDBj51N7LUeJ1ghDeRUMXkuouWxCHD7x0rPa0uCafNjrmuxuwzbSbrT142w64Z+zXbgLav9x0CPoqPnVbZvS2Z0rcKZv7V62PkVJ8Kh7HoRU2fZ1Doy6DjS1uNuDhxv7rg8uOivqJp2xHbBbusCzylqdh8K0pSCn6oXx19DVqbXqFoqi0zL1oB3RU31ktq2z/AC/KjF5sO/8AiWUOyOSUwfxFWvd6c0XqoBTSmLV9Y/8A06+5M/8Au1iD9DTFmOxF1pO+yySSh
XI+0NFAM/2gCKblDwCK/VdWE7mCtgnqhY3IPtW/fWr7Tig+G3AOEGefyp2uOyXUbRPdMNXQH/kOpUT9KGT2b6pbmMJeKj0b4rWPANDYGmyASsyfesp0GhdSgR+pr3j0YP8AdWUe9HtP0z1lZWVxlGVF+1C/Ti+zrUl0qNrdg8TPQ+EipRVafEnfpxvYXrJ5SthNgtCTPmYApEy4yfD/AFFdoudWZB9YK1m8cIlR4lR6dKTTpe61RmSbBtNwLVCVvlxxKAgEmODE8z0mg7N5LuYcJARufWpJ95NTjs/KGbrLvuOJSkuISCpYAgDn8zW3CruZqkOWB0ffYu3h+zYWo8ghSVcfOnpOFSy2pxVgyFAR4UJ5+fFSO2vrZxttKX21+u1QM0s6gOkgeJJEAASZrO2grdmyNN4nFutJD2PsioCSpVuiZ+cVYHwq9hvZT2pdo2q3NbIxv2bFttN22NcfXaJWo8lcogGOkGo0nHKDJXsPPJAE1Uulu2ZekNX399bZW5sQtbjLlslreh5uSAFDj5g8EUm9ywjRN2fW/HfA72C3Nmw/b6DxbrS0hSHG3XClQPmCFUafgb7DVJA/yeYwwZBVvUR+Kq+YOK+J+7srhal6uyqWuYBZWsA+XQ/3U9W3xgZG3IU3fvX6giA6neypMcAQQCT9azub5Bujqntb+DDRGAc+04vs4UhkOLAct3n3R18JIQtUAjyKfKq8wvwV4P7Qci7vs2Vufs2WMNd3QkdQSVDgU59iPbTqHX9v+sbG7dSUSlSb66UApSTyIkx168Vf1x2mayw7jaWctau278fsnLhLziDHITHJ/jWS1XpumPbaKj0z8CqdRaidv7cWzNiXSpb9xb3NuVgxO1JWOY9oFJdoXYjgey3VScQlxT6wyl4FN8EdZgQppRniukbfWXaDcNKK2LdtspBTcONqSPn0j8TVV6r03ku0W/Oob1eNRdFGzv03gbTtTPCufzprWvMjPZWDn3VqbaxzrdtbvftFwykOubu7WeIkJE9R0Fd2dt2eHYf8M+orzHEMuYfDi1tlgnwuKCWUKnrwpYNfNrGYy9w3xFhm/etsogZu0fQy0+HGH2FPIB2rEhKk/Ly+tfQD49bm2tvhb1cbxSkWynbJKyiJ/wDFskRPuBW2ClaR8nrztKvbhSmkb9vPiCCT09ZqU9oGqrHQnYHj8AylIzeTdN5fLQeQVCSmR6THPpUDY1Cxe5E29l3q7dHMqCE/iAD/ABqG5xm+7QMtclD6iUr+z2zKRJcWSBA9vWnqKKjd4RtoQlPVjFctoF7UcSuy0npy+KV7blnvG1KMyOhjn1FQXE3V1bPs3FoS06gnlHUV0T8TeKtrDF4jEtQGsJb27FwpsyO+WCpQ9OpM/Oom12f2atPWeSBdtEPMIUEKTIXI6zNcHs3Wc9JvtbPsf4r0YaPVwhHnar/Ih1tdrzWQt1rBTdOKAKgIE+sVeuOtXTaNNrcWQAJJJNU3hLNlrLkoBDbZhKhx9atiwyiEIQ33pMepr13Fyqj4VkusUi1UkzuPlPM/jUka1hfNgd2+pIAgJSYHziq7bzSUACTt3RAFEOZtIXHO7yn0rLaksktuix7PXuQtnN6nlOQIj+dL32vrt98ulRBjok7fy6VWLObWVR5+/FKuZkgE88DzqYwUXkalu4LEstZ3LDqSXFFH7w8z9acntcXQKi24pIPQDr9TVXWua5O4K6TSi84kJAAI8gKpxb4Q2WczrS8QpJS6TA53UsrWNw48FuLlZ53D+6qxYzACtoUePKeaIVm1knbz6zS9zfxMW4s611vdglQWSPaY+tG/05dIKlLSJT5CYqp288pJAKoMTFKDUcdRukRFJ6a8F3gthOtblTcJMekGAfavG9WXTygS5sKfvAGf/WquY1EUchUJmeD50szqZsqkqhfkAazcH4JvsW4dUvEx9o5EDwnaD9KURqh0q3FwkiZT6D8aqv8ApLbpVPeEHqINLtauS7ASk7pgHrFU4x4Hf
gtdOp1PIKTA4+9B/D1rZGo3GFKhxLgTHCiTHr1+dVezqsW6iCpUAcqIkzRjGqEOArSqU+YKetZbV2RSd4LMTqVSwva0kmRuJTKZ8oNKvaikgJQUcjco88/hVUjXjiVrQG1hEQIVAn3otetGVBO8lBP9rzqlG8UG6sMnOYt2NQNqaumA8lRlM9Ziqb112POuB1zFQAoyEL5B+nlUwt9XNrW6W3WwSDwpR6/M+dBu9oK0hYU0EoURJCwT/CkouOBKSOaM0xndKvqaUFsEcKG2UqH1qF3ulbR68aZuyLRV1BDxSQkFXrXVucVidU2y/tTPeLgxIEe1Vtq/Q4ymOR3KdxQk7QB04reOp2QSS7FJ5fsZyzOSfssT/pd5tlL3dMcubVCeB+8flJqCX+Iu8c+4xeWzts+ydrjbqSFJPuOorozS2ksximP1tZvqF/bhQ2KJMhPETHpTnq3WumO1E47H5e0LWQZZO7IlIDu4mNhV+8ExxM9a0UtrFH1OUS3yeoHl6Vp91XUpjz61eWr/AIe72zcTdYq4RkbIp3bmRLkefgnmPb8KqzLaausYCVbHE9ORtUk+hSeR0rVSU+CgjT3aFkMEgWzqhe2JjdbPCR9PSpEcRp7W7f8Aoy5TZXhEqtbjgz/ZV0NVwEqKjyJPEnrXiCthwLQrYpJ4IPNFVwNJIll1iMzpR3aWypv+o6yFoPvCgfyorGdobdgSh+0umFJPC7G7W2f+UyKEw3aVk7BgW16lGRsuhbfEn6GnY3Ok9ToUZVibiAdjplH0NZ7d3IqQ7WvaY07OzPPMmeEX1ohf/UkfxolOtbx1ALGTwjoA4DiNqv5VFLzsxuCnvLJ63vWzyktOgz9JonSLb+i8gtV9ZXC0ng92nkH05opdmBIf6WZrzusKT5+L/wD1WU/J7S9OhIBxt4CB5Waf76ynS8MvJ+g2srKysAMqjPjUyDmN+HjUbjZ27lMtqP8AZLiZq865p/SFZE2Hw25dIJBeuGkADzIJVH5UiJXWD46Wd5Z2mSCQoFSiSD1B+ZpryTNlf6icU+psokxKoHl/jiorcuuBuQe7UOgiPyoNjJvsAqhC1dNy0An863S3Er1Lmwuk9KBuX7/HoWsbilVygSR58mpnicNoq0RPfWTjyR3iVG+A+vXnmuZ7jPXN7bKt3Wrcp3bp7hIV+IE0A3cKbJ2cK9KHG0JpnVd5aabdl0XjKdhmEXfX/q5q+/gl7POxbIYHJZrXGljmbxzJOMsm5sBc2jbY6TJIBkz0H1r5uC9BWPCN0zJPnUn012kah0mtS8RmLvGpJ8SGX1JQsf2kzB+ooq00ilZ94HPhR7As62lSezvSLqSBCra1aQY8uURTRkfgR7Al2zrzmg7K2aQkqU4xdPt7QOZ8LlfIXA/FlqDFtobumk3iU/7ULU2oj3IkflVqaZ+L9l9kIdzT1mVDxNPvuqT+ISK51GURuTR29ffC72DtY1pzB5PJYe2WopQWnXXUAz594hRHzo60+CnFXVim6wur7ktqEsrt3mwo/wDF3dch6c+JZBvC/a5y3dSPvttvFRUnzEEnmPMipxpz4jttu88dQ2iWGlKW2xfNKtluGfu747vpPU1zbZbjW0455L3yXwQ5ZZceOq8rdXClCHHFJcUfmeD681DdTfDLq/T7rnc3v223Snm3cuXEKJj/AHCn86m+ivjSu7zENAYbEvFhCW0NM59jdcGYOwTwQOY5q1tF/EOzqkKtMzpXLYlTk7XLpkOW6x5gOp46T1iiSfcijgTB2Fnbdsuh8YLVLWY/pDZ2t4tL2/ldy2kifPrNd0fHdqS00v8ADzlLu9FuphV5bI2XLfeIUd8gbfXjj3qI6j+GvEYTtZ0NqHGWzaWrrOMXgKGpW2W5e2qUeVAhHHp9Kj/6U3K7ew3DYsPNsLu80yvc4JgIbcJ4AJPUVM9eLVG0dGU2ox5Z82nczfajfffsbMvZW/JatLRpG7umwI3EDpUt0LolHZ5cIduQzkNUggW9p
IUiyUeS45/b56Dp580xaU1AzpaxescW4o3jyEi4yDo2rWmPup80pnyn5zR2CyLLLuSunX1JU0gEOKV4ioHy964Jw6jr704LbDu+7/A+36SXsz2BBdTqy97r9or5Yv1fcmfal2ds3fZ640+8L3JOr+0vXalQtbnJMD0njmmq0YtGdAYvCXUm47pDZUVc8J6bT6VFbfUuW1XkP9I3rj9m2g7GxCRA5iARJ9zNbM3RXmGVK+6k+R4A8q9jR0Y6GmtOOEj4vq+r1eu15dRrO5SIscAnCZNxtBSqFTuV1p4ZUErTuMHyFFZxofbm3nIV12k8cTQqlJX97qnpFdabfJwtpBzCglRJPzpYXBJJSElXlzQDKk8KSnxdfaKXDqzzuB5jarj61Sr5SGHNKUTJVKh0Hl8q3S6pXCkx7HypNDqyr90CPoDWbkrc2qEK9RRJAn2YYxB8YAPmeK0ce8IUesxHnSTVyUkgwY6Ga2U93hCkjjjkUs1dj9KFkySRAB6jilkvqaEJV4vQeVItuEOHaCoDiY6H0isW4laQswDMHnikn2E88i6TySSUqHn60kFKUJPA9/L5V6fu7iYTHXnmsQQVAqHhHoYmkVlHofIG0L8Cj0FetqUpcJO761qttGzfO2PKtQtSFiDKT5inSfI6QV9oWFgK6AEmiLK8WRuCyOPKKESAtQBUAoiCYr1J7kbPDAMz0olteIk0+EErv3N0lUEn8aVbv1hKudsATB600qJDhUVCAeIH86WSpzYdpgn8aJJpYGvDDEXjyjKSQPTd1r25vXISXAPQUAHilQUIJjzFbFSrkStxCSOQRPI/9aHGkPPcNYunQFKQvb5wBFJO3q1JKd+5PEc9KHMhBkmPUGk5CTKQYBkRULmkDwEfb3z4Uurg8cCn8qWGmlgxwJI+XpUVDqo8hz0I6VZGL03d5ttpLCd4KUlSymEoEdTWUnnBXzZZYnZX2fnWWh3b62sg6r7SttQCYWoRBAPznrXL2ruyt/T2qcpaXFqWnEOcCOU8ny/713z8Llg8xox1FqqSi9dBG3rB6/KarntXwzOU7Z82m5ShIWwxBCeCdpB/MH8q51OpPBo6OMxls/oq+sbq0uXXGWFkobUeAYg1Jdf6mwOqtL2t5lMK23kX3QlTzcpUgefP4/jVy667HbV67sGrRfduvEqO0AAECfTzqM6l0GbfSFxb3THerbbXthIABjitt9JSJ2rsc/ag7G8VfWirzTOYZuAUFw2rwhafqP51VmX0vkcSEG6s1obXJS4UGCPY1b9phLq1uls2y3GHHBtKZjz/AIUHk8tmLGxXhV7LhttZBJgx7TW6mwKRcaHlFagbTyJJ9OlTfJYfv3FrcYCVdZSIpmXhFbkkJJ8oEVtb8gM9tc3Fm4FMPraJ58KiKtXsgyidQ3l3aZfNu2qW0BTCUuISVq6QNyTPl6VCRg0rBBASU+Y55oRzBqSkqBKQCSOKl1wBfruBxqXVpOcWkgkEENyKyufTjeTJ5+RrKnYG5+D9O1ZUbtdQPW8JeAeR6j71PVnk7e9H7NwT/VPBrFpxxJGOnrQ1FhhVUN8aukhqXsNyN39rVbHCuoyOwCQ8EgpKD6cKkH1Aq+apL4zL4WPw8amBmLjubcwY++6kUnXc2fGD4edqaBd63zC/ukvcR7AD+VV3dMrbO2CqDO72q93sZp/Jahvr3K2V9esm7cSpqxvm7ZauBAClMuwQevh86dv1L2UhKWl6H1e8pXG5GorcgH/+v/xFappZRHDs5nUYIPiMHoea1RuWZ8qkOq8exaZS7FrbuWNom4W0226vetIHQKVABPuAPlTN3SgncPujrFaOp/KUsmimwogrSSPKK1bYWtewcD50sGihJhRkiBPIrZCHUHyUTzIAodpcDMDSgqBtUOshU1uwhIWIA3k9J4oi2ZUsDekkdDHBo1mxbkdQojpAFUqaqiG0N+yH0OGEqmYR1pxUhT6kpdcKuf31E0521iyAncEqcHKTAowY1tclQ
56kCKmWM0Va7nmKyF5ilIXZXjtsoDqhZA+UHip9p/tW1HhA2m3vEhO7xJaQGt3HnsKfeoWzZtLkKUW/IAinFi0SoFMnkdOaycG+e41TPvT2dvjL9nmhMldAKe/VdtcT1AUq3SCQT/vGvnx+lK7QjlNYaY03aLDirBl29dQD5rhCZ+gNSrsi/SF6cwnZPicRqVnIpzeHs02aG7S3C0XIQnakpVIAkADxRXDXa92h5XtY7Q8vqvLJLT924O7tgveGWRwhAPnx19ya82GjJ6uVhGuEsELSL9Nuq7dCkc7Ux6VJdG2Sb21vXLx6d0E+s+woe+yKlYRNqRtIMjjiKR0i+GrvuFLI7/jpXq1SvgwbbJZi7VS3n0tiAgAJn0itscSvKolI+6oQrqadsCwlp673KJGwmI44FMuN3Lyq3BJGwwIHFS1m0Zq0qsXyie9dSPIDp5TNI2zCTO5MH3Fe49P2rIqSV7tqZMiAOae2rZIWoKG4+hrS/hSYqVje3apCY6nnjb1otrHIeSmEgCPFzTm0y2RzwPI0c0xsbAgQfMU3Q1TGJWOKQNoKpM80vbYxSipRSIPNPTjCVEnaCPWt22+7Z44Hp61N+SqyM72NJG5LYn2rdjF7gRABPB8qdUtOHw8bT5kc/IUo2ktpgpn6c0s3YJZGd3G93PO4n0rEYze0OPPp6U9qKXEwUGB7UsyiUgQPatJS8k7ayxhctnYiJjqKwWToCYSFD0Ap/dbCoBHT0Nbm2CYE89Km3MGl2I89aFcJ2bQaTVYlpBIB54HzqSptd7glPB8vWlTjW1HlAHmOJ5punyVVOiINtGehUPMkRNei1UVGQR/KpY1ih5cx5xIrV/H8RwCPQTIqLygw2Q8Wm3eAmeZ9jS5aJkp5jy6RUstsIhxlStvnEdRWi8U0laEobIHSYii0nbQ0iLBhaihXdwpPlzWOWqiQDCRPMzUzTgUlKeR5ec9a3a0w13g3+I7o9eYpbrzYJO8ENVZ922D1AEhI5+lJ90XUgbQlJ/Gpo9hUouAhDKjEbY5pwGB8KdzUI6H1HvRurIVfJXRtyPLcR5EV0ToPHG+w9u0UFQLCSCeABFVzb6WtS8FLSqAocJPX1q/uynHtXLTNuRCUtJCZEg+5M9a5dVrBqkSb4fWVYLErti4ll8Xr6glajBlUifmCKhvbVbhPaAvJhaFF1otqSJSUbFdPwI5q8rDB2uC1MbS1Er8LpCeO8JSJ68eXl6VQfblYRfWzzQVvU86Ni/ER909ffiudNSlQ3FEdXki5mMTK4ifFHPT1pLWt3b3Lj1i7sQCwFkevXj5mkMa6WnrVbvBQkqHWR/jmo3nMmFanW9PeFCEn7o49v4VvngheURfJYG1aYbdWgFxaePaoHltI294p1bYKSVcn+VWVkVqvFhUQqCEgcAUNnWUW2Ex5SlACgVq2AcmeZirTDkrW87OlZC0YVatp3pEKEEyJ9uhpmvezi4ZA/wA2RuAmZ61b2ByluwlbSyUhaeFREGgr65BUqFK29JFWpVkdlJ3ehH2UKUWkp8yAKjmUwDjUJKPDEk+Y+dXleqT3RLitsGPWobnbZd8A2220hlJ8RVxuPufWnG3lkXkqc4JMn9oPzrKmysNbpUQXW5BjgVldGAs+7RJMcjjoKSWogbgShQ8xWLMGCQTFJlR9BXY4qXJ8nvcXgH1Rc6gZ0teX+PyamXrZO9CdoO6PIk1zB8cXbI41oDDaTydwi2yF8WMm94dqO5SVgSonqVo4H9muq9WPBns9yKyACtIT9SRXy/8A0oOX+26+sLNICwxi7C2+Rl92ev8AbFeVp6cXqTPcjOTUI+TizN5K4dubh0PuoLjilfs1kA80zpyt4lRIurg7jxDyuv416UuNeEK3DzEzFJpG88/d688fhXWlR3t0ge5unXXCVuLeUeTuJMfOkU3SmVqCAowJ5PNEuMFawYHsAIpFVshS543e3lUqN8FJ9zQ3rrsTHSIrUXrqRCSQnyo+3xQdA2DeP
OT+dTbR3ZdcakcCijbb+a1edDjSE2ivvtr0yQQOomZpRGXutwWFBSgZgir4HYJaoQQeXP627j8KFc7EmWoBCExPUxB96ncOynk6ivUhKQhHmehrdvVt60RKUH2E8/Pmp7ddnAcvVWtmlDiWp712Ttn0HrSY7Llkwm3cUrgylKuBVKVZJTpEPGsrpaQCwhZHIABoy01vcsrj7K2SP6yiPpxUn/yZXDayVW6gI4G080Za9mg7sqcZ2Ag+xA+tLfa5HRHGu0V4OkHE2quJBUVx/wDVRtv2hB2UrwWPdBHJUp4QR8nKdX+zdveNgWOPMD+MULmdI22JxxceI3cEbuJ/OlXcD1vWyLppXd4HFhYTBXNzx/8ANoPFa7GEuVvOYLF3qjyE3BuNqTPUbHU/nNMjQubgqYtELdSeoQKdrbQb7ndvXygw0PvKJgJ+Z/lTdJCrI/Y3tjuUpdIwtoyh0+I2rjggewWpRP41JcI4jIWyshZqU5blMOceJpX9VXp86gqxhcMysBYunPMoTCfxPJonQHaIrDattgwkKs7s/ZLlgp8LrSzBB4PSZBHQis82HBOMWkpvXHN2zckCOnE1IWVAb9ygraeeYimdy2Ti8jkGZCktvFCVHzAJH91LN3gVMJAERM1q1eaM02P7DqVKMAgRHWj2iD8hxxUbYugsJJIQnpyaNZugk+FzcATwaTbKpJD4SCIA/E1uCgJA6KPUGgGrxA8UwZ86SdyA7ziJ9SZmk0xMclubQDPIPStEvOrchDe5M9U80AbwuIkgEjqJonH5tNt3yf6wH3T0pW6yF5ygl1ZSpJUTPoJEUU2ePn50DcXqLpYUgQNoHuT5n8a0TdqQfve0mlbkVKmOsFIBkiR9KWQNqBKhxzTazdFaB4vKvVX4KQkwSPKaaslY7jmlYU4JUQOR4f40UgEQlRHHXiDTML4REgA1sm+TIO4qHSnTLT8kgYaDUkE7Z+8OeK9SltsHeQoE/KKaTltqI6pHlNZbZKFSEpEzJJrKXNCtMf8Aa0GEDkdYKa9bDZUB4fkeQaY3M8AopPI6ccRSrOYbKd4EKPAKiSYp5qh7kPpbAaAXG5fRSfMelEstpRG+SOeXOUz6zUX/AF66lQTKFEdJFKjUimVIDsQOeORUbXdMHTJC2pJcKSdwCpTtNF9433QTyFjrPWoyzqBkNhxxUDmEyQfxrR3VDboSPEiOpB5NJab4GpEqQ02VIUXAFEglAPSrv7LDKUoShCFFHJWQQoTXMY1FCoKSBPABj8TV89nOonE2bC2nUMTaiSRM+4JHNZascUXFlxa21McBrZpTe0obtGTvbPQ+KePpVE9pWpUZx63Uykd62tSVDmVCBB9vSnfXuqLjKX1y64sSm3CS4UQTCT5Cqf8A1kp64aUZX3jO6EgcEGCPeojB1ZpuJFYXq+8bBbgd2oEHyqEaidCNR3Sm0kgpAMGfKpBbZLunGU8JKwRKz71EtRPkZ24TI6xKa03NYMmHvKbcbalf7hHPUcTTRqQhOIslEh3wnieRzRymy4w0EmCEkggnnigNSIaOHs195JSCCR+7z/3pN5qhJjG0hT1rubkpAnk9KDYyYbV3avGAYG7mndFug2v7GHJTBSFfx4qOXoLd2AhlQUOpSZA/KtVUgWORsyNtZOu3O15XfBwk7vCBz0E9aheatLlBX3K+8ZHAMx/3NPmUJfv7iQlA3nlZpiyCpdUEOztAI2mIrRCwNXdL/wDLT9TzWU6JXd7RJRP/ALoGsq93qO0fdNSiOelaLJgR+HSve8BUU+nlXkj97ivTPjbEe0h423ZwoJ4U482gefVQr5S/pBctbP8Aa9kVXDKrhKLhloJBj7lo2JH4nivqj2tObdG41qDDt430I8jNfHf43cmcp2oZbaSo/rC7kxKeFJQI94RXkaVvfJeT34U5xXoUKl+zee2/ZXAV8JhfPX6V2X8P/wABGO13phjU2u8lc4XBXSIt2ccEquJIJStZWQlKePQ/Sa4mwpW7l
rRslQUpxKQD58ivoX2Y/FfkNG6Qw+Jz2mRmLbENqasr6xuAy4hKgU+JCvCVAcBYIIk8UavvXG4cnppxumVp2s/CDpjskyViHXbzKYvJNuPWN9b3qAtxCSAQ4gtkoIkdCZnrUEf7LOzTHW32nIP5BoJ5LK30qKvolAJ+lFdvfxJ3vaNq37fbMosbe3thZW9oHi6llMypRUQApajySABwOtU1fZpd4FPv3RW7++ta/wAgaScttSB+hP3tQaAwNw2cVpBeUS2rpkrlSUKHoAmTUzwPxTYLBtt26OybTqWEnxOfaLlTgH+6VwT+FUDj2bvOh5+e7tWSN7g4BJ6JE+dDPKbbchCCPVSyTIqqkT6M7Hy3xZaWxGAs71XZNpu5VdJ/ZpfTcBRMTyA4IqFp+NbDXJUh3sN0RPPiK7pX5FVVNqRlCMTptt1lRaDXO8eGdooNrC4u6Tt+xIjiFJUQaMWHYv7HfGLpS6tlJu+yTSNleEbWlJYcW2T5AysECfnUw0x20217ci4ymlNJpslRt/V2O6DjpLipH4H2qsOwb4QdW9uGRS3pdu4s8MHAi7yV1Dlmz6g7gQpQH7qZPI6AzXQ2qv0Xet9IJF7orU2My60J8do4ldmXPkklaf8AqA9BV7U1khzUXll0aX0fpu6wrV9e6KxdyMmyHbVKLBCXACJHhO4R5k8Acdaq/tx1pYdgrFktPZ5oLOG6txdIVcY5xLqW9/dkKQlwpPMQUmCJ6RTFpz4le0DsWYVoLXmjXL6/tGh3DOQyDtmtKRwlaYStK0/2k8GPnXP3xG6/yvaDb5XP5ZKGX1sM2zVs04txDLKViE7lmVGSSTxJPQVybNRTqXymyknG1yWTafFvpHPrYt8x2PaPx7javDd49hwAHrCm5CgOOoJ+VNXaReaN7Tb3DMo0fh8Gwn717gnXZcQT4gpKlEEx9feK5X09mF3F02i4SCUslKuYnnw/zH1qwOzjNJs8hdruHFG2Y3umZISkDk/lNbrDIkm+CP621RYaR1FlsVg7NDdvbPrbbuXgFLUkKgGIjkRVeZfVGSzDu+5fW6Ygbj0qUa60w8/nLrIXD6W13a1PJaKSVQoyJnpUadwZQjwvbuP3uKurGMz7qyQCs+/91OWjGw/qvFIKdwVdNg8dRuFBXlmu1UQ4kCeevWnjs9YS9rTDJKQsfakSCJ86rINsu7Ou/wCkrwkgqLyuKbTcltqUoPX1PX5UXfEXFw+UlP8ArFEkR69aQLJKEEJmTTTxTOdrcbMXRU5KlEAiDJ5FK29wkgKSsyJ+X8KQ7jaVcSfMdaIs7Y93tWiEg9D508jWOQ+2vCsjYZk/LmscdWCQTAnn1oVCFpQQYA6RRLTTgA4Ow9QTNU7Tux98CqLophUBRI455pI3x72RAVPkK2fdaWjaGw2sEkKEmfY+lDN2ywhJHQ+cUueCk2OrN6oJUJ59T1rw3y3DuCwEp8vM03lhxO5MiP40uxbOxIHIoElY4MX20Ru5I4APWvEXxUQOdx9DPFI9xKdqZn+NFW9oogSPlA5ob2qhbVeQhu5VEqPHofSthc7VTxHoTWn2JavB1A6yIJr1WPdHhBE9YNJYyUsG7l949oJUevtXibg+JUkHoQDz9K9t8atfiIJjiBSirBQBCSZBoT5YeoGbpaVKgj50Sh5wokkQRI5pdnCBSCo9Tya8RYhqEFSVDyJ6/KoaXYqK8Av2ssnxdR5V47kVwBuAHvTjcYZToTA8p4FaNaeW4okEAgc/L1qWqXIXYMLsKbJ4KvTzpE33iJjgCfOaMOIXvKRJPQGIr1eCcIjhKz0B86d38wAf6wmNyYHnXQGkMmhjTNgoFRUq0QOeNsgHiqLTp64U6gBCQokDqeata0UcXgLFpZCu7bCdp5EgelRJqquwypG72YU/bZRTgKnFrXChwmNoAqI4a7KHEyNwCVJn05p0t8kV4W+CkpIXu5HHy/KmPTjyEB8bd0meTHrWatMtvuPFudjjRWo/e8HPTnrUc
zpBzzqjCwFjk9DTq7dFb1olQAhz7yTxFMGbeUnIupSje3u3BSuvrSSvIlyOhfDam9oTInj6U3Zi777AMNkRtKwOOetbqeU7s4knp602Xtyo4tsyQQpcR86PUZppdwhLgICeDG00lkn5U6kgCR6dP50lgbkFxwFZjnypW/cQQTKZn1oSfIk+zIve2tv9oS4Q24tQgpWeCfxpivbVhhLyy00CkkpDZJCT5n3p0yjba58uqk7fKoxfpShkAFYdJ9QJ/OtU28UNNJgqr9O4zbtTPm6Z/jWU3qxO5RPepEmeaynT8FYPvOUj6+s0mQd4SPu16TABJrRErdSBwZmvVeFZ8XB20NXbQSjGaaaE+K6CjHoE+X5V8TvicyhyfaTeugyVKddknmVXDh/hFfZ/t9vDaOacEkJZQ+8qDzwj0r4ddtDxvdb3ZWrcttptCuTyYKvP/eryenXwtryfRwX/AHa9CI4FtLmo7HcCR3oMg+U89OasjU+et7Zt20sVODcna4srPTqUnnmq40knvtRWySk8En1PAJp7LDjj7vJMH51qqlK6O/jA13pUFbZ2k8z6e1TTs+7BdXdpuVx2NxGOVc3WQfSzb2yiEFZUYkk8JSOpJ8qur4a/h1YzpRqjUzRNqgn7FZrB/a/+0WPT0Hn1q0NZs/0dz6LLDuGzegOF9klC2x0ASQOD7jpU/C5UkJyfCKr7SPhxV2Y5RvQ9jqGzzV1abXsleWYKmW7kiCynmSUDgzBk9B0p40R8LOVzamrjHaWvtSrHMpZcLRPr4CBHsTXcPwo/CbjnMHZ6p1ZapuO+Pe2dkSdqk9d7g85PIHn1Ndi2VpbWFsi2tGWre3aG1LTKQlKR6ADpRNrhBGLSy8ny7y/wr6tzOn/smT7L7hvotK2A7KCPMBLpg/j8q547QOwHJ6Xul2wav8U4j77F2wd6Bz5Hafxr7pVGNfdnOn+0jDOY/PWDdy2UkNvxtdZJ80L6g/kfOs1QbWfGPS/xxdsXYtgmNJ4fKXDFhZt93bt5O0tndifIo2tDj/iVW1t+kL7d1ZAXStaOymT3P2RjYfmnZFWx8XPw8I7PbrI41TYfW239ts7wJjvG5IBj6FKh9fSuLEWrd22pSCNyOFoI8ST/AHVtb7GWyF8HXrnxpY74htMN6O7ZMMy1kQsLxGscQ1sfxz8QFuNfvtknxBJEj90kA1R3ay0+ME9j3y0b4XLduotqBacPKg4g+aVAAj2I6VWarbvRwVBSTII8jTrqfLu3ej7VTilF5N0hJPSNqFx/Gr5VMSgoO1wRnCvGzyKu+RCk8GPKrB0Khd+/krdpzunH2VpCyZEnpP5VXeOuhfXaX9uxaQAvqJP0qwtBLLV044mAEcGTNc75OgbNVZlrNXSnkyhweBxKudpHBE+1RpxKymFJTt6yKByzyBlbsBUK71XKT6mh15O5WNnfrCIgiABFWTQrkGkt2ymVcyqUkUX2bsFWusNAiLgKJ/GmNTneklairiJNSTsv2HW2PO0lCVKVMdCEn+FVTqhX2LPDhdVcqIgd6eDyYml1vjak+EpSSUgcHmJ/hTabgJW4pJncrzEVum4ChMGZg8dK0ikvqYvIdbLSFqMwpXUmj2SlDQ6lU/vDp86ZWnQ2owqBHQ9aXDyt4SDx1CQZp0qtFu+w+NomISEgiYiikfs0KSYB6AimVm+iJ5IPHkRSyL8iTPU9Kl4RP1HgW6Fgjg+fpRSLZtKCkyVK5mmZnIpA6kg8cmtlZYB7wHgcCpvIVXceDYtHp1HUmikMNgdOPUUytZUNwI85M14cusuEdB5Ec02tysLzgf0W7alSE/WikNJSBAB9xUeazBQmCoEj3rwZtalJAPPlJolW3BSXkk7TTYVzCfMbh50ohDTigpcpPUzUYVnVAwVc9OOtbM5lahAkx61EUsMJJdiVtttNtE9STIiIr0JaXyUGZj/AqLO5h1cAHkeVKtZJ4Nbi77H1pSw64KWCWbkMqSkhJSJBBgzXsMBRIaSqe
flUMXk3FvGFk/Xilf1m62iCtQBHUHrUuDviwTZLXrlsgJaTIBjr0FLNuMMkhQWtJEgAc1CGsu43u6qCjwVHp7VtcZt3anxAJ9Krbu5BN3RO23WSvajdt6wUz0pd67s3DtKwJB6mB0qv05pxttR3EcclKomkGsstakqO7bM9f40pQa4FjuWJZW7TrjQKykbh4gRETRucuQ1ahCTEEgAGSKrtnNkOpSDMHjkzNSfvlvY1ncsH59ajvRos8hFvcBvAvAcmFbiSY60zaXugp19J5VsO09BM80o9d9zibhBKikA9PembAXShcOJHIgmSKTyg4Hhy5T9utwkbiFmRUfz12v8AWbgSoIlXA6D5RTk+Sbu3P9umjPupTcOEDz4mklWSVyO7aisNjlIAB8J5oV/ulW0LPAUeD60s25uS2oc+AfPpTfeBRtRAiVEKMwQKHgMPkzEKtmXVbBJUCSfOlcmtCUqMRz5iaasJKcktKQkAp6kcijMg6mBu6IP4/SmnfBRHspahxR2JDgPO4cACoRnlFpCw1CfI8HkecVY2RzFqWlCVggHomOfeaj2FxbWoM5j7Ytq23DwaJ9JPHlTj4QrorbvbkcAqAHzrK7Sa+DXHvNIcVa3RK0hRO8iZrK1uBl76J9IFHcOny969s099dNAn97oaSD28QPD70Ri9q8i0AADur0pYiz5TTS3orP4qMh9lCSknfb4a7dT0idpAr4idpNwLnWWSMSpLgSYPohNfZf4yMim1ts+vdBZwZRwem9cT/j0r4v64eQ9qrJKSZCnVH29P5V5eh/SX1Po9P+qxLRDPf6iRCRwhZEdfumPzq0dF6ft9zj10nvQ083uAIgpnmq77OGgrLXD8phpkk7jE8gcVdmh8JklpfSbPfarVuU8rhDYIHJPTpU62rHTjc3SR6mjo6nUTWloxcpPsjonS+u2sfjk2q1tt7QE7QICR0AHy4oTSTNjq7W93d3bu9hV4gK9SjgeXnFRzs90YvOvJxlkh3UeZcBNqli3cLa0gjhKEypyAeVGEQOTT5lNH6x7FtToXrXCPYu3y6VLZccSylKygDhKGlqCQAU8EzXJoa3v3cIvb54PQ632f9hgnr6i3/dWWvrWEfWe1t0WduywyAllpAQgegAgUhYKUt65K1EkLKQCmBHlHrVT/AA69veH7VdOW1gu7Zb1DaNBLtsVjc8kCO8SPT1HkauFtkNKWQSdxnkD+QrpapnlLyKVq42l1BSobknyNbVF+0LtK092X4JeV1FkWrFgSGm1H9o+uJ2IT1Uf8GKBt1ycqfpILq0tsRpne422+GboulaZ/Y/s/zkcfWvkfat3+WyybTE2T+Rfu3w2wwwCpbiiqAke5npXaHxOdrh7bdTZF++v27K3umu4t2EuhRYYBO1IHmepJ6EqNcpaYf1H2I64sdS4xJfbx76H03DYDja4VIKk+Xl1FdEcYZhxbQ2amxWpOzrKKsNWaay2nr1P+yyVmtlR+W4Dd8xND6hvEXek7VTZC0KuVKO3oRt/719Aviu+M7TXal8Il1amwYVn9Q9xZsNOpSvuHNyVuuJJ5EISqD1BUmvnpkWl47SGIU5CUvvOqSmeogCabpLBEJynhqjzHYO5tcNZZQqT9mvVOobCfIoIB/jUz0O4lhvI7dohO6APLrUJxFy89bpQp5S2WzKLcrO1EmVQPKfMipVp50stXqmyElSCIj2qJ1WDZcZK9yiwMjdEwR3ivP3oU3G8jpHSCfL2re/UoXT3IBUomD160P3e4AhR3+fMUYfBSFCqFkbeI454qX9lqCvVzCgkpIbcJBPU7DUSQlKkHeVEeUedS7ssTOpllUwm3dVJPXw0XWSfUmts0krIWEyVEkn1ohGxPoVdPShbZRVvAJQlQiB5VuVkrAB4iATWuLMYvyEhoJAI4JV0mlVpShPjMkeUUKlcciVKHn5UolZUgFXEnmmnWGimxVG4Qd8nyJ86WKElBJUAfIJoVDgBBCTuit0PGTP3QPMRSl
ElO2EbFAQlUJnrPWlBKSJMEjrQqb2Bs429RzW5elYMCPWalN3SCTTQU2XEmEmR6zRCGkBJSvlRMgcce9Ai6ImEya2bvlhCdyd0cA03kLxgPQ14+BEc+tKotEmYVBndx50Cm+UmfEAT1BrdOQIKlbwoEcA88+1FOrG2kObNqmRP4j/HNKNWyYMHn1ptGQXsEEKnyHlxXqMoNpiJPtUcZoFT5Hhq2Stckc9JokWyeBBIHt0pmezHeuo7tAZAQlJAVPIHKuR5mTHPXrSrWTO4ALMT59KNyfIx5+ytpBOznyBrEMAkEoAJPrTN+tlKWNqiJPNK/rle1STBIHUdKmq4KjKI8LsmlR+7/AI968Xj2+OSqeu2mu2yTryQQqJjqfyrS5zLjLuwlPHSPOhqsDVNjmLFoqEplQMhXpW67VMEEjd78mmwZnwEq4B46c0g1lClwgL2pjkkzFJ85HwPFrjih5JUJG6ZFSJJS1aJE+LoKibGXC7hKEQo/OpEhZcs0qX94knj51DtvArpCLzhVj7lKiAryjmKA0+Si7cUJjaQZ86IW8RZ3EEE+kzQWDUtT7oII4kmnVLIrbwHvrULpqJjd1plz60FbiuAqY8+TNOrzgDyTzuCuZ9KbcswXVr28qJkQOKleWNUsBjSwlhoqT+4Pkabbl0C3MEQXFGPOjEE900CRITFNl6D3Lkz948k+1ONWV9RDFun7eCQQSkj5VmRFy8lXccn+t7/486TxKQH0uFXSRx50si9FkFJW3uG77ooaSJTzkiuRNw1uCpKupjoTRegr1xrWOBWpwlxF4zJ4BA3j+VJ5q/74ObEK6j+NCaTt3rbU2Kfc3tn7ayoAnnbvT/L1ojJPgGfZvGXbIxtoPFw0jqn2FZUbxGYH6qsuXR+wRwAP6o96yr2s4L9BjY7RnnANroIHUqgTThY9pbtlch3c24EGZmJrlJ3WyrTkLiR5HgfSk3NfvttDe4FqHI5rmU9RppM3Wlp87S1fip1gzn9C57IObUXNw2zblIPASFjj+NfIzPb3cjduqMhbquhkdZruLtV1q7ktIXDLi1bAe9VJ3cgE1xK/tuG3nFQlZ/dHMTWmmnGNM6dLTi23HuE6FBSnILbUUqIS2PDyCT1HnX0I0t2J4TRGiMRmu2bVR09Yu26XsfpHGJD+TugRIUtPME8HcoQJ5Ka4i7LHsdjMU9fZAINsw6H3FKB3IWgykggyfL2qc4/tixmuNRPNNu3y8ndElNxeeNTxAnlUkiAOJ44qNTShrNOauj1tPrNXoouGhKnLlrn6WdTJ+M7E9lLhtuyzQON03bvHuFZXMqVeX76ieO8XIS2P7O4iekdKgPaTnc9rxy81TrDUGRy2UdYKWE4+0U43aiPD1AASD+6kc+pqvsZZPJxblnk7wZ2yUQFocYQ0402TJA28KHnB8+hFHs691ForS1ybC2OqtPtJKRdWyd9zaf2HkDxAD+sR8/Wq3bJUjyZXJ7nyVxpzt4zWjsgy/eNXSLllYKXGR3K0EehEV1jov9KLc4zGssXWVu1OtpCe7yFsl6eOpX978TXC+r9a32u7oumzQhJmNqZP41GbTSmUyd2lq2sluOq6QIAHrPQfWtvmZSXk+g+rv0pOosoyLbD5pq0WsQV2eNAX9C4FAVzRrjti1l2i371yX7++v1ghV5fLXcPAHmBu+6PbpVZaUvNO6Jy6xnE3VxfNgALaZ3NIX5gSRMesVOh23acYJLYvHSszCGOT+fWqqu+DNpclS3jl2xk7gZDeq8bVCw5yoKHr605YfXuVwjkofLjSiAWXOQf7qF1BlP6Sahvso0lTaH3JQhX3gAABMefFTnsn7Asz2oaisrPuzZsPEq3rG1RQOVGPSPPz8qHJJWy1wTHT36t1NjbDLP4G1v30IWhu3U7LTe4jcVIiJ49JqJdtzKD+o7dmyZs2gl0Bhk/s0gbenpXbVp8IWnsTp+2tcFqG5srlKChT2QtUi1W9we78Kt6fdQC4npXFXa9aO
uZ1FugIW7atrQsoVI3BwgwfMSOKzhrKb+FhVPghWBQUMFcbUyYke1S7BNoZxmRdkEhoqBHWaiWODjdrAAUQSYJ5mpJaIdexF1bW7Rcun4ShAHHzmqw2NvBX15JWFHy56UOEgk7U9PWKen9OPNKLbrwbWnwkATtoV/Drt1FTRDkeQ4Jqqt4AESNoK1SjyiKmXZctK8re8eJNquJHrFQt11Tmwd3tgcmY5qbdmZUm6ySgSItiRt58xTfJLpErabSkmR4h6eVEKSeAkBXHM+VD2p3Ang+pPH8aKacSFQFz6k+VbtW8GBohG+TxuA6HilEkltPEifPqK1IQEqjnpBEc1otxLrhAKuB+7xU8FN3hCxUJA4AJ86UCg2NwTvB4jihE7SoFSJAHnxSraitsEAR5bBTXFslKkLd0hfICfbiveCYgT0kUmFQPDPImPOtjyqAdoHnPJoVPIqaeTeIEkwfkTW6UpIPJ59+PpSJndH3UnyPnWyilIIHA8x70rbdDT7CqkI28kEHyPWt0NIUBtHA67aHWoAJB5HrXoBAVyoGesfzqHd5K7BQQ2Fzu4/HmttiFmZ5HIIoeUhuDwfet2N4VAVx0kKqeR9si7aGiQVFPTmaJQ2lQISRJ6AigxtWlQ3+MeX8qVQs7gFTtjrNNJMV+AlKG0k7onzPpW6G23EJUCAPLd50LPIA6+teoSorI3nj0NEo0sFquQ9DbYIKQB5cHqKU7ltweUn1NNpVyszMc8mK3SSUglRQs9D6VEcCwwo2ckblJCR5x1pR20aKQAUEjiUpiaHZU6YmSo8GeBSjtvJ3I2qPQgUbe47wL29igPNjcBz0nmpS0qbYRJiRHpUWtG3Q8AtUr48KRMVI7RYCdhMwojioeATQkopTjn1L+6D1NCYppQWtzjb5GetON8pKbR1Q8KfQUPZhH6u3JInefrxRubyKjV1Mkcjg9CKCuFn7THWTxS7jgbmR4iR502Xr5Tdq5B4J4606aG3VIMU8G+vA8qBfCHULJlSZniK9aeLluFKUEyYnzB9KFbKiXZUmJkjzNTW0eTa2QlC9pKkkn0kClblhsoJcSoJmSQIpNCE96NxEz5DrSV3doaRAMJBg+R/Cly7BDBmD+0lolABmIplTlHWX0OFsLLLgdQqPNJBE/hTvf3Ta5IT/xDgmmd9LdwlzakrSIG4gR/j5VuqS9RtLudV4v4/1WGMtLZzRjjjjLKG1LTkwAohIBI8HtWVyCu2TuMpTM+h/urKW6JjsR0WcspSwSfx5ArR6/73iQYHWmJL4HVUGfunpXhulFwSAB5kCJrKksIdNgHaLf/ZtKZBYcIKWlEAGJ4NcuW7qu6VKwCriFGfwroLtSu1DRl+k8LUiAAfcVz+GFK4A5MRI6U1wdOm1HklPZW+3e5G8wd4lt9i7QZbWdskRwD5Hz+nWp5gldnfZtnlOus5FeSSDAuE94GgeDEcE+5qmnLa5xbzV9bqWl1CtwUCetWbicDf8AaNpfJZNOKevU4hKXLxdr4nLUGYcIHPdnkEkQCOtc2pKcGpJ2vHc9Lp9PptaLjqS2y7Ps/Rk0v+0vSd65LN1dMNpO4TbTKuCngnmDzFNdx2iaWTkRe2OTyWFyTKZRfWIUhZ9QQk8g+nSqiX3QuwwvqT4CodfeiV4NaQXCdqVDgzJPyrfDVnmz03FuLJsrMaRvMyu/f1HllXT7hcdDdiltCyepIC4n3EVJrLtI0fikpDdzfKUR4j9l8/mV1U1tpq4fgJZWvp0QefnRh0VflwoTauEdDKJE0SXhslbUslm3vaBpO/RtfyF7sUonaccFR+LgoM6m0ggr7u+uW0r8Kox4QQPmFH+FV9/k/wAiuSmxWSPRMcfKirfs8zKgQLFaU+kjp+NJfDyJwT4ZYWmsp2b4l9t9tbz6h/tn2FLM+3kD8hVnaM7e9F6Mz7WXRkLxN2zIabRbqQhSSIKVGD5e1c6p7McuVf8AglH8qWZ7LsoUrJYc6cccCkFLude5v48tM
qxDzGMxF4/dFtZS3dXG23DhBhRQhPjAmdv7OTBJrjDN5K4zmWF0lRbSAQncrnrJ/Ekn60e32XZJaQSwrjrAp2sOz/LsMw3aDefNQM0oQjpfKi/m5YxNo7q1ShW4uKMq3DrU60lafqzHOXzriUBaVNpSRylRpmb0RlGblDtww64oH7oB2ge9Oes8flGdKN29vZuqb3b1FI5EfnWl1gl5IVfMuKunYJTySd3M/Om88KIUTx044NAt5u6YQEKV3pTx4hJpF/Nvu9EtpI43ITz9Z4q6Hk8zCD36J4JTJ/lUq7MkBasoQNzhZAEH3qE7lqWStQVukknrNWF2UNgIyat3BKUyP8e9WvUhtIdVBTDhTuggyQPOle8MyFz8jTguxt/2riWzuKin7xNItY5rc6ktrSUbRws+Ka38GFtOqEftASBv446mky8oJ3fdAPEedGjENoeQgIJKwSPF6V4bBkECFRu6DmKN1YQmu6QGq7CEmeeJia9TcwkcHny6U4oxDHcur8UoHAB6UMuyabXtU2vdtkQfOpb8lA6rkgQFbZ4Neh9QTBkH+tFEJsmnAlawsTxBV0pVnGsOrKfEUzEg/nTtbSafII3dnkJJJBgzzXouyW1mTu8qcMljLewWEePkAg0gnHW+yT3nijjdSwnhlcq2DJvFOAEkbQevNbrvYEJJUSDABj8qdjpy2ONS8VOEEwAVQPrNDHCso8e5aiPKeP4U8TwKmsoCOQDQBWSSD06V6MnyUBUHrzT3g9KWmTDvereTtQVeBQER6yKGXp62Cg2ConzO4AD8qnCVMaT5G1N+4FSVEJiYpVWSWSOsCnGwwttf3FuwEuJIO1SwoBX1kUfkdEMWGQct0XD60oME7kwPypYRNSsjreUUoqURzMda3/WrkQJKegBpyudNWjCykOOEE/vKE/wou/0dZ2ti08l9/wDaepT/AHUt1qrHtbGNrJqRJiQB0NYM2sOjvEgJA4Ig/hTwnSNs8ytX2h1IbkEcHn8KTx2j2r4vjvnEFoTyBJH+BQtqyNqXA3/rwtJJBgnoI6fSt7bLL9ApPUUVb6bYuVJlx0JK9oVtE8Dr06UUvBWeOSJQtwN8ytXX6DrTU01RMtwVib955yQghswNwPJp4t70JaVKiQCdsnnrWrgDV8yAEtpQkbUgAJH0FN1s+e5P7x3HoZPWs0r4Q38KHhu+VdNPoVxtjg03ou+7vEMpVtBUqR9KHx90VfbE/diInr9aETcd5mLYc9SOD5U62+oWm7Hi9eSlkE7o3AgjrQV84VOuFIUnwySfOvMy5LHhIEHkGhboOoaeUDuRs6DkVC4s0dVyEY4TYpAV0nlVJtvIQt5O6SYJJ8qGwr3+YKJI3bj5HitWyHLh+ZVCRJ/dq5W+GOPgXcfSpSUoUpSpiUGfxpouXVBQlKt3QTx19aJeuWmXkSsgzBg8V66hNwlS2wCJgkcRUdikq5GPIocUpCW2lOBZ5IMAfWmhRSi5MNuLA4IChA9YqS3DMtpbWFAhO0keXHU9KZbtNtiQUpcQXdsxHJ948vKhSe2g5ADfcmMe6R6l4CfyrKb13bpWohSYnjkVlGB0izH88TKx4p8jWyM+sJQlaQsHqT5f31A1XrzohoTPAg8j3rb7dcIJCVKHMTSdX6mbT4HztKyXe6TUEkkOOpTz161UbCXO8BMAHg/+lWLrBK7nTFj/AFlukq+k/wDaoIsC2JVt8uJFRfY2jhGmQcLdt4Y3eYV1NTXsB7Yrnsi7RMbmnAbnEkG1ydl5XNovhxsjz48Q90iq4yN2p0RxyeRA6U3d/CoSFdevpV7Ftpoa5Olu3LsswPZnqy2urfbk+znU6De4DLtjclpKuSyuDwUEwU9Y+Rqm9QYK60zcObLlarOQULB3hIPI+YPkRVsdgPbFhMvpi67JO05SntBZVzdZ5EmXMHec7LhB8kSfEPQnymkdbdlOoeyzUNxonUbaLl5DZewuVTzb5G1JkBCuhBHPB8Jrzpb9CV3g9vpNP
S6yP2dqp9n59GVXYarvLPb3d0evCkq4qR2GssohQWi9cAPmVTUEzmPTjLjvWd6bZyds9UGeUn3Fa4/JqaISuDJ4BPWu5U8o8bUhLTm4Sw0Wrba5y7hI+0lUiOQDH5U4jXGWaZJN1BHn3aefyquLfJ7iOAnnp60Scnv5KyQeompa9CCcI7RcyErSLlIE9ChI/Divf8o+YJSn7WFHpJSkR+VQQXcwoE8eUxWqrmVglR61ajB9h2T8dpWYg7rhKkj/ANkn+6jbXtHyqzs79vjnlEfjVcm7UgAeREx50XaPBuJmJnn+VG1N4EWexrjIXDat7qeR0CRQj+q7qzBefi4tFdUkRFRuzyELBkhQ9fOiHbkFlYeJLapBSroR7ioW0QvmdH4bWDK7uwIZufMtwOfccT86rTP6ZusA64h1slE8OJEpP/enK7v7jSeT721dU2wvxIIPl6GpxgtU4zWDAt7lSWrspghQ4X8prRt8rgKspiCop6CDwqJqxuyxAVa3pJ2+NPl7GvdVdmykuKcskweoYPT6elE9nuMucdbXDdyhTK93CVJ5pqRMrSwSVfLalJjhRj3rVglx57cfvR58TFEC2dCQSUxJO4iZoZKXG3nEp43KAiOOldFrsZtXkIWpbV00B94tGYHlNJujchk8SV8n+dEXC5eAJ/2cD1FJkGGUkwQrr9Kq0yGm+RQOFOPuQIkgDnzrxxHeLJ4CkoggVuvjFvAcFZEyOnyrx4JbUYTzs+6nr+NQ3fI2qA20F1lgchYJ9uZpfGA7lDkAr56EVjaOG1EckGeJrbGtLDwIIH7Tz5oTaHy7QbmQ05eLSSSUJAAV06eRpsLRUW1AkRAKVHgUTnHyLwrHKpjpQoA2pKAJ9AZip3tcDUV3H++SlvEMjqkyTHPSmtAH2YwsqQenkRR14FN4hg7SolJMRwPamlgFTJSSByPvdDSTpjccEh0q5ttMioq+60QSf5U0lv8Az9alFRG2PpTlpqWcZkyraqEQZEczTe6lSblxW0jwfvD86lvc8lKKSFNOkHMtp8i5MnqRTvnx3uTu+72hIUPvefSmjTay5lmTG0c8xBozLvuKyV4qFBKlwkk9RNKknTG0wXJEhZj+rxt55pxzaivGWiUq2g8wKaLt0lIAgSP8RTtkxssrVSfMAwBTlwNLuDC4V9mc5iDzJiaWwBU2q8lU70Dz6Hmg23CbB9ckkrkE+VK6ddV319u5SEAmFfPy+lLNC+ppbvFtdptURLpkx1EflWmUWUpdVO5MTBHWtIm4tgE/vkmflQ+acQhp0BQBiCnqKIruxO6wPWTuSHEHcRCQeOabLO63W5jkBR6cGisoIRIRCigc9eIpjsHEpsioqAlRG0da0g1YpK1kOs7r9rdSDBSOlB2tyE5i1R+6onxDyFe4xwF+8nrEevnQpcLWdtAE7Qo+I+pqnz8JntxwPmeJatDBCjuBCZApG+WlNi8oghRA6Hik86oCyKVAzMieBQtw6fsIKTHAkxPFc94NKVm2FeUnHOIST988AcRXin1PXa0l9JlEpSPLmhsC6lNm9Cud5JHlNC/aEounk8AlP3o5Fabb4BywYm8bYumypSlDd5o86elXhQkbRLcE8etRlD9q2ZdAUoKB3J/nTpc5BlkbkMgx1561DZXIjmcs7bsJWllXXmB/LzqKupuXFuKW71O7c5En5CpC5lWHUoccQAhP3m0AyaZ72/YfT+zWlO5Uhso5H1oUh/gM0uf1R+NZTp+t1p43NccfdNZS3MY8qtVIkJ2qX5wAf50KthanEy2ISPMRJpBy5l4ngj0gc1qu/wC8ACGwEIPPoaI2htC+uHza4jDtBQbUtBKkjiIjn86ru8cWSVb0qHQ9eD/iKnPaZcKTc4plY2hu03DaeOTHA+gqB3Cp3AJjzj39afGWO0BLYLm2XUg/Pn8qQ+xKUuAU8+ZNF7plOyFH6GlmgG1DcCOQesmrt9w4E04Z4JA3pj0ma65+HLte09r3Stv2Q9sF6GsJu
3ab1ZP+cYG6/dBURyyTAIPh8jxyOXpk+EGU/veVbBZI9PZVZzUdRbZIvTm9OSlF5Rd3xI/Drqbsi1K7Y5e0QbfIGbW+tuba4diUuNHpDg8uoMz5E88qwj5dILzAWD0K9pHzruX4au3vD9qujkdh3bA69d6ZvilvC58KAucRcAy3+0P7gMQT06Hwnij/AIpfh41B2Ca+OMzKUPt3SC9b5BgQzdgdVo9J4JT+6SRyIJw0ovT+C8HZ1muupktZxpvnw/UpJjGXbfCilRjiFfzp8xunci6oJcVbNg9C5cJAP4maRxLiQttC+Co7Z9DUgLBQtW1W4p6keVdVN/KecIo0herAAubAkk8JukT9ea1XojKKUUpdsiPa6QIH1NODLapCoB3QCDTgxZFSFEqgx5nj6U9t9yW65GlvQ2USmN9tuEQn7Sjn86VRozMg7i0xt8yblvj86eLRtSrjaI2iQRxzS1ugtbOeJIj0p1WATvgEttL5Ntvd3TaiPS4RP8aNOm8mlEdyiD1/bJPznmt3GuNoE8yB/wClD5jUuH0y8lvI5At3BSD3DCC4tI/tCRFZ7Rbm2RjW+mMu5aN93ZqdO6f2agYH0PSoRa4jLWzydts8hY54EEfWrKGvtO5BTTDV/dpWshI3WR5M/wC9WmQ1jg7Fh9k5B64uELIhNsU7iDH3iaFV0yrdWSXSOZvV4nblmkuvNDa2omCR70aHQpwuBJ3k+VQLF66sClZd70lUEQJj86k2n83aZFTgS+Tt6bklI59J603FN4JecjzyolJJ5H3QY5rFAJUDMwegjk1u00orgmEkkyaxTKVEFKiRMxVpNYD6ijrYNwoiCCiPDQ62glaCSTt4E+dEuw4SVdZivXGgU7pH3eRFF06Qtp6htD1kBG2VeVIvskJWoqJI46f3Ucgn7MjaCIJmkX1K2uHcTyBRco4GxG2TtS2IAEGsxrZN4n707uSDzRXehDSUgGNsCRW1lDbqeBHmDTtsSGrLpK7iUkckz70GZQ6AE9OeD1pzv2N0GY8RIHtQTjRQseKVEyKSeMjUaY+XKivHMhCZAR7Uz2+xYAgz5U43iy1bNpSCpXdgcHiPlTXthtKzO0mOOs1jyVVD1jl93jL5SV9CmSoz503uuFLylqSSCiJHSnCwCf1FcpIMTHBppfle9KZPHVRgCi3YcYCcIoC9ag7D6k8fKi8u9/nMRKis8k9aatPhIv0NpJASOp5/jRORuDb3YHLp3mVA06vkboTu19dyABt5kcU75QkWdhtM8CUiTFR6+uVO94o75gdSOPPpTrfXCXbe3SVw4hPiPv6U274H6iTa3VWBXtiFGUny5rbBOKfdvwfAlKEqJ9YnigWr0HFKCiNwc5HzNKYe723N42hBgtpJKug5NL0DnIo+tYyNolIIO4mZ9qD1A4pPiJgjk+hpdSd99ZuL6JVMEiTPQe9CaoALRW3KVSBG6hSdiqx8yThLDZHCu6HTr09ajlo8hVnBAPiMc0/3532iIUZ7kDymYqNWqwLRaOfvecTWkXXAmrWQ/GrPeXA3QQmCT580lffs81aKhWyRzSFi4hFy7uVCikfvRPNLXQ3vsrMzIgx0+dH4kpJoW1JdJcY2j9nxIJNDh9f6vkxBA4r3LL225G4KVJ5HmPShX1k45scoJTAg/wATWd12G1gUwbu1FwmIBXME9aDuSEZJW9MAoUAN3FbYdxLZuHdoKtw5/wAfKkFvKcu3TIJCFCR1qlJplDWt7c442EyZkT/jmjL+7UkIABhYEyAKZC/suTydx4g0Ze3LikJB48MGm8MdCS7hxpcqJI8oHl8q9aU2sFwyTEiB50I84gpCuCZ6k15bOhxajI858VS2uQCVOoUonulCeeAP76yhVXTAUQSZ+lZVbhUSF3GIQhRCt08+M0iixQ640ndsO8DjmefWkri8LhJBBJ8xWY66DmRtW+DudA6eZNZJd0xJMD7S3e81P3QnY1bNo8X1P86iD4KVAJUFcTBP86kmvFF3V
mTIEqS6GyR5wkCmO3xT+SuWLS2b33DytiUE9TTTTLoBDCVBJK/Lonmibdsd4kIUeSACeI8qcP1G2zjXnis/siWyscJKgJPXmmzGqi4SZ3DgwTVJvkkc/wBVO7yUvJB84PFFDDXYTLRS7PMTE1oL0biE+Mf1QOaNtLxzelIQSpfACeTPoBVW6tl05OooL0cX29RWSWwUXCX0bQOs7hXdXxGg9o/wTW2Wybnf5PSOoUY+zvFSouMLQJQT5xuAHsgVSvw1/DTqTtE1L3tnZhV0YLlzcCLfGo/814/1o5Sgcnjp1Eg+Lftn069pnB9jvZ5fDJ6V0+6bjI5qf/zS9g7lgjhSQSrxDgzA4Ary9N+9198PlXfyfUdTow6L2atDW/qyadfdXr9fByItlTL8JUElJkKnrUuxbaX2omJHkZM03YnH7svbFTabhvfy2oSD7EVJ8nrLB4hDlm3hW3LlBUhalpShII9hNenbq0fKvOARi0Lc+AAmfEknxCjbdruQBtTz+VHaXUjULKHG7NtMnaEp4CuORSN62Ldx0HwKS4U7QOnNS/KJXqJ2SFKfVKYMdIqTYTSrmVc71xQaZExHUkVG2obUolRSEjpHWatzs6urO/xiWFFAuEcFtSQdw9RT7UQlluhkvOz61btFLLygpPMgyTXKuvmri81nlnQ248kvkBSUkiBx/Ku4tQ2CFWC1J4KgQIbSAPkRXGGXzuoMbmr4299f463dfcdS0l1bcAqJiB86VNMqLvgddIaDtcLibTU2qcsrCWDzg+yMoaK3bnaefCOg461vqvs3RkMQ5qjTmXay2JcuFIeSWy27brJmFJPlz14p+s9RYftM0tjsNq52+YubAqTbZO3IckEyQsGefofpQWR1ZhNHabudO6aTkUMPu77i+u0hK3o6QAeB8wPlTuSVFOyCYe1u7dxx11IeYYUO9QFCQCeo9as7Cs26n2yBtaWQUkDy8qgFha3F+shhxxaldSeiR71YWKtSwq3TEqbIlUcGobwkOiXnwuIlcgCPY0o0lLiklRO7oQPSk7dCX0BRIMeYFFpSlMK7wJAMARzVXeCGjcpSoOEEzMzApNYUszCQOBIjkV4t7cVciJ8oBrZBJXtLm1O3melU6QCjyChDaSAJ85mknkfs1JIA56TNb3zqWgygg9Y3fWhLwpSy4pBIHnuPSi0wVoMUO8t0pKuiehHlSVikIf3BZI58McV7uBsgVq2bUjwjpSNm6pxJ7tfG0z61OHkqhC7uQVgdSJ8uQKSSCXRBKkniPSht6UXToA/ae9bFJF40RKSQTMQBSzVB3HXPq3qb2oKdqUiZ4n2ptebCGEmDEetGZdf7CFKO4bZIHWg7wd2yFI5KRwiKaflCSHTHKDGDgBS9ypmmy3cSF3YErRJ5UOh8qPs1qXgm1K8Mn16U22zqkouQpMGSPERJpt0NV3F9PJbTk0kyQEySI617l7lLdw4pZhO87ekmtME6gZIbYiJgGgM+VNXAWlcNJklZ5SKm9wNJg99fXKCC2hAZVwFOkA/h9fWnZ3UNpdMthSClxoBCpHHz4qJXl244kuPXBSwk/wDliVjy2g+tAW94/eXT/doUhiZKUdfrNVFO2W4pE1tnWRYLcd2bEuQCk8H0pG0ytszdPBSFELEJgkT/AH0xhTwtW2/EBJURt3CirRPdwPs6lJAkEEQPlJ96l44ZcYqSodg8i7vLZCEEICgkIB4FE5a2+07mx41+XlPpTZb7m3BtSUFPIbCxJPz/ADp3S6lbKlOJWN4+8F7SD8+tZb32K2JZCrsuHGpcLZCEICVLKh1jmospostqBSreomP6qTUpLjaLRbaLV54k95wghM+s9KEeJesk2/2Tu0KO+FeJST8/KiE8jlAjrLRQ85yTKeFEzxPlSi3ltOBO4FII8+laWq0pUsBUt+RieZ96GyF33d+hophJUI4iDWr5yYPwOWXUe5UQvqIM/nzQVw4f1SlR2ztBJH8Pet8y+lq1VKh84kU2vuOLw
zZSqQQDM8U7wS03yGYF4uW9wEiAYHB4/wC1AKc7u8KRwopMgCJ4rXTdxDd5KdxBTyOABzQryijKMuFfAkEqHkQaUasEqwwB0lt3esAkHpMxTm53bjIncFkDg9I96absn7o4JpY3Q2gblQEwSr+VUnTwOnwY9t7tKWwE7ep55oFd0EvFKlhJMkGODSj9z96DISPTzpqcX3yxG0LmCaJJdxqORI3LpJIdTB/s1lTCz7OM5fWjFyxiXXWXkJcQuPvJIkH8KyuH7To+T2F7J6xq9p44N7YUQUbTEAc04aYt/tGdtev3pH05pzu9FZZAIctHEq9EgGldP6fv8bk27i5YcaabSoytMCt99cHj57EW1JZKuMzf3BSClVwvnpPJim6wSWsmjZ+zcHiCknkVIE5qzvE3CHeHHIIV5irl+Gz4V7j4kdarxWGztthLdi1XcXV4+yXlNpBSkJS2CmSSrzUB86qN9x9snO9+/wD6OeKwstlRMb+nI5plsL20Q6VLtnVjoYcCZHzg10vrT4Ie0XB6zymhrG0/XWWYf2tLs23FIdZJlLvCVd2FA/vGBzzUtsfgU0v2OWSc526dpGL0taMwVYfFuC8yLqv/AC0tomPmJ9xT3xrIRi5cI5U0xgMprXP2+MwGIu8lf3K9jVvbpK3FHrAgSTXZWkfht0d8OeCY1R26agTgrpxnvbbSmNWHsxdE9EgDhkHpPHupNRXNfG5pzstxtxpzsA0cjR1s413K9W5RtFxl3x5lIMoan6/IGuXM1qTJary11lcvkbvK5K6V3j13eul110/2lEyahx96qkdGjrT6d7oYfnuvodG9s/xk5jtJwLmidD4pvs+7OkQhOIx64ubsAcquXhyuepSDB/eKqpGyxiXW0pCQggHny+VMNheqt1JUlKZHWTBo5OpHGW1AtFXJ5J6D8K0UaVJGM5ym3KTtsk7LisWEv25R3g81oB/IimzJOPZlwKuUIKgd25DaW5J6ztAn60zr1M6oSWJB6eKT7UbY5s92s/Z5Uo8wef4Utpnxljxp9dxjr1pxq4eQEK3IQl5QRPrEwafbx1V1dNKUUp3KnwgCTPlFRRjOqbKf2GyQfCT0NFM6kShQcUyoFBn7wM/lVq08CrwSa5S0ygqW4ASehHNFF9bDfesKLakDwqSSFT7VHP6QM5F1tCW1pKj6DrTg6XHO8SFAgAgCY4NMKHRrLX97ZhLt5cvJVyQ66pQH0JpoRjWbvHuN3DSXElZMLExxTpaI22LcEggdOs0NYlamVAJElZHypbqD6EaY06xYFxphJQB6niaXZx6S+hDrDTiyOStO4H5TT0EKXfXKzCjCUyOTS902G7lhKFRtSZ9TwKLvgf1BTj2rVspShKEkbUJSmBPrWW+Net0KU4ptSQD09aNdaSoMpHEGeKP4Sz4RyRE+9RVghC1UlCUAKKFbRyDNNNzkbpYU0yyvcFEEqlMc9fejL91rHWbly+4GkoEkqUAKijurkXCSsPXoaMFIbbAH0BIJ/Cqq0T9SbY5t1NstToAUsAn/AAaCyYfVBZJBHXcOoprwmcGQaX9nuVqWP3XhBn3FPLlztZm7SGjP3pO2iVD9ButXLj7cyhxwqb3A8qJIIp4v0hFurzPoeleNWqSokDcscSPKkrpSW2VSVRHSCAPrSVVhBzgNFwF26QkgLgSPWlMUN1upRTMBXQ9ZNJItim2BG4eg9KXxDa2rRxWwzBiCCDzU97oMob0QbxwqWmR8pFabCq9Z5BBk/KibNJF1cHmTHQc0nEXzSoCAOBx71bqqGe5oQglBBMgHih71o90pBHi2xtJMzR2ZZbSpSQpclYkx0/xxQl+4ojancQE9QP50QrhiFmjGGYbUBunkk+VNdulRau1pWUkK+8oSD/dTkhBRircrUqVA9RFN1u4Wbe54E7uUn0+dO23TFVZRtp55tOSLagrcW/EsmRSuVShailXIJ6poLFspTfrJUASmefSvbsqWpG5wpSFmEyKLLB7rHoW04
2ghJWmCtadwEenIpe0062hppbri3XUp8KgYTHp7/jSN94gsJKk8RvHEU8KVFoyhUrhHhVESfPmldA238w3WyEOY99K/AlKyAowJ+VBMW5Lsd+EJjq5Ak0je3TzY7rwhIUSFKUIH8KZLjUocuhbpbCg0k/tCSdxFQ7jhKzaCVXZKU90h9hC3g+pxwcATA+c807X14MW8HUJCoEbViQahLGcTeNoc8KVNrEp9PcU53mVVdQrvgkKTuBUYkUl4ZbSiSF/IrYZ7xb6Gtyd21QJIJ8ueKaxm0ONOhDqu8Wdq5QACPzqL3T7j44UlSjz98bvwNJtXS0LICYCRymJANG1dzPc/I62LyRkFpKSDsMAnim/MPKVkGZIkOCZ8ua8xz4OSSqSYSR8qTzw/zpJHhUVBXh5NWuSHlj1mCCwpO2JHIkdKAU4o4FpSOnPAPvHFL5J0Lt0JKyF7f3jNNqbucShskTJnyHWhPsJrGTMI5vVdp/1XCTHSaHvdyblIEgTImtsQpQuHghQA7uSSRBrS5c33bYJMkxJ/hTSrgK7DXevbXSgqOz+zwTTe46smSVbZiTHNPF1Y94pJWtSo6DpxWmO0lmdW5JvHYXGXmUu3ICbWxYU8syeISgEn8KmU1DN0VRH275wOlJ3Fo8H+/wBquz4Yvh+yfbv2gW1gxbLcxTDiV3bokCJnbPl7+1XB2Qfo0NcZhm2zHaTkLLs50ySlxwXziF3609YS0DCCenjMj+qelXF2sdvXZ58NmgzoDsxtFBTjZbeddXF5eH0cPBaQT13bVK5hIkk+H1PXPqH9n6TMny+yPT6R6Wk/e6quuF2/EuNhvsqwbDeON884bNIt99vjluNq2DbKVbTuTxwZMisr5n3HbH2j3Fw66nVNzbBaioMsvbUNyZ2pE8AdAKyvH/6fl31Jfmex/Puq+/8A8/I+21z8NnZfcOJcd0gwpQ4HDn99cq/pBOyvQPZn2T2l9gMEnEZO6u1N963vgoDSioEKMekV3/FcCfpZMolnR+krFTh8a7h7Z9Epn/q/OvtsM+Odo+VLKEl2QswBweB0rv8A/RMt9z2naleZUl9KsWA6VPpT3Q7wRCep5H0muAykIU7uWlJSklIj8qlHZ12v6m7KDkDpnMOYRy/Slq4dYCQspSSRCiCRBPlRLj4eTROj7C/Hn2ju6M7Fr7K4HWNtiMpZK7xeObyCLd2/bPhUhMELUUzuCUkboIr4u5LNHV2Tfvspf3b7xClBwtggE9B14EnrQOrNSXup7032UyF3lcg4oqcuLx5Tzij/ALyjNN2PdQWlADc4pUbfIVnDRSlvfJ1LqtRaXuE/h/X8zE2HiOxZgn0mPpRbdqWUQSd0HxwKIQhbaILDi1TyWwD+ZNKFl5TgBZfbH7u6B/Ot2s5OW3wIbFJ2joOgkz+VKG2deA7zxAdOKOtMctbvhIKpmCOetPbWMUUnvE7j6AUbaQDBbYwqBSICvKacbXGPIV4YAjk08otUpG4oMnjiARRrNsDCDtSqOvSpvyhUR1eKccKSEDn05olvDHw7yQII29PwqUIaabMDr6+9erSpwfdHXg0srKJT7DFYW6bMthIMqPG7y5p7YQp5t2ClSgTAIEH+6tEMhbsqEBPH3f504Wttub8KoT5HbQs5H3FWmSy0Ek87Y5NDWjTkhPmpZIkRTi2ySggJmOsCtGh3akgkmJn0B+tF2gbsRZSsvOgztMR7V7kGdt7Mgq28A8+lEtbHXREEFUx71j7Z+0KJKQlXmTNO1HkmrNX2tyGglULEEx1rVYCEhKjuEyDHnW7/AIC2W1dOCZma8eWS0YIAPINThsLGXUs2+EusgiyRkri2Ept3E7kyf3iPaZ+lRLRebf17c3zGXtbcNW7CnUXyW9pYUCABPpyTHsanKbl20K1MrW2ViCRCpH14phzdpncvbpthlWLa2WqVoatwkrH9r1/GKr0HmiHG7Wcnj0tPd/cKe2NutNkB1M+Y6x71aqm31MAJbQt0EQFCR
+dR/Tmj7XEP/aHnl399tI3qG1KR6JHlUxsUB4ArKkmY8I6VbleGDE0hW0JJIVHig8UKq3W9IUChPSDwRTspCA8QFGR5HzrLwIShKNhBKRHEVLS/ssBAthq1SnmRxIHQ0pi3D+rHSuSdp5PB61q7LbZIVKR19BSloEps3YWFkxx86lY4DIJbEOOvFKCkyJJ6dKHQkqyDSU9J5EcDmimisOLJMA9efFWqFFTyCCZBAnzqqyDdcCeWeCFrBUF+MAelCXgKpB4448oou7cUt1W4JV4uQRJPNJ3YU4IB8opJdh9hVoBvD2okqimwrKLd4qBCd3KqdNxFmyAQRtgx/KgFT3auCtJ+9yNo/OhKgTsa7d7ZkSpKQITB4rS7cOxO0BUK8Q+tOuktKZ3WefcssBhshmb4ICjbY1hx9Q8pIQDA9zV56K+Abto10+2l3TSNNWSgD9uz10hkH1HdoK3Z+aBR9Qbo5wuHSWnUmQI4JHUn2p3XcJTb24AG7uwBHAmPevoZ2MfowsfprUAyPaHm7HVdh3C2v1LbWzrbe9SY3l7vASU9R4RzzUJ1F+jA1C3mL5ON1fhbPCd6r7Kq8S8t5LUnaFiACQIBO7mJrn19fS0I7tR0hwuWEcD3NqjI29wFHYUq4I5g8c00WmEdNyprYH3DBLqHNpCeZER8q7sc/R76d04idR9vWmcUgn9on7EgE+oBXdJ/h9KOxXw0fC/pZbr2V7ab/N3YSd6MZc25Rt9kNsuKn/irydT230kcRbb9EbrTn4ODV41pru0PFtCFKENNKkk+pPl/Gls0htCklaw35CTHHpXcWWs/hL0o8h220XqjXDrJlLjztw01Pqe9cZSfwNbr+Lvs10alTej+xzSOCdQPDc3aGC7Hqru2yon5qPzrgl7b1tTPT9LOX4bf3NlpfekkcJWWByGUuUCxxt5kSoja1YsqecPyCAT+VTPC/Df2r5BsCx7PNWrt1qhA/U1ykHzklSAB8ya6Qz36R/VTKu5xt1hcTbwSlGPsfF/1KIB+aarPN/Gz2v677xvF57Nutef2VQtuPXc0G4/Gp+2+2J/F7iEF/ilf7I00un97LbC5PwkNOH+CntvvXUuJ7PckwmOFXT1u1HzCnARTqv4Bu2W5WFXtjg8KynkvZPO2zYB852KWfyqP3Ootd5tptzNZt0PrB7xeTvnbxYPodx4/5jQT6MfasKXk80tTyh4nWUBBHyABj8aX2v2hJ09WH+WMn+tn0On/AAz1ritTU09i/wATS/csJ34K7vbszPa92W4dwEBbY1CXnR/whtPP1oxv4NNCiz7i97etOqUnmcdiX7oDz6pXz1qnncvhmUCLnKvpHSXT4h6iTQStS4m4BLdjdvJCuO/uPxHXpVJe0J/+WX4Qiv3ZMvYulp/1Oo01/mb/AGTL+xXwidkGIHeXXbY+6lSZUq3026iB81L4o5rsJ+GLBbH8r2kav1Apsz3OPNlbJJB6ELSVH6EVz7iXVahuFt4rTTL4RJW886hLbZ/tLdIbQP8AeIqSYM6SRdp/phqnHYO2QqVW+m7O3vHVjzHelaW0H3G8UtTS6nTju1dSb+lL9EmTH2X0snjXT+ib/ei6H9Z/CzpBpH6r7NVZ9afPO5S5dWoepaSFIn2mtMj+kYb0ZjRhez7RmK0bY+TGGx7Vson3JCgZ9doPvTJZ6s+He0RFthNWajcQJ76+ybAK/wD+NDfd/iKgOvu0vspx1k8nCdn2csnFnaPtT9kGj8wlifxFcENR6k9sul1JfV4/V/6G79ndDD+p1G36x/2bBe0b4gO1DtQuFP5XLnBtOpgKVcw4EccErWVgeu0fSm7S/Y92YPNi71N212rF6VbnLbHYZ+4MnrDjpbk++2KpjNas/Wl0pLWDxtm2tXAaY5SPKSOCfeBTTdWjdrdOpgJUDICeifavqPs2tPTS0pe6+iX+tnk666Lp51CXvF54/Ro6x/ycfDqnhXabqlShwT+rWhJrK5I72f8AarH+PlWVx
fyrqP8A25/p/sX/ADLp/wC5X6f7H6ZyoJ6kD5181P0pjl1qXXGBwtoQo22L74c8eN0zz06N13nkuzZ7IKk5u5bHXwj/AL1yv8ePZdj9K9kqc4u5N3lFXjVqh5xADvdncVJCup6dK9y5PsfNO3wfJ67waWFkvXzQAH3USoj8OKZ7jG28kd6tQ67p61KncNfPmE27oTzyWzx+VBXmkMrbNb12Fwls/dKmyAR6iuyNS5ZdkacRaMs8tPOKSf3VAQKXxKEXVvcC12sP7ZBeUCT8veppo3sf1Br+7VZYv9XMPTx+sb9q0T0n7zikj86h2qdK5DReeu8XkA03e2yyhaGXkuJHuFIJB+YNVtVEqXYXwNxeYzH3yr55TTCgEo7wSpSvQUirMu3rjbaXnPCQYI4ptbT36R3h73mOVcfjTzjre1YgylKTxMAfSsnhhbJLg2A4YkqVA5/70/JtHEKHoOabMVcMW8AKTz6ECn5hSFEHvQs9BJkis3F4ZSfYTatSR4lwOOopcWgSgriZECelEoQ0pQbVtTJmaLEFW1IJSeokGqWOQY3NNIZJ449CaWDSlK8KSkj1pwRbM71TE+vQUqhphyCkhPi+8OaWOxHcb0sFKwSk9PKimYQ5BVyfXr+FFqYAPXweifP0oVQV3koVtTtgpPU0c4SH9QqUlqZSJ680igpQo9T8xNJNK3JWAQR0kitXHCkkFRBJijaxUkKF5PfoLaRPWAOOtIXit70qIEqjaKSlBuQVK/Dih729S0psfdB569TVbV3Fdh7qSiAgSD/WMc1qSExuIEiNo6Umyvv1lXDiB+96fStn7ZtQ3oWoJBggnr+VLauwZfJuEMJbCRuUJ8uAPyr1TTSkkpASU+ZFeIbLJhUFAM7f/SjP11a95+yYZQYjhHn8zNUo3wF0huShCFHcuAffyovHOft43EnoAeKSfUkubihEq6mOK8tHQnYopAINRtfYalY9vspQ8e9uEJR57ef+1eXimklClKUuAOOgIpvdWhYUofveQr1y7T3iI8Q2gGfl1ptJAhZ9QLKfCDPMelbMqSGVpnmR0EetIO3AUjZtkkcKB6Vqh8paUncNxMk02qyM2StO5Unn1H8KTQsl5JMdevStGjIUSrn86wRCCpO3xCDSTkgTXc8dT3iySedw4HnWi1FMgBJH9o0mtwKWqSAoEx16UQ02gL8XMc/P3ozyhIxwL7sDbtKT0pANFNusGAT5GjN6CrxE7h5A80M7+1SpIPi8pP8AGlRV2i/vgK7UFdm3bm1jX7pu2x+pWf1c446ZSh+dzKufVUpj+0K+syHmygL7xK4kFSTxPQ18FGl3Fo6y8w8WHG3A428jqhaTKVD3BAP0r7HfD32yOdr/AGOae1PjcY2u+uUKaybLboSlm8Qdrw+RUNw9UqBpSi+UCdlx1x5+ke7ObTJ6BxWunbe7uRgFqYumrV0pBYdI8ShInasDnyC1V1zjXLt20Qq+abZuCTuQ0rckCeOflTZrrR2P7QtG5nTWVaD2Oylq5avIUJ8KhE/Tr9KhxjNVJWUnWUfChGrrEsOO4rA2NuEqKSt+VlR9fX86RsdVaizb6mbBMJAiLRuEpPqSQRU11n2c4rsi1LmNPX7N3mcrjbpy3cYcSGGQQeJMkqBEHymRFJ4jSer9Z2i/slgcNg2uq0EWlqBP3e8PK/kmZqJPS6ZOTpI7um6XqOt1Fp6EXJvwQDK4y7S6hOczzjrs7kWjALxT7qPQT6EilLLsrzep0qebtzjMag83N4vaj58DnjyA+tWa3jNI9n6FCGs7lDyQ4mGW/wDdST4ufM1DdW65yedSXe9U02Bt2JMCJ6ADoPavLfV6/V46SNL7z/0R91p+wfZ/siC1vbWrcv7uPP4vsaf0G0Vo8hxzfqC/b5KnTDST7I8/rQVxrS+v0i1tGGsdbgHa2wgCBHyio6b1C0qQOFCEhMnpTnhWErcX90q6CDW+n7Pj82vLe/Xj8jzep/ifUhH3Xs3TWjD0XxfjL
kbXW7y6v2Xbl11wTO1aiRNG5C0LqHOJ3+fpTqLaXSSQJVMivHGRcPltKNyyYSkGu9RjHEUkj5PW6nW6mW7Vk5P1djbb4S4v121pbW713ePwhpi3QXFrUeiUpHJJ9q6K0Z8DuZwmFbzfaRe47R9q5+0as8lk2bUqSRMrMLVPQFIAPvTJ2edrafhsx+SXjcBZ5bXN23tRk3HRvx7RSJbaids8lSoBJMTAAqqtXdpeY7QLq6ymbe7zI3DhUtW8rJ9ipRk14mu+u6mT0tD4I/e5f5HqaENLo17zqEm+y5/ZpfuTDtMymi8HeHGafv2s+W/37NDzVi2ehCdxHeH1Vtj3qlsyhl1LjvcsoIVu2toEfKi9264SqAo8+c/lQ9+jcHAdpSoeIjqPau3pOifTpKU3N+X/AMonqva+t1MPdKMYR/wxS/XkkFplLTEYRnukBTikDqJMxUL1blHMlagvJCGwqY9TRdyVIs1NtwFJAIV96KaMpkbi5tTbOto2DkqT1/CvVUc5PCyM1s22tzeFBSIHJ/vo2/sgp4uEEpUATJoY42zaUgC/hExyxyPwNHPvsOwEvJMJAMtq5gfKqaTwmGVyNP6sb9R+BrKde7SeimyPLgisp0hbmfpPri/9KNlRZ9juCtNyQq4yC1CevhZUZH412hXCf6TllOVa0XjHy4m0cTcuKWlW0BXgA56TBNc06rIM+Vi8tfocQUPOJSiJAPUisv8AV+bumgkXb5QgmAFGE1ZjnZrgCFf52/4zMi4Sfn1FBns0woUE/bHT6AOJP8qpOL7BRAcDr3UeFukrsru9tVA8fZ3lIJ95SRzTdnzdZW8uru9S+5kbl4PO3FyoqWrg9SeTPr7Vaw7NcWwoLbvHgoGEglH8k/OvX+zyzu1+O6uSqPIIM/8ATNbKUeCc3ZTlvYKUySAU8wSRFLNWCz4YASOkHpVsDs0s24Kbp/bMg7Uk/XilxoRlTTn7d7wiQdiT/KhzhHng0rwVhaWzu4QrZtHUmnhmyuJBFw2OOBJqe6b0EzlrhwXLpbYT1IhBJ6ACp3YfDFrLNkK0/pLK5NBA2LTblCeeh3OFKfzrL3uknVk0ykU2F4nalN0mZ81Ec0SjHZDaVh1JIHI3kGukMP8AAj2y5ApUrTDeOSZ8d5krYAH3CFqP5VJbP9HP2nXaZubjBWoUOQq/cUZ/4WjQ9bTbJzwcs2ePyVwYQN0DkBziKdbbC5Huwe7G2D0Vx+ddbae/Rw65xneOHUGDStSNm0KfMT5zs/lTwj9H9r5pISM3gHeOCpx8cf8AwzUvW0+zC7ONGE3TG5KgkKQkqhSwTHtRyrhSG2z9kbLik87iODHzrrt34Ce0Fkj7PktOOq8ip15JHr/szXMOrLXI6b1Lk8M+3bM3FlcLt3VSVJUpJglMDpPtVRnF4vIDCh0lA2NN7enh5FB3C0uLKVMJSZncDzFONw8+7+zevLbnqdjoj8EUBcWb7qYFxbeLoR3k/Xwir3JZEskdyt8qzudoUhpHqtYP5TNR+91KZhtlx1wfvE8D6U55nSGTyOSuLlKrfuVEbJcIO0ACAIoRHZ/lS6koSyqTBAuEgfOSRWqafIWrob7XL5VbiXWGnDB5SAYH0p5Zyt5dOq7xh5lU/uyR+dG4fs5yV9lrezbbQp91fdtIQ8DKvcg1Ksh2Ha5ZuGrXGWTeRuT91hD25Z+QPl7mom4rFjXJERkX0DwofA6KKpk0rbZLaqXUkJHms8/nVk2Hwx9uRYCz2X3ly2RIKb61SPwLk14/8OfbMgkOdkmVKhyFIuGFfwcNZqUHmxbvJCkZNkgLU6g8dK8GUaCoQ6mCegPT6VJH+wjtYt194eyfUYEEQhrefyptuOxbtKbX4+y3ViSeeMa6rn8K0uLfI+wD+tmS0UhRhPUg1u5kmlu8LEEAADiRFKudlWumUEOdmmrkqI//AGp+f/poZ3RGrrZPdu9n+qGwfJeHfJA9OU1NxWB2qN/1sjvBKwI5UZit1ZFtTKj3i
FkKjgQabnsFnWwUu6R1CzAH38U8nj/loJT900mX8HlEJSIhVgtJHz8NVXdEfiPaciju5gcDrHNKou0LKQTCSoACZJqHu5qxYWS63cM+o7opPTzkf4itU6jwsgm9cbWRyFJ6fKpvI3gk1xfIQ4uCQBPB6/jSqr522u0N3TSmHdqVBKp5SRIP1qHvZfB3BKk5Yp8gSPnRF1kcXcXYeTlUg92lJBEjgfQ03gomasq2hQ8SN09QOB860usk2lJWlSXJH3k+VRMZHG7Fg5FLpgdFdaTGRsoITkmyIiZpLOBKyTPXSFMJhcBJECuyP0bHbCrC64y2gr7IqtbDMD7dZgkbftLY8aRP9dsD/kFcFKvWXUlDeRBE8eKn3QutL/s+1fhNQ4i/ZF7jLpu7a3KjfsUCUFQ5hQkE88GlVpl0ff6yyllehD1q936FykKSokcH0NOFfKjVn6UbtAv0uI05htN6WYVylKEqu3QT571BKT/8OqD1x8VXaZ2kvqcz2vsncIUNptbe8Nrbx7stbUH6pJ96UdNpZKVd2d5/HpgbLs+y+N7QrXTmMyDuTWMdd31xbpdU08hMtEzxJQFJkg/cHtXD2e7Ss/q67dNxdr2wShrcSEj28o+QFQJHaFnFadewis9cHBv3Au3Mb9tULZ10RDhb3QVDjmPIegoBGSuPCpp1kggJ296OPmJmuLU6PRnPfJW/+dj39D2913S9P9m0JbY+Ulf58jxeNoXJcXucJ4TPM1H804EBakEpPMpnp70rcqybqgUMpJB6pcTz9CaDurXIXoJXa7lI43BQgcedd0IJJI8KepOct03bYzW74Q93yvAjpuERUu0/3bqCpsyVcDzqFX7V4psIWhYSgESrkjmpB2P6V1Fr3XeI0pp21VfZbIvhhltUhIHVS1HySkSSfQVpKLMr7kyurJ60sLe7dYfYtboLFu+40pDbxSYUELIhUEcwTBq1ex34ZNRdoN7j3XrN95u7Slz7KCpCGWTyHrpwQUJIkpQCFKHPANfU3sz7EMHofsZ072fZS2tNR2GMtQ08b+2Q42+6SVuL2KBAlalEDymuG/0ivbdkNM6wstBaTyS8ZgBYJfyVtjyllLzqlqCUlSIUU7U8pnaeK8D2jp9Xqw930rSb7+D1vZ/UaHTyepqae6Xa+F6tdzmfttThsdrbJ4TTb9s9gMcpNs27ZtpQ084lP7RaQB0KiQDzISDJqtUtLaZWY85ABisusr3iESJV5CYj5Ckvti1MwG1COYAP4139HoS0NGOnJ21y/Jh1nVavWau/UYgGQHAsAxySSYFBXEoWrYNoPp50V9rAd7tSST1UAJEfjQd8w74C2CqeiSo8D+dd/q0cWLoFcCi8Qkgq6En0oV7EuP2wf7xCGt23xTJrL2zukkFBIWPEDMCtrN25bYWy+A4DzKTz70n5FTI9fMlh8IDra1E8AiIpMkb+ImY4HlTlkbBKnStKFonkSKalKdtXCFIA9JHHWqSbaoMsOSE7Rwrp6ispsOQdkyzzWVrtkTR+mik3mGrhO11tLg9FpBpSsrkKG+407irue/xlm9PXvLdCp/EU3udnulXp7zTWHcnk7rBo/wD21IKyo2x8DIo92TaIuUFDujsA4giCleMYII/5aAe7BuzW4Xvc7P8ATC1cDccRbzx0/cqdVlPahUivF/Dr2WuAg9nWmOesYlgfwTSCvhq7KVzPZ7pyD1H6ubj8Iqyqyk4RfKGQ/T3Y5oTSVz9owmjcFibjp31njmm1/wDMEzUs+ztRGxIHsKUrKn3UPACRtmj+4PwrPszUfcH4UrWUvdQ8CEhbNj9wVn2Zv+oKVrKPdQ8BQI/jGLlBStJg/wBUkfwrnvWvwD9l+ts7eZd79dY28unC659iv/BvPJIDiVxz9K6PrKa04xdoKs5HuP0aHZq9uCc/qpsERH2m2VH429Avfow+z5aUJb1TqhCU+RctTP8A8iuxqyq2vyKjjBX6MTRoPg1fqCJ43otzH4IFIL/Rh6aAP
da1zCVdQpdsyqPbiK7WrKVS8go0cc6Y/Rw4PT+dtsi7rTJ3YYkhtFo20oyI+9Jj8Kv/AEl2I6b0Vb93jbBAWfvPODetXzJ6n3NWPWVlLSc+WFEf/o60BwhP4VodON+SB+FSOsgelZvQruOiMHTTcf6pP/LWh0y2P3BI9qlUD0rzaPSktEKImdNpVx3aY61odNNxBbT8oqX7RXhSOOKl6dAQlemwmfD9KFOlwVco+QFT5TSOPCK1LKOu2o2PySV27pNKhBaSR58TNA3OgbK5/wBbY27nuppJn8qtAWzZ/drw2rUgbaHFp4YNJ5opq77H9P3TZFxp/GP/APvbJtU/immW5+HXQr4PeaJ0+v1KsWwZ/wCmr+XatkRt4pJVmyDGwRHSh35Ckc33Pwrdm9yf2mgdOL9/1UwD+ITTNefBj2W3hJOg8K2T1DdmhP8AKupjZNQBt8q0VYsjogUNya5GvQ5DuvgZ7MHVlQ0fjUeyWAAfwime5+ATsyfWT/Rm0QBwNgcT/Bddmrs2d0bBWJxzAWRs4HFNTm1yL1OEc/8Ao7tCXLCk4/FtWTscbnbhY9v9qIqEu/o0MbK4yKIjhKA6An6lZJr6WKxNsUf6sUM/iLUJJDYBqnqasV8w6Pma/wDo07Vsq25MkHiUqWD8/Om9P6NhhLhL+UfZTyNzLhKunXkD/Br6ZvYy3TPh8utCHHMKiUTzSetq4yPB813P0a1pt3N6iyTZAI8IR09ZIoVf6NhraSNV5SB59w2on38q+lj+OYC4CABQzmNtx49g3etP7RqLuCS5PmPkv0ddzaJU61q++dIHCHLVMHjofF0Pypu7Psff/B12s4DV+RxrmcVaretF2ji0sr2rQPGlY3CQJievrX0+cw9o8IWylQ96huv+xbR3aRZs2OosOnI27J3tAurbUg+ykEH860jr6jltk8DpIovth/SGWea0CG9Ch62zdw4UOJyVqUqtU7eSSCUKM9NpUPWuFP6K6u7ddWZZxv7VqDKo2uXN69cpBAV0MrI468D6cV33rX4Juy8Md7aWWUx6oBi3yTpTz7LKqiOjvhq0t2fZG6vcNe5li5XtStZvB4wCYBhI45rZ6kYJuIbrdHIjPwW9oRe3NtY5CeCftN2qR6jwIUD+NT62+F/tBx+OYZ+y4laUoA3IvVSr3goFdmYvDW6UBKy67Hmt1XPA9DUgtcZZNsx9kbUAvb4io+U+tc/2rUqmNxp8nBj/AMNeu1ISVYqzXtMhSbpE9fKgL34etcAlxWCYcHRJ+1skk/VQr6CqsbQulJs2DMGSmaSbYtg/tFmwATHCPStF1E+BJWfOPMfDtrZxHGm/FHT7TbkE/wDxKieU7GtY4xouOaZeQ0kEqUHGigAecpWfKvqQ+yy08lKbdoBSwPu0g/a26rS4K7ZlwbFApW2ClQPUEdCDVR6qUcPION8nyXzej8ha2CLhy3Q22AmSSFRPlNRHIYO9cb3Is31IUeqWlH84rvP4pOy7T2ldDHK4i0VYKdcShds2uWfWQFSU/JJA9q5MxHjvENdEhwp94Br0NKdq6Jl2oqz+jGSPIt3I8pQuf4VlXspKdx8KevpWV0b2Tt9T/9k="
# #
# # img_path = handle_path(MEDIA_ROOT,"tmp_img",f"{sn}.jpg")
# save_base64(s1,"test.jpg")
# data = {"img":img,"work_order":"A12345","sn":"s0001"}
# data ={'work_order': '123', 'sn': '111111',
# 'img': img}
#
# #
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect((SOCKET_HOST, SOCKET_PORT))
# client.sendall(str(data).encode())
#
# print( str(client.recv(1024), encoding='utf-8'))
#
# predict_class = str(client.recv(1024), encoding='utf-8')
# had_error = re.search(r"Error : (.+)",predict_class)
#
# if had_error:
# raise Exception(had_error.group(1))
# else:
# print(predict_class)
# client.close()
#
# print(datetime.datetime.now() - s)
import base64
import io
import random
import re
import socket
from io import BytesIO
from typing import NamedTuple, Dict, List

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# NOTE(review): `settings` and `recvall` are referenced by predict() below but
# are not imported here — confirm they are defined earlier in this file.
# Immutable (width, height) pixel pair, declared via the functional
# NamedTuple form; behaves identically to the class-based declaration.
ImgSize = NamedTuple("ImgSize", [("width", int), ("height", int)])

# Default target size for processed images (16:9, width x height).
DEFAULT_SIZE = ImgSize(640, 360)
def show_array(np_arr: np.ndarray) -> None:
    """Render an image array with matplotlib and block until the window closes.

    The original annotated the parameter as ``np`` (the module object); the
    value actually passed is an image array, so annotate it as ``np.ndarray``.
    """
    plt.imshow(np_arr)
    # plt.axis('off')
    plt.show()
def _resize_size(img: Image) -> Image:
    """Scale *img* so its width equals DEFAULT_SIZE.width, keeping aspect ratio.

    Bug fix: PIL's ``Image.size`` is ``(width, height)`` but the original
    unpacked it as ``(height, width)``, which made the *height* (not the
    width) end up at DEFAULT_SIZE.width.
    """
    width, height = img.size
    scale = DEFAULT_SIZE.width / width
    # PIL's resize() expects (width, height).
    return img.resize((int(width * scale), int(height * scale)), Image.BILINEAR)
def _img_to_base64(img: Image) -> str:
    """Serialize *img* as an in-memory JPEG and return it base64-encoded."""
    buffer = BytesIO()
    img.save(buffer, format='JPEG')
    # b64encode returns bytes; decode to a plain UTF-8 string for the caller.
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
def _base64_to_img(base64_str: str, out_path: str) -> Image:
    """Decode a base64 string to a PIL image, save it as JPEG, and return it.

    Fixes: the original called ``io.BytesIO`` although only ``BytesIO`` is
    imported at the top of the file (``import io`` is absent), and wrapped the
    input in a redundant ``str()`` call.
    """
    raw = base64.b64decode(base64_str)
    img = Image.open(BytesIO(raw))
    img.save(out_path, 'jpeg')
    return img
def zoom(img_path):
    """Load an image at the default size, apply a random zoom augmentation,
    and return the result as a base64-encoded JPEG string."""
    target_h = DEFAULT_SIZE.height
    target_w = DEFAULT_SIZE.width
    loaded = tf.keras.preprocessing.image.load_img(
        img_path,
        target_size=(target_h, target_w)
    )
    # Augmentation layers operate on batches, so add a leading batch axis.
    batch = tf.expand_dims(tf.keras.preprocessing.image.img_to_array(loaded), 0)
    augmenter = tf.keras.Sequential([
        layers.experimental.preprocessing.RandomZoom(0.4),
    ])
    augmented = augmenter(batch)
    # Drop the batch axis, cast back to uint8 pixels, and encode.
    return _img_to_base64(Image.fromarray(np.array(augmented[0], np.uint8)))
def rotate(image_path: str) -> str:
    """Open an image, scale it to the default width, rotate it by a random
    angle in [0, 10) degrees, and return it as a base64-encoded JPEG string.
    """
    img = _resize_size(Image.open(image_path))
    # randrange(10) is the idiomatic (and allocation-free) equivalent of
    # random.choice(list(range(10))).
    return _img_to_base64(img.rotate(random.randrange(10)))
def normal(image_path: str):
    """Return the image at *image_path*, resized to the default width, as a
    base64-encoded JPEG string (no augmentation applied)."""
    resized = _resize_size(Image.open(image_path))
    return _img_to_base64(resized)
def predict(image_path: str, sn: str) -> dict:
    """Send a (rotated) test image to the prediction service over a TCP socket.

    Parameters:
        image_path: path of the source image on disk.
        sn: serial number used to name the returned annotated image.

    Returns the decoded response dict on success. Raises Exception with the
    service's message when ``predict_class`` carries an "Error : ..." string.

    Fixes over the original:
    - the socket was only closed on the success path (leaked on exceptions);
      ``with socket.socket(...)`` guarantees closure.
    - the error check ran AFTER ``int(data["predict_class"])``, so a
      non-numeric "Error : ..." value raised ValueError before the error
      branch could fire; the check now comes first.
    - return annotation corrected: the function returns a dict, not str.
    """
    # image = zoom(image_path)
    image = rotate(image_path)
    payload = {'work_order': "work_order", 'sn': sn, 'image': image}
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((settings.SOCKET_HOST, settings.SOCKET_PORT))
        client.sendall(str(payload).encode())
        data = recvall(client)

    # Surface a service-side error before any numeric parsing.
    had_error = re.search(r"Error : (.+)", data["predict_class"])
    if had_error:
        raise Exception(had_error.group(1))

    accuracy = ((int(data["predict_class"])) / 3) * 100
    exec_time = data["exec_time"]
    with open("result.txt", "a+") as fout:
        # Collapse to a binary hit/miss flag for the result log.
        accuracy = 100 if accuracy > 0 else 0
        fout.write(f"{accuracy},{exec_time}\n")

    # Decode, persist, and display the annotated image from the service.
    predict_img = _base64_to_img(data["img"], f"out_img/{sn}.jpg")
    show_array(np.array(predict_img))
    return data
if __name__ == "__main__":
    # if os.path.exists("result.txt"):
    #     os.remove("result.txt")
    # Fire five predictions against the service; each run uses the loop index
    # as the serial number ("0".."4") for the output image name.
    for i in range(5):
        predict("/tmp/fan_detect/media/test.jpg",str(i))
# filename = f'zoom_img/{i}.jpg'
# with open(filename, 'wb') as f:
# f.write(base64.b64decode(zoom("/tmp/ob_detection/training_demo/test7.jpg")))
# accuracy = []
# exec_time = []
#
# with open("result.txt","r") as fin:
# for line in fin.readlines():
# print(int(line.split(",")[1].replace("\n","")))
# accuracy.append( float( ( line.split(",")[0]).replace(" ","")))
# exec_time.append( int(line.split(",")[1].replace("\n","")))
#
# print(sum(accuracy)/len(accuracy))
# print(sum(exec_time)/len(exec_time))
# img2 = Image.fromarray(data)
# print((img2))
# processed_string = base64.b64encode(data)
# print(processed_string.decode())
#
#
# output_buffer = BytesIO()
# img2.save(output_buffer, format='JPEG')
# byte_data = output_buffer.getvalue()
# base64_str = base64.b64encode(byte_data)
#
# print(base64_str.decode())
|
from rest_framework import generics
from ..models import Product, Service, Certificate
from .serializers import *
class ProductListView(generics.ListAPIView):
    """Read-only endpoint listing all Product records."""
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
class ProductDetailView(generics.RetrieveAPIView):
    """Read-only endpoint returning a single Product looked up by pk."""
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
class ServiceListView(generics.ListAPIView):
    """Read-only endpoint listing all Service records."""
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
class ServiceDetailView(generics.RetrieveAPIView):
    """Read-only endpoint returning a single Service looked up by pk."""
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
class CertificateListView(generics.ListAPIView):
    """Read-only endpoint listing all Certificate records."""
    queryset = Certificate.objects.all()
    serializer_class = CertificateSerializer
class CertificateDetailView(generics.RetrieveAPIView):
    """Read-only endpoint returning a single Certificate looked up by pk."""
    queryset = Certificate.objects.all()
    serializer_class = CertificateSerializer
|
from flask import Flask, redirect, url_for
from app.models import db
from app.controllers import blog, main
from app.extensions import bcrypt, login_manager, principals
from flask_principal import identity_loaded, UserNeed, RoleNeed
from flask_login import current_user
def create_app(object_name):
    """Create and configure the Flask app (application-factory pattern).

    object_name: the object/dotted path handed to ``app.config.from_object``.
    Returns the fully wired Flask application.
    """
    app = Flask(__name__)
    # Load configuration (including the SQLAlchemy database URL) from the
    # given config object.
    app.config.from_object(object_name)
    # Bind the shared SQLAlchemy db object to this app instance.
    db.init_app(app)
    # Init Flask-Bcrypt via the app object.
    bcrypt.init_app(app)
    # Init Flask-Login via the app object.
    login_manager.init_app(app)
    # Init Flask-Principal via the app object.
    principals.init_app(app)

    @app.route('/')
    def index():
        # Redirect the request URL '/' to '/blog/'.
        return redirect(url_for('blog.home'))

    @identity_loaded.connect_via(app)
    def on_identity_loaded(sender, identity):
        """Populate the Principal identity with the current user's needs.

        Registered inside the factory because it needs access to ``app``.
        """
        # Attach the logged-in user object to the identity.
        identity.user = current_user
        # Grant the per-user need (hasattr guards against AnonymousUser).
        if hasattr(current_user, 'id'):
            identity.provides.add(UserNeed(current_user.id))
        # Grant one RoleNeed per role the user carries.
        if hasattr(current_user, 'roles'):
            for role in current_user.roles:
                identity.provides.add(RoleNeed(role.name))

    # Register the blueprints on the app object.
    app.register_blueprint(blog.blog_blueprint)
    app.register_blueprint(main.main_blueprint)
    return app
|
class Solution:
    def rob(self, nums: List[int]) -> int:
        """House Robber: maximum sum of non-adjacent elements of *nums*.

        Classic two-variable DP: at each house choose between robbing it
        (previous best without the neighbour, plus its value) or skipping it.
        """
        prev_best = curr_best = 0
        for value in nums:
            prev_best, curr_best = curr_best, max(curr_best, prev_best + value)
        return curr_best
|
import pyspark as ps
# From the command line, run: python create_spark_context.py
# In the Spark shell a SparkContext is created automatically, so no manual initialization is needed.
# Build the configuration step by step (the setters mutate and return the
# same SparkConf object), then create the context from it.
conf = ps.SparkConf()
conf.setMaster('local[4]')
conf.setAppName('My App')
sc = ps.SparkContext(conf=conf)
|
import os
import conan.tools.cmake
import conans.tools
class TestPackageConan(conans.ConanFile):
    """Conan test_package recipe: builds the bundled CMake example against
    the packaged library and runs it to verify the package is consumable."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake_find_package_multi"

    def generate(self):
        # Emit a CMake toolchain file describing this build configuration.
        toolchain = conan.tools.cmake.CMakeToolchain(self)
        toolchain.generate()

    def build(self):
        cmake = conan.tools.cmake.CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        # Only execute the produced binary when not cross-compiling.
        if not conans.tools.cross_building(self.settings):
            self.run(os.path.join(os.getcwd(), "example"), run_environment=True)
|
# Generated by Django 2.2.5 on 2019-09-12 12:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: redefines Article.response_to as an optional
    self-referential foreign key (blank/null allowed)."""

    dependencies = [
        ('articles', '0005_auto_20190912_1445'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='response_to',
            # CASCADE: deleting the parent article deletes its responses.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='articles.Article', verbose_name='Response to'),
        ),
    ]
|
#! /bin/python3
# -*- coding: utf-8 -*-
# Temporarily add the parent directory to the module search path
import sys
sys.path.append("../")
# Third part imports
import unittest
import requests
import time
from datetime import datetime, timedelta
# Our written python scripts
#from config import TestingConfig
endpoint = 'http://192.168.1.52:5055/'
#endpoint = 'http://localhost:5054/'
"""
Important note - Gunicorn and the Flask development server use different error
codes during a failure.
"""
# Module-level state shared across the ordered test methods: ids created by
# earlier tests are consumed by later ones.
sensor_id = None           # set by test_1_AddSensor
sensor_id2 = None          # set by test_3_GetAllSensor
sensor_id3 = None          # presumably set by a later test (outside this view)
temperature_rowid = None   # presumably set by a later temperature test — confirm
class TestAPI(unittest.TestCase):
# Setup and teardown
def setUp(self):
print("\n===========================================")
print(" RUNNING METHOD ", self.id().split('.')[-1])
print("===========================================\n")
return super().setUp()
def tearDown(self):
    # No per-test cleanup is needed; delegate to unittest.TestCase.
    return super().tearDown()
#===============================================================
# RUN TESTS
#===============================================================
def test_0_Login_Logout(self):
    """Exercise auth/login and auth/logout: wrong HTTP verb, then happy path."""
    json_headers = {'Content-Type': 'application/json'}
    creds = {
        'username': 'test',
        'password': 'test'
    }
    login_url = endpoint + "auth/login"
    logout_url = endpoint + "auth/logout"

    # ===== TEST WRONG METHOD (login via PUT must be rejected) =====
    resp = requests.put(login_url, headers=json_headers, json=creds)
    print("*** Answer testLogin : WRONG METHOD ***")
    print("URL: ", login_url)
    print("PAYLOAD: ", creds)
    print("HEADERS: ", json_headers)
    print(resp.text)
    self.assertEqual(resp.status_code, 405, msg=resp.status_code)

    # ===== TEST WITHOUT TOKEN (login succeeds and returns a token) =====
    resp = requests.post(login_url, headers=json_headers, json=creds)
    print("\n*** Answer testLogin : WITHOUT TOKEN ***")
    print("URL: ", login_url)
    print("PAYLOAD: ", creds)
    print("HEADERS: ", json_headers)
    print(resp.text)
    self.assertEqual(resp.status_code, 200, msg=resp.status_code)
    token = resp.json()['data']

    # ===== TEST WRONG METHOD (logout via PUT must be rejected) =====
    resp = requests.put(logout_url, headers=json_headers)
    print("*** Answer test_Logout : WRONG METHOD ***")
    print("URL: ", logout_url)
    print("HEADERS: ", json_headers)
    print(resp.text)
    self.assertEqual(resp.status_code, 405, msg=resp.status_code)

    # ===== TEST WITH TOKEN (logout succeeds with the bearer token) =====
    auth_headers = {'Content-Type': 'application/json',
                    'Authorization': 'Bearer {0}'.format(token)}
    resp = requests.get(logout_url, headers=auth_headers)
    print("\n*** Answer test_Logout : WITH TOKEN ***")
    print("URL: ", logout_url)
    print("HEADERS: ", auth_headers)
    print(resp.text)
    self.assertEqual(resp.status_code, 200, msg=resp.status_code)
def test_1_AddSensor(self):
    """POST temperature/sensor: rejects bad auth / wrong verb, then creates.

    Stores the created sensor's id in the module-global ``sensor_id`` for
    the later read tests. Consistency fix: the authorised call originally
    used ``endpoint + "/temperature/sensor"``, producing a double slash
    (endpoint already ends in "/"); normalised to match every other request.
    """
    url = endpoint + "temperature/sensor"
    payload = {
        'name': 'test1',
        'folder': '28-0516b501daff',
        'position': 'test_position',
        'unit': 'c',
        'comment': 'test sensor - first'
    }

    # ===== TEST WITH WRONG HEADER (no auth header at all) =====
    req = requests.post(url)
    print("*** Answer testAddSensor : WRONG HEADER ***")
    print("URL: ", url)
    print("ANSWER: ", req.text)
    self.assertEqual(req.status_code, 401, msg=req.status_code)

    # ===== TEST WRONG METHOD =====
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer {0}'.format('')}
    req = requests.put(url, headers=headers, json=payload)
    print("*** Answer testAddSensor : WRONG METHOD ***")
    print("URL: ", url)
    print("PAYLOAD: ", payload)
    print("HEADERS: ", headers)
    print(req.text)
    self.assertEqual(req.status_code, 405, msg=req.status_code)

    # ===== TEST WITHOUT TOKEN (empty bearer -> 422 from the JWT layer) =====
    req = requests.post(url, headers=headers, json=payload)
    print("\n*** Answer testAddSensor : WITHOUT TOKEN ***")
    print("URL: ", url)
    print("PAYLOAD: ", payload)
    print("HEADERS: ", headers)
    print(req.text)
    self.assertEqual(req.status_code, 422, msg=req.status_code)

    # ===== TEST WITH TOKEN =====
    req = self.login('test', 'test')
    token = req.json()['data']
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer {0}'.format(token)}
    req = requests.post(url, headers=headers, json=payload)
    print("*** Answer testAddSensor : WITH TOKEN ***")
    print("URL: ", url)
    print("PAYLOAD: ", payload)
    print("HEADERS: ", headers)
    print(req.text)
    global sensor_id
    sensor_id = req.json()['data']
    self.assertEqual(req.status_code, 201, msg=req.status_code)
    self.assertEqual(req.json()['msg'], 'Success', msg=req.json())
def test_2_GetSensor(self):
    """GET one sensor by id: bad auth / wrong verb rejected, then success.

    Consistency fix: the first request originally used
    ``endpoint + "/temperature/sensor/"`` (double slash, since endpoint
    already ends in "/"); normalised to match the other requests.
    """
    url = endpoint + "temperature/sensor/" + str(sensor_id)

    # ===== TEST WITH WRONG HEADER (no auth header at all) =====
    req = requests.get(url)
    print("*** Answer testGetSensor : WRONG HEADER ***")
    print("URL: ", url)
    print("ANSWER: ", req.text)
    self.assertEqual(req.status_code, 401, msg=req.status_code)

    # ===== TEST WRONG METHOD =====
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer {0}'.format('')}
    req = requests.put(url, headers=headers)
    print("*** Answer testGetSensor : WRONG METHOD ***")
    print("URL: ", url)
    print("HEADERS: ", headers)
    print(req.text)
    self.assertEqual(req.status_code, 405, msg=req.status_code)

    # ===== TEST WITHOUT TOKEN (empty bearer -> 422 from the JWT layer) =====
    req = requests.get(url, headers=headers)
    print("\n*** Answer testGetSensor : WITHOUT TOKEN ***")
    print("URL: ", url)
    print("HEADERS: ", headers)
    print(req.text)
    self.assertEqual(req.status_code, 422, msg=req.status_code)

    # ===== TEST WITH TOKEN =====
    req = self.login('test', 'test')
    token = req.json()['data']
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer {0}'.format(token)}
    req = requests.get(url, headers=headers)
    print("*** Answer testGetSensor : WITH TOKEN ***")
    print("URL: ", url)
    print("HEADERS: ", headers)
    print(req.text)
    self.assertEqual(req.status_code, 200, msg=req.status_code)
    self.assertEqual(req.json()['msg'], 'Success', msg=req.json())
    def test_3_GetAllSensor(self):
        """GET /temperature/sensor (list): auth failure modes, then list after adding a 2nd sensor."""
        # ===== TEST WITH WRONG HEADER =====
        url = endpoint + "/temperature/sensor"
        req = requests.get(url)
        print("*** Answer testGetAllSensor : WRONG HEADER ***")
        print("URL: ", url)
        print("ANSWER: ", req.text)
        self.assertEqual(req.status_code, 401, msg=req.status_code)
        # ===== TEST WRONG METHOD =====
        url = endpoint + "temperature/sensor"
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.put(url, headers=headers)
        print("*** Answer testGetAllSensor : WRONG METHOD ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code,405, msg=req.status_code)
        # ===== TEST WITHOUT TOKEN =====
        url = endpoint + "temperature/sensor"
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.get(url, headers=headers)
        print("\n*** Answer testGetAllSensor : WITHOUT TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 422, msg=req.status_code)
        # ===== TEST WITH TOKEN =====
        req = self.login('test', 'test')
        token = req.json()['data']
        # Adding one more sensor
        url = endpoint + "/temperature/sensor"
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        payload = {
            'name':'test2',
            'folder':'28-0516b501daff',
            'position':'test_position',
            'unit':'c',
            'comment':'test sensor - second'
        }
        req = requests.post(url, headers=headers, json=payload)
        # Shared with test_4, which deletes this sensor again.
        global sensor_id2
        sensor_id2 = req.json()['data']
        # Get all sensors
        url = endpoint + "/temperature/sensor"
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.get(url, headers=headers)
        print("*** Answer testGetAllSensor : WITH TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
        self.assertEqual(req.json()['msg'], 'Success', msg=req.json())
        #self.assertEqual(req.json()['sensor'][0][0], sensor_id, req.json()['sensor'][0])
        #self.assertEqual(req.json()['sensor'][1][0], sensor_id2, req.json()['sensor'][1])
def test_4_DeleteSensor(self):
# ===== TEST WITH WRONG HEADER =====
global sensor_id
global sensor_id2
url = endpoint + "temperature/sensor/" + str(sensor_id)
req = requests.delete(url)
print("*** Answer testDeleteSensor : WRONG HEADER ***")
print("URL: ", url)
print("ANSWER: ", req.text)
self.assertEqual(req.status_code, 401, msg=req.status_code)
# ===== TEST WRONG METHOD =====
url = endpoint + "temperature/sensor/" + str(sensor_id)
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format('')}
req = requests.put(url, headers=headers)
print("*** Answer testDeleteSensor : WRONG METHOD ***")
print("URL: ", url)
print("HEADERS: ", headers)
print(req.text)
self.assertEqual(req.status_code,405, msg=req.status_code)
# ===== TEST WITHOUT TOKEN =====
url = endpoint + "temperature/sensor/" + str(sensor_id)
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format('')}
req = requests.delete(url, headers=headers)
print("\n*** Answer testDeleteSensor : WITHOUT TOKEN ***")
print("URL: ", url)
print("HEADERS: ", headers)
print(req.text)
self.assertEqual(req.status_code, 422, msg=req.status_code)
# ===== TEST WITH TOKEN =====
req = self.login('test', 'test')
token = req.json()['data']
# Delete first sensor
url = endpoint + "temperature/sensor/" + str(sensor_id)
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(token)}
req = requests.delete(url, headers=headers)
print("\n*** Answer testDeleteSensor : WITHOUT TOKEN ***")
print("URL: ", url)
print("HEADERS: ", headers)
print(req.text)
self.assertEqual(req.status_code, 200, msg=req.status_code)
# Delete second sensor
url = endpoint + "temperature/sensor/" + str(sensor_id2)
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(token)}
req = requests.delete(url, headers=headers)
self.assertEqual(req.status_code, 200, msg=req.status_code)
# Get all sensors
url = endpoint + "temperature/sensor"
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(token)}
req = requests.get(url, headers=headers)
print("*** Answer testGetAllSensor : WITH TOKEN ***")
print("URL: ", url)
print("HEADERS: ", headers)
print(req.text)
self.assertEqual(req.status_code, 200, msg=req.status_code)
#self.assertEqual(len(req.json()['sensor']), 0)
    def test_5_EventpoolStart(self):
        """GET /temperature/start/5: start the periodic polling loop on a fresh sensor.

        Creates a dedicated sensor (`sensor_id3`); test_6 removes it again.
        """
        req = self.login('test', 'test')
        token = req.json()['data']
        url = endpoint + "/temperature/sensor"
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        payload = {
            'name':'test3',
            'folder':'28-0516b501daff',
            'position':'test_position',
            'unit':'c',
            'comment':'test sensor - first'
        }
        req = requests.post(url, headers=headers, json=payload)
        global sensor_id3
        sensor_id3 = req.json()['data']
        self.assertEqual(req.status_code, 201, msg=req.status_code)
        # ===== TEST WITH WRONG HEADER =====
        # The trailing /5 is the polling interval in seconds.
        url = endpoint + "temperature/start/5"
        req = requests.get(url)
        print("*** Answer testEventpoolStart : WRONG HEADER ***")
        print("URL: ", url)
        print("ANSWER: ", req.text)
        self.assertEqual(req.status_code, 401, msg=req.status_code)
        # ===== TEST WRONG METHOD =====
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.put(url, headers=headers)
        print("*** Answer testEventpoolStart : WRONG METHOD ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code,405, msg=req.status_code)
        # ===== TEST WITHOUT TOKEN =====
        req = requests.get(url, headers=headers)
        print("\n*** Answer testEventpoolStart : WITHOUT TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 422, msg=req.status_code)
        # ===== TEST WITH TOKEN =====
        req = self.login('test', 'test')
        token = req.json()['data']
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.get(url, headers=headers)
        print("*** Answer testEventpoolStart : WITH TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print("READING SENSOR: ", sensor_id3)
        print(req.text)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
        # Sleep for 16 s
        # Gives the 5 s poller a few cycles so readings exist for test_8.
        print("We will sleep for 16 s. Please check server that test_function has been triggered")
        time.sleep(16)
    def test_6_EventpoolStop(self):
        """GET /temperature/stop: auth failure modes, stop the poller, remove test_5's sensor."""
        # ===== TEST WITH WRONG HEADER =====
        url = endpoint + "temperature/stop"
        req = requests.get(url)
        print("*** Answer testEventpoolStop : WRONG HEADER ***")
        print("URL: ", url)
        print("ANSWER: ", req.text)
        self.assertEqual(req.status_code, 401, msg=req.status_code)
        # ===== TEST WRONG METHOD =====
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.put(url, headers=headers)
        print("*** Answer testEventpoolStop : WRONG METHOD ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code,405, msg=req.status_code)
        # ===== TEST WITHOUT TOKEN =====
        req = requests.get(url, headers=headers)
        print("\n*** Answer testEventpoolStop : WITHOUT TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 422, msg=req.status_code)
        # ===== TEST WITH TOKEN =====
        req = self.login('test', 'test')
        token = req.json()['data']
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.get(url, headers=headers)
        print("*** Answer testEventpoolStartStop : WITH TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
        # ===== REMOVE SENSOR =====
        # NOTE(review): only the sensor row is removed; test_8 still queries
        # temperature rows recorded for sensor_id3 — presumably readings
        # survive sensor deletion. Confirm server-side.
        url = endpoint + "temperature/sensor/" + str(sensor_id3)
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.delete(url, headers=headers)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
    def test_7_ReadTemp(self):
        """GET /temperature/read/<id>: one-shot reading from a temporary sensor."""
        # ====== ADDING ONE SENSOR ======
        req = self.login('test', 'test')
        token = req.json()['data']
        url = endpoint + "/temperature/sensor"
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        payload = {
            'name':'test1',
            'folder':'28-0516b501daff',
            'position':'test_position',
            'unit':'c',
            'comment':'test sensor - first'
        }
        req = requests.post(url, headers=headers, json=payload)
        print("*** Answer testReadTemp - Adding sensor : WITH TOKEN ***")
        print("URL: ", url)
        print("PAYLOAD: ", payload)
        print("HEADERS: ", headers)
        print(req.text)
        # Rebinds the module-global set in test_1 (that sensor was already
        # deleted in test_4, so reuse is safe).
        global sensor_id
        sensor_id = req.json()['data']
        self.assertEqual(req.status_code, 201, msg=req.status_code)
        # ===== TEST WITH WRONG HEADER =====
        url = endpoint + "temperature/read/" + str(sensor_id)
        req = requests.get(url)
        print("*** Answer testReadTemp : WRONG HEADER ***")
        print("URL: ", url)
        print("ANSWER: ", req.text)
        self.assertEqual(req.status_code, 401, msg=req.status_code)
        # ===== TEST WRONG METHOD =====
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.put(url, headers=headers)
        print("*** Answer testReadTemp : WRONG METHOD ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code,405, msg=req.status_code)
        # ===== TEST WITHOUT TOKEN =====
        req = requests.get(url, headers=headers)
        print("\n*** Answer testReadTemp : WITHOUT TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 422, msg=req.status_code)
        # ===== TEST WITH TOKEN =====
        # Reuses the token obtained at the top of this test.
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.get(url, headers=headers)
        print("*** Answer testReadTemp : WITH TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
        # ===== REMOVE THE SENSOR =====
        url = endpoint + "temperature/sensor/" + str(sensor_id)
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.delete(url, headers=headers)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
def test_8_GetTemp(self):
# ===== TEST WITH WRONG HEADER =====
url = endpoint + "temperature"
req = requests.get(url)
print("*** Answer testGetTemp : WRONG HEADER ***")
print("URL: ", url)
print("ANSWER: ", req.text)
self.assertEqual(req.status_code, 405, msg=req.status_code)
# ===== TEST WRONG METHOD =====
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format('')}
req = requests.put(url, headers=headers)
print("*** Answer testGetTemp : WRONG METHOD ***")
print("URL: ", url)
print("HEADERS: ", headers)
print(req.text)
self.assertEqual(req.status_code,405, msg=req.status_code)
# ===== TEST WITHOUT TOKEN =====
url = endpoint + "temperature"
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format('')}
req = requests.post(url, headers=headers)
print("\n*** Answer testGetTemp : WITHOUT TOKEN ***")
print("URL: ", url)
print("HEADERS: ", headers)
print(req.text)
self.assertEqual(req.status_code, 422, msg=req.status_code)
# ===== TEST WITH TOKEN =====
req = self.login('test', 'test')
token = req.json()['data']
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(token)}
dt =datetime.utcnow() - timedelta(seconds=30)
payload = {
'sensor' : sensor_id3,
'start_date' : dt.strftime('%Y-%m-%d %H:%M:%S'),
'end_date' : datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
req = requests.post(url, headers=headers)
print("*** Answer testGetTemp : WITH TOKEN ***")
print("URL: ", url)
print("HEADERS: ", headers)
print("PAYLOAD: ", payload)
print("GET VALUES FROM SENSOR: ", sensor_id3)
print(req.text)
global temperature_rowid
print(req.json())
temperature_rowid = req.json()['data'][0][0]
self.assertEqual(req.status_code, 200, msg=req.status_code)
    def test_9_DeleteTemp(self):
        """DELETE /temperature/<rowid>: auth failure modes, then delete the row found in test_8."""
        # ===== TEST WITH WRONG HEADER =====
        url = endpoint + "temperature/" + str(temperature_rowid)
        req = requests.delete(url)
        print("*** Answer testDeleteTemp : WRONG HEADER ***")
        print("URL: ", url)
        print("ANSWER: ", req.text)
        self.assertEqual(req.status_code, 401, msg=req.status_code)
        # ===== TEST WRONG METHOD =====
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.put(url, headers=headers)
        print("*** Answer testDeleteTemp : WRONG METHOD ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code,405, msg=req.status_code)
        # ===== TEST WITHOUT TOKEN =====
        req = requests.delete(url, headers=headers)
        print("\n*** Answer testDeleteTemp : WITHOUT TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 422, msg=req.status_code)
        # ===== TEST WITH TOKEN =====
        req = self.login('test', 'test')
        token = req.json()['data']
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.delete(url, headers=headers)
        print("*** Answer testDeleteTemp : WITH TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print("DELETE TEMPERATURE ROW: ", temperature_rowid)
        print(req.text)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
    def test_10_GetDevices(self):
        """GET /temperature/devices: auth failure modes, then list 1-wire devices on the node."""
        # ===== TEST WITH WRONG HEADER =====
        url = endpoint + "temperature/devices"
        print(url)
        req = requests.get(url)
        print("*** Answer testGetDevices : WRONG HEADER ***")
        print("URL: ", url)
        print("ANSWER: ", req.text)
        self.assertEqual(req.status_code, 401, msg=req.status_code)
        # ===== TEST WRONG METHOD =====
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format('')}
        req = requests.put(url, headers=headers)
        print("*** Answer testGetDevices : WRONG METHOD ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code,405, msg=req.status_code)
        # ===== TEST WITHOUT TOKEN =====
        req = requests.get(url, headers=headers)
        print("\n*** Answer testGetDevices : WITHOUT TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print(req.text)
        self.assertEqual(req.status_code, 422, msg=req.status_code)
        # ===== TEST WITH TOKEN =====
        req = self.login('test', 'test')
        token = req.json()['data']
        headers = {'Content-Type': 'application/json',
                   'Authorization': 'Bearer {0}'.format(token)}
        req = requests.get(url, headers=headers)
        print("*** Answer testGetDevices : WITH TOKEN ***")
        print("URL: ", url)
        print("HEADERS: ", headers)
        print("DEVICES ON NODE: ", req.json()['data'])
        print(req.text)
        self.assertEqual(req.status_code, 200, msg=req.status_code)
#===============================================================
# INTERNAL METHODS
#===============================================================
def login(self, user, pwd) -> requests:
url = endpoint + "auth/login"
headers = {'Content-Type': 'application/json'}
payload = {
'username':user,
'password':pwd
}
return requests.post(url, headers=headers, json=payload)
# Script entry point: discover and run every test method in this module
# (unittest executes them in lexical name order, which the tests rely on).
if __name__ == '__main__':
    unittest.main()
|
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
if __name__ == "__main__":
    """
    A simple script to plot the balance of the portfolio, or
    "equity curve", as a function of time.
    """
    sns.set_palette("deep", desat=.6)
    sns.set_context(rc={"figure.figsize": (8, 4)})
    # Fix: let pandas manage the file handle (the explicit open() was never
    # closed), and use the public pd.read_csv instead of the private
    # pd.io.parsers alias.
    equity = pd.read_csv(
        "equity.csv", parse_dates=True, header=0, index_col=0
    )
    # Plot three charts: Equity curve, period returns, drawdowns
    fig = plt.figure()
    fig.patch.set_facecolor('white')  # Set the outer colour to white
    # Plot the equity curve
    ax1 = fig.add_subplot(311, ylabel='Portfolio value')
    equity["Equity"].plot(ax=ax1, color=sns.color_palette()[0])
    # Plot the returns
    ax2 = fig.add_subplot(312, ylabel='Period returns')
    equity['Returns'].plot(ax=ax2, color=sns.color_palette()[1])
    # Plot the drawdowns
    ax3 = fig.add_subplot(313, ylabel='Drawdowns')
    equity['Drawdown'].plot(ax=ax3, color=sns.color_palette()[2])
    # Plot the figure
    fig.subplots_adjust(hspace=1)
    plt.show()
import random
import string
from collections import defaultdict , Counter
from extractors import hash_collection
from sklearn.feature_extraction import DictVectorizer
def count_substrings(string, dic, max_len=30):
    """Tally every substring of `string` ending at each position (bounded by
    max_len), keyed by its str2quad integer encoding."""
    for end in range(len(string)):
        # The single character ending here...
        dic[str2quad(string[end])] += 1
        # ...and every longer substring ending here, up to the length cap.
        window_start = max(0, end - max_len)
        for begin in range(window_start, end):
            dic[str2quad(string[begin:end + 1])] += 1
def count_substrings_str(string, dic, max_len=30, prob=1.0):
    """Tally every substring of `string` (bounded by max_len) keyed by the
    substring itself. `prob` is accepted for signature parity but unused."""
    for end in range(len(string)):
        # Single character ending at `end`.
        dic[string[end]] += 1
        # Longer substrings ending at `end`, oldest start clamped by max_len.
        for begin in range(max(0, end - max_len), end):
            dic[string[begin:end + 1]] += 1
def count_by_len(string, dic, max_len=30, prob=1.0):
    """Tally substrings of `string`, bucketed by length in `dic` (a mapping
    length -> Counter). Each substring is kept with probability `prob`;
    the default 1.0 keeps everything (random.random() < 1.0 is always true).
    """
    n = len(string)
    for length in range(1, max_len):
        bucket = dic[length]
        for end in range(length - 1, n):
            # Draw even when prob == 1.0 to keep RNG consumption identical.
            if random.random() < prob:
                bucket[string[end - length + 1:end + 1]] += 1
def count_all_by_len(arr_of_strings, max_len=30, prob=1.0):
    """Aggregate per-length substring counts over one string or an iterable
    of strings.

    Returns a defaultdict mapping substring length -> Counter of substrings.
    Fix: use isinstance() for the single-string check instead of the
    fragile `type(...) == str` comparison.
    """
    dic = defaultdict(Counter)
    # Handle a single string as well as a collection of strings.
    if isinstance(arr_of_strings, str):
        count_by_len(arr_of_strings, dic, max_len, prob)
        return dic
    for s in arr_of_strings:
        count_by_len(s, dic, max_len, prob)
    return dic
def compute_zscore(dic_by_len, threshold):
    """Normalise each per-length counter to z-scores in place.

    Keys with a count below `threshold` are dropped; lengths whose kept
    counts have (near-)zero spread are removed entirely. Returns the result
    wrapped in a HashCollection.
    """
    epsilon = 10 ** - 4
    degenerate = []
    vectorizer = DictVectorizer(sparse=False)
    for length in dic_by_len:
        raw = dic_by_len[length]
        kept = {key: raw[key] for key in raw if raw[key] >= threshold}
        if not kept:
            continue
        matrix = vectorizer.fit_transform(kept)
        mu = matrix.mean()
        sigma = matrix.std()
        if sigma < epsilon:
            # No spread: z-scores undefined, drop this length after the loop.
            degenerate.append(length)
            continue
        for key in kept:
            kept[key] = (kept[key] - mu) / sigma
        dic_by_len[length] = Counter(kept)
    for length in degenerate:
        del dic_by_len[length]
    return hash_collection.HashCollection(dic_by_len)
# The 36-symbol digit set used by str2quad: 'a'..'z' then '0'..'9'.
alphabet = list(string.ascii_lowercase) + list(string.digits)
base = len(alphabet)
# Reverse lookup: character -> its positional value in `alphabet`.
char2index = {ch: idx for idx, ch in enumerate(alphabet)}
def str2quad(string):
    """Encode `string` as an integer, little-endian, using char2index digits
    in base `base`.

    Fix: the accumulator was initialised with the Python 2 long literal
    ``0L``, which is a SyntaxError on Python 3; a plain ``0`` behaves
    identically on both (Python 2 ints auto-promote to long).
    """
    as_quad = 0
    exp = 1
    for ch in string:
        as_quad += exp * char2index[ch]
        exp *= base
    return as_quad
|
"""
Developed by ExtremeGeneration.it (2018)
http://www.extremegeneration.it
Made available under GNU GENERAL PUBLIC LICENSE
# Subscribe to our Youtube Channel http://www.youtube.com/user/eXtremegenerationIT
# for many other tutorials
# Download the Raspberry Pi APP
# https://play.google.com/store/apps/details?id=it.extremegeneration.raspberrypiprogetti
"""
import driverLCD # Custom library to use the LCD display with Raspberry Pi
import urllib.request
import json
import time
# Custom CryptoCurrency Symbols
# Each glyph is eight 5-bit rows (a 5x8 character cell bitmap), wrapped in a
# list as expected by lcd_load_custom_chars — presumably one glyph per CGRAM
# slot; confirm against driverLCD.
bitcoin_symbol = [
    [0b01010,
     0b11110,
     0b01001,
     0b01001,
     0b01110,
     0b01001,
     0b11111,
     0b01010]]
ethereum_symbol = [
    [0b00100,
     0b01110,
     0b11111,
     0b11111,
     0b01110,
     0b11111,
     0b01110,
     0b00100]]
# Initialize LCD screen
LCDscreen = driverLCD.lcd()
# Set BackLight 1=activate, 0=deactivate
LCDscreen.backlight(1)
# Define Scroll Text Function
# Pass the text, the column and number of iterations
def scroll_text(text, iterations, column=2, row=1, speed=0.2):
# create string with 16 empty spaces
str_pad = " " * 16
iterate = 0
while (iterations > iterate):
for i in range(0, len(text)):
lcd_text = text[i:(i + 16)]
LCDscreen.lcd_display_string(lcd_text, column, row)
time.sleep(speed)
LCDscreen.lcd_display_string(str_pad, column, row)
iterate += 1
# Start-up greeting & brand: one scrolling pass on LCD line 1.
scroll_text(' Crypto Tracker - ExtremeGeneration.it', column=1, iterations=1, row=1)
def query_Api():
    """Fetch full BTC/ETH/IOTA price data from the cryptocompare API.

    Returns the parsed JSON payload as a dict.
    Fix: close the HTTP response deterministically — the original urlopen()
    handle was never closed, leaking a socket on every poll.
    """
    # The cryptocompare API allows to compare several crypto-currencies
    with urllib.request.urlopen(
            'https://min-api.cryptocompare.com/data/pricemultifull?fsyms=ETH,IOT,BTC&tsyms=BTC,USD,EUR,ETH,IOT') as response:
        data = response.read()
    dataJSON = json.loads(data.decode('utf-8'))
    return dataJSON
# Main display loop: poll the API, then cycle BTC -> ETH -> IOTA panels.
while True:
    # Query the API
    dataJSON = query_Api()
    # Function to compute the sign (+ or -) of the percentage variation
    # (negative values already carry their own '-' from formatting).
    sign = lambda x: '+' if x > 0 else ""
    #################### Show BITCOIN DATA ###########################
    # Get % daily
    BTC_open_euro = float(dataJSON['RAW']['BTC']['EUR']['OPEN24HOUR'])
    BTC_current_euro = float(dataJSON['RAW']['BTC']['EUR']['PRICE'])
    BTC_var_percentage = ((BTC_current_euro - BTC_open_euro) / BTC_open_euro) * 100
    # Get ETH/BTC price
    ETH_BTC = float(dataJSON['RAW']['BTC']['ETH']['PRICE'])
    # Get IOTA/BTC price
    IOTA_BTC = float(dataJSON['RAW']['BTC']['IOT']['PRICE'])
    # Display Bitcoin symbol: load the glyph, move the cursor home (0x80),
    # draw custom char 0.
    LCDscreen.lcd_load_custom_chars(bitcoin_symbol)
    LCDscreen.lcd_write(0X80)
    LCDscreen.lcd_write_char(0)
    # Show row 1 — 'itcoin' presumably completes "Bitcoin" after the custom
    # glyph drawn at the first cell; confirm against the LCD layout.
    LCDscreen.lcd_display_string('itcoin {}{:04.2f}%'.format(sign(BTC_var_percentage), BTC_var_percentage), 1, 1)
    # Create a String for the second column
    BTC_feed = (' EUR {:04.2f} ETH {:04.2f} IOTA {:04.2f}'.format(BTC_current_euro, ETH_BTC, IOTA_BTC))
    # Show row 2 (scrolling)
    scroll_text(BTC_feed, speed=0.4, iterations=2, row=1)
    # Clean the LCD Display
    LCDscreen.lcd_clear()
    ########################## Show ETHEREUM DATA ########################
    # Get % daily
    ETH_open_euro = float(dataJSON['RAW']['ETH']['EUR']['OPEN24HOUR'])
    ETH_current_euro = float(dataJSON['RAW']['ETH']['EUR']['PRICE'])
    ETH_var_percentage = ((ETH_current_euro - ETH_open_euro) / ETH_open_euro) * 100
    # Get BTC/ETH price
    BTC_ETH = float(dataJSON['RAW']['ETH']['BTC']['PRICE'])
    # Get IOTA/ETH price
    IOTA_ETH = float(dataJSON['RAW']['ETH']['IOT']['PRICE'])
    # Display Ethereum symbol
    LCDscreen.lcd_load_custom_chars(ethereum_symbol)
    LCDscreen.lcd_write(0X80)
    LCDscreen.lcd_write_char(0) # Set row
    # Show row 1 ('thereum' follows the custom glyph, as above)
    LCDscreen.lcd_display_string('thereum {}{:04.2f}%'.format(sign(ETH_var_percentage), ETH_var_percentage), 1, 1)
    # Create a String for the second column
    ETH_feed = (' EUR {:04.2f} BTC {:04.2f} IOTA {:04.2f}'.format(ETH_current_euro, BTC_ETH, IOTA_ETH))
    # Show row 2 (scrolling)
    scroll_text(ETH_feed, speed=0.4, iterations=2, row=1)
    # Clean the LCD Display
    LCDscreen.lcd_clear()
    ##########################SHow IOTA DATA###########################
    # Get % daily
    IOTA_open_euro = float(dataJSON['RAW']['IOT']['EUR']['OPEN24HOUR'])
    IOTA_current_euro = float(dataJSON['RAW']['IOT']['EUR']['PRICE'])
    IOTA_var_percentage = ((IOTA_current_euro - IOTA_open_euro) / IOTA_open_euro) * 100
    # Get BTC/IOTA price
    BTC_IOTA = float(dataJSON['RAW']['IOT']['BTC']['PRICE'])
    # Get ETH/IOTA price
    ETH_IOTA = float(dataJSON['RAW']['IOT']['ETH']['PRICE'])
    # Show row 1 (no custom glyph for IOTA; full name is printed)
    LCDscreen.lcd_display_string('IOTA {}{:04.2f}%'.format(sign(IOTA_var_percentage), IOTA_var_percentage), 1, 1)
    # Create a String for the second column
    IOTA_feed = (' EUR {:04.2f} BTC {:04.6f} ETH {:04.6f}'.format(IOTA_current_euro, BTC_IOTA, ETH_IOTA))
    # Show row 2 (scrolling)
    scroll_text(IOTA_feed, speed=0.4, iterations=2, row=1)
    # Clean the LCD Display
    LCDscreen.lcd_clear()
|
from gesture_controller.commands import ALL_COMMANDS
from gesture_controller.controller import GestureController
# Build the controller with one instance of every registered command class,
# then run the (blocking) inference loop. Local name fixed: "contoller".
controller = GestureController(
    commands=[command_cls() for command_cls in ALL_COMMANDS]
)
controller.run_inference()
|
import sexpr
import sys
import os
from pprint import pprint
from subprocess import Popen, PIPE
# Read the netlist named on the command line, strip '#' comment lines, and
# parse it into a lookup table of top-level s-expressions.
fname = sys.argv[1]
name = os.path.basename(fname).split('.')[0]
# Fix: close the file deterministically and stop shadowing the builtin
# `file`; also join once instead of quadratic string concatenation.
with open(fname) as src:
    source = "".join(line for line in src if line[0] != "#")
sexpr.input(source)
s = sexpr.parse()
# Unwrap redundant single-element nesting produced by the parser.
while len(s) == 1:
    s = s[0]
# Index each top-level form by its head symbol, e.g. "primitive_defs".
table = {}
for x in s:
    table[x[0]] = x[1:]
class Element():
    """A schematic element: a named node with config lines and input/output pins."""

    def __init__(self, name):
        self.name = name
        self.cfg = []      # configuration strings attached to this element
        self.inputs = []   # input pin names
        self.outputs = []  # output pin names

    def canelide(self):
        """Return True when this element can be drawn as a bare node: no
        config, and exactly one pin whose name equals the element's name."""
        if not self.cfg:
            if not self.inputs and len(self.outputs) == 1:
                return self.outputs[0] == self.name
            if len(self.inputs) == 1 and not self.outputs:
                return self.inputs[0] == self.name
        return False
class Primitive():
    """A primitive definition parsed from a (primitive_def ...) s-expression,
    renderable to a PDF graph via graphviz `dot`."""

    def __init__(self,sexpr):
        # sexpr layout: ("primitive_def", <name>, ?, ?, *items) — items start
        # at index 4; each is a "pin" or "element" form.
        self.name = sexpr[1]
        #pprint(sexpr)
        # Synthetic boundary elements for the primitive's external pins.
        input,output = Element("input"),Element("output")
        self.elements = [ input, output ]
        self.connections = {} # (e0,outputpin,e1,inputpin) => true
        for i in sexpr[4:]:
            if i[0] == "pin":
                # Top-level pin: ("pin", <element>, <pinname>, <direction>)
                if i[3] == "input":
                    input.outputs.append(i[2])
                    self.connections[ ("input",i[2],i[1],i[2]) ] = True
                else:
                    output.inputs.append(i[2])
                    self.connections[ (i[1],i[2],"output",i[2]) ] = True
            elif i[0] == "element":
                e = Element(i[1])
                self.elements.append(e)
                for ii in i[2:]:
                    if isinstance(ii,list):
                        if ii[0] == "pin":
                            # ("pin", <name>, "input"/"output") -> append to
                            # e.inputs or e.outputs via the attribute name.
                            getattr(e,ii[2]+"s").append(ii[1])
                        elif ii[0] == "conn":
                            # "==>": forward edge; otherwise reversed.
                            if ii[3] == "==>":
                                self.connections[ (ii[1],ii[2],ii[4],ii[5]) ] = True
                            else:
                                self.connections[ (ii[4],ii[5],ii[1],ii[2]) ] = True
                        elif ii[0] == "cfg":
                            e.cfg = ii[1:]

    def save(self):
        """Pipe a graphviz description of this primitive into `dot`, writing
        <primname>_<filename>.pdf in the current directory."""
        print("Saving %s" % self.name)
        p = Popen(["dot","-Tpdf","-o","%s_%s.pdf" % (self.name,name)], stdin=PIPE)
        f = p.stdin
        # NOTE(review): writes str to a binary pipe — fine on Python 2, would
        # need encoding (or universal_newlines) on Python 3; confirm target.
        def write(s):
            f.write(s)
            # Debug tap: echo the dot source for this one primitive.
            if self.name == "PCIE_3_0":
                sys.stdout.write(s)
        write("digraph G {\n")
        write(" graph [rankdir = LR];\n")
        write(" node[shape=record];\n")
        for e in self.elements:
            # Record-label helpers: one <port>name field per pin.
            def namefmt(xs):
                return "|".join([ "<%s>%s" % (x,x) for x in xs])
            # Escape record-label metacharacters in cfg strings.
            def quote(x):
                return """ \\"%s\\" """ % x.replace("<","\\<").replace(">","\\>").replace("|","\\|")
            cfgstring = '\\n'.join([quote(x) for x in e.cfg])
            if e.canelide():
                # Trivial pass-through: draw as a plain single-port node.
                write(""" %s[label="<%s>%s"];\n""" % (e.name,e.name,e.name))
            else:
                # Full record: { {inputs} | name\ncfg | {outputs} }
                write(""" %s[label="{ {%s} | %s\\n%s | {%s} }"];\n""" % (e.name,namefmt(e.inputs),e.name,cfgstring,namefmt(e.outputs)))
        for t in self.connections.keys():
            write(" %s:%s -> %s:%s;\n" % t)
        write("}")
        f.close()
        # NOTE(review): bare `raise` with no active exception — this produces
        # a RuntimeError/TypeError rather than a meaningful error on failure.
        if p.wait() != 0:
            raise
# Render every primitive_def found in the table, best-effort: a failure on
# one primitive must not stop the rest.
for i in table["primitive_defs"]:
    if i[0] == "primitive_def":
        p = Primitive(i)
        try:
            p.save()
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.
            print("Failed to save %s" % p.name)
|
def sum1(x, y, z):
    """Return x + y + z, or 0 when any two of the arguments are equal."""
    any_pair_equal = x == y or y == z or x == z
    return 0 if any_pair_equal else x + y + z

print(sum1(3, 5, 10))
import numpy as np
# environment class used to build the Multi-Armed-Bandit environment
class PricingEnv:
    """Bernoulli bandit: pulling arm i pays 1 with probability conversion_rates[i]."""

    def __init__(self, n_arms, conversion_rates):
        # Integer number of arms.
        self.n_arms = n_arms
        # Per-arm success probabilities,
        # e.g. [array([0. , 0.03448276, ... 0.96551724, 1. ])]
        self.conversion_rates = conversion_rates

    def round(self, pulled_arm):
        """Sample and return the 0/1 reward for `pulled_arm`."""
        return np.random.binomial(1, self.conversion_rates[pulled_arm])
|
# CMSSW full-simulation configuration: services, long-barrel geometry,
# conditions, and fake pixel calibration sources.
import FWCore.ParameterSet.Config as cms
process = cms.Process("Fullsim")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
# replace with long barrel geometry
process.load("SLHCUpgradeSimulations.Geometry.longbarrel_cmsIdealGeometryXML_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'MC_31X_V8::All'
# Fake (flat) pixel gain calibration for the upgrade geometry; the ESPrefer
# lines make these sources win over any conditions from the GlobalTag.
process.siPixelFakeGainOfflineESSource = cms.ESSource("SiPixelFakeGainOfflineESSource",
    file = cms.FileInPath('SLHCUpgradeSimulations/Geometry/data/longbarrel/PixelSkimmedGeometry_empty.txt')
)
process.es_prefer_fake_gain = cms.ESPrefer("SiPixelFakeGainOfflineESSource","siPixelFakeGainOfflineESSource")
process.siPixelFakeLorentzAngleESSource = cms.ESSource("SiPixelFakeLorentzAngleESSource",
    file = cms.FileInPath('SLHCUpgradeSimulations/Geometry/data/longbarrel/PixelSkimmedGeometry.txt')
)
process.es_prefer_fake_lorentz = cms.ESPrefer("SiPixelFakeLorentzAngleESSource","siPixelFakeLorentzAngleESSource")
process.load("FWCore/MessageService/MessageLogger_cfi")
#process.MessageLogger.destinations = cms.untracked.vstring("detailedInfo_fullLBmu50")
#process.MessageLogger.detailedInfo_strawb_mu50 = cms.untracked.PSet(threshold = cms.untracked.string('DEBUG'))
#process.MessageLogger.debugModules= cms.untracked.vstring("PixelGeom")
# this config fragment brings you the generator information
process.load("Configuration.StandardSequences.Generator_cff")
# this config frament brings you 3 steps of the detector simulation:
# -- vertex smearing (IR modeling)
# -- G4-based hit level detector simulation
# -- digitization (electronics readout modeling)
# it returns 2 sequences :
# -- psim (vtx smearing + G4 sim)
# -- pdigi (digitization in all subsystems, i.e. tracker=pix+sistrips,
#           cal=ecal+ecal-0-suppression+hcal), muon=csc+dt+rpc)
#
# Simulation, pile-up mixing, digitization and clustering for the 14-layer
# long-barrel pixel upgrade layout.
process.load("Configuration.StandardSequences.Simulation_cff")
process.TrackerDigiGeometryESModule.applyAlignment = False
# please note the IMPORTANT:
# in order to operate Digis, one needs to include Mixing module
# (pileup modeling), at least in the 0-pileup mode
#
# There're 3 possible configurations of the Mixing module :
# no-pileup, low luminosity pileup, and high luminosity pileup
#
# they come, respectively, through the 3 config fragments below
#
# *each* config returns label "mix"; thus you canNOT have them
# all together in the same configuration, but only one !!!
#
process.load("Configuration.StandardSequences.MixingNoPileUp_cff")
#include "Configuration/StandardSequences/data/MixingLowLumiPileUp.cff"
#include "Configuration/StandardSequences/data/MixingHighLumiPileUp.cff"
process.load("Configuration.StandardSequences.L1Emulator_cff")
process.load("Configuration.StandardSequences.DigiToRaw_cff")
process.load("Configuration.StandardSequences.RawToDigi_cff")
#process.load("Configuration.StandardSequences.VtxSmearedBetafuncEarlyCollision_cff")
process.load("Configuration.StandardSequences.VtxSmearedGauss_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("SimTracker.Configuration.SimTracker_cff")
# Upgrade pixel digitizer: 14 barrel layers / 3 endcap disks, no
# miscalibration, no Lorentz-angle DB, no dead modules.
process.simSiPixelDigis.MissCalibrate = False
process.simSiPixelDigis.LorentzAngle_DB = False
process.simSiPixelDigis.killModules = False
process.simSiPixelDigis.NumPixelBarrel = cms.int32(14)
process.simSiPixelDigis.NumPixelEndcap = cms.int32(3)
## set pixel inefficiency if we want it
## 100% efficiency
process.simSiPixelDigis.AddPixelInefficiency = -1
## static efficiency
#process.simSiPixelDigis.AddPixelInefficiency = 0   #--Hec (default = -1)
#process.simSiPixelDigis.PixelEff     = 0.99        #--Hec (default = 1)
#process.simSiPixelDigis.PixelColEff  = 0.99        #--Hec (default = 1)
#process.simSiPixelDigis.PixelChipEff = 0.99        #--Hec (default = 1)
# Note only static is implemented for upgrade geometries
#--PixelIneff = -1 Default Value (No Inefficiency. eff=100%)
#              = 0 Static Efficiency
#              > 0 Luminosity rate dependent ineff
#              1,2 - low-lumi rate dependent inefficency added
#              10 - high-lumi inefficiency added
process.siPixelClusters.src = 'simSiPixelDigis'
process.siPixelClusters.MissCalibrate = False
process.simSiStripDigis.ROUList = cms.vstring("g4SimHitsTrackerHitsPixelBarrelLowTof")
#Setting this to "" gives error in pdigi
#process.siStripZeroSuppression.RawDigiProducersList[0].RawDigiProducer = 'simSiStripDigis'
#process.siStripZeroSuppression.RawDigiProducersList[1].RawDigiProducer = 'simSiStripDigis'
#process.siStripZeroSuppression.RawDigiProducersList[2].RawDigiProducer = 'simSiStripDigis'
#process.siStripClusters.DigiProducersList[0].DigiProducer= 'simSiStripDigis'
# Strip digis are disabled for the all-pixel long-barrel layout.
process.siStripZeroSuppression.RawDigiProducersList[0].RawDigiProducer = ''
process.siStripZeroSuppression.RawDigiProducersList[1].RawDigiProducer = ''
process.siStripZeroSuppression.RawDigiProducersList[2].RawDigiProducer = ''
process.siStripClusters.DigiProducersList[0].DigiProducer= ''
#
# change from default of 8bit ADC (255) for stack layers (1=1 bit, 7=3 bits)
# need to change both digitizer and clusterizer
#process.simSiPixelDigis.AdcFullScaleStack = cms.int32(1)
#process.siPixelClusters.AdcFullScaleStack = cms.int32(1)
# probably no need to change default stack layer start
#process.simSiPixelDigis.FirstStackLayer = cms.int32(5)
#process.siPixelClusters.FirstStackLayer = cms.int32(5)
# Event output
process.load("Configuration.EventContent.EventContent_cff")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000)
)
# Particle gun (flat-pT muons), event output, and track validation setup.
process.load("FastSimulation/Configuration/FlatPtMuonGun_cfi")
process.generator.PGunParameters.MinPt = 0.9
process.generator.PGunParameters.MaxPt = 50.0
process.generator.PGunParameters.MinEta = -2.4
process.generator.PGunParameters.MaxEta = 2.4
process.generator.Verbosity = 1
process.generator.AddAntiParticle = True
process.FEVT = cms.OutputModule("PoolOutputModule",
    process.FEVTSIMEventContent,
    fileName = cms.untracked.string('/uscms_data/d2/cheung/slhc/testfullLB_muon_50GeV.root')
)
process.load("Validation.RecoTrack.cutsTPEffic_cfi")
process.load("Validation.RecoTrack.cutsTPFake_cfi")
process.load("SimTracker.TrackAssociation.TrackAssociatorByChi2_cfi")
process.load("SimTracker.TrackAssociation.TrackAssociatorByHits_cfi")
process.load("Validation.RecoTrack.MultiTrackValidator_cff")
#process.multiTrackValidator.label = ['generalTracks']
### if using simple (non-iterative) or old (as in 1_8_4) tracking
process.multiTrackValidator.label = ['ctfWithMaterialTracks']
#process.multiTrackValidator.label = ['cutsRecoTracks']
#process.multiTrackValidator.label_tp_effic = cms.InputTag("cutsTPEffic")
#process.multiTrackValidator.label_tp_fake = cms.InputTag("cutsTPFake")
process.multiTrackValidator.associators = ['TrackAssociatorByHits']
process.multiTrackValidator.UseAssociators = True
process.multiTrackValidator.outputFile = "validfullLB_muon_50GeV.root"
process.multiTrackValidator.nint = cms.int32(20)
process.multiTrackValidator.nintpT = cms.int32(25)
process.multiTrackValidator.maxpT = cms.double(50.0)
process.multiTrackValidator.skipHistoFit = False
##### with John's changes ##############################
# Seeding triplets for the long-barrel + forward pixel layer combinations.
process.load("SLHCUpgradeSimulations.Geometry.oldTracking_wtriplets")
process.PixelLayerTriplets.layerList = cms.vstring('BPix1+BPix2+BPix3',
    'BPix1+BPix3+BPix4',
    'BPix2+BPix3+BPix4',
    'BPix1+BPix2+BPix4',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg',
    'BPix1+FPix2_pos+FPix3_pos',
    'BPix1+FPix2_neg+FPix3_neg',
    'FPix1_pos+FPix2_pos+FPix3_pos',
    'FPix1_neg+FPix2_neg+FPix3_neg')
# restrict vertex fining in trackingtruthprod to smaller volume (note: these numbers in mm)
process.mergedtruth.volumeRadius = cms.double(100.0)
process.mergedtruth.volumeZ = cms.double(900.0)
process.mergedtruth.discardOutVolume = cms.bool(True)
process.mergedtruth.simHitCollections.pixel = cms.vstring('g4SimHitsTrackerHitsPixelBarrelLowTof',
    'g4SimHitsTrackerHitsPixelBarrelHighTof',
    'g4SimHitsTrackerHitsPixelEndcapLowTof',
    'g4SimHitsTrackerHitsPixelEndcapHighTof')
process.mergedtruth.simHitCollections.tracker = []
process.mergedtruth.simHitCollections.muon = []
process.cutsTPFake.tip = cms.double(10.0)
process.cutsTPFake.lip = cms.double(90.0)
#NB: tracks are already filtered by the generalTracks sequence
#for additional cuts use the cutsRecoTracks filter:
#process.load("Validation.RecoTrack.cutsRecoTracks_cfi")
#process.cutsRecoTracks.src = cms.InputTag("ctfWithMaterialTracks")
#process.cutsRecoTracks.quality = cms.vstring('')
#process.cutsRecoTracks.minHit = cms.int32(3)
#process.cutsRecoTracks.minHit = cms.int32(8)
#process.cutsRecoTracks.minHit = cms.int32(6)
############ end John's changes ###########################
### make sure the correct (modified) error routine is used
process.siPixelRecHits.CPE = 'PixelCPEfromTrackAngle'
process.MeasurementTracker.PixelCPE = 'PixelCPEfromTrackAngle'
process.ttrhbwr.PixelCPE = 'PixelCPEfromTrackAngle'
process.MixedLayerPairs.BPix.TTRHBuilder = cms.string('WithTrackAngle')
process.MixedLayerPairs.FPix.TTRHBuilder = cms.string('WithTrackAngle')
process.PixelLayerTriplets.BPix.TTRHBuilder = cms.string('WithTrackAngle')
process.PixelLayerTriplets.FPix.TTRHBuilder = cms.string('WithTrackAngle')
process.ctfWithMaterialTracks.TTRHBuilder = cms.string('WithTrackAngle')
process.MeasurementTracker.stripClusterProducer=cms.string('')
process.MeasurementTracker.inactiveStripDetectorLabels = cms.VInputTag()
process.MeasurementTracker.UseStripModuleQualityDB = cms.bool(False)
process.MeasurementTracker.UseStripAPVFiberQualityDB = cms.bool(False)
#Prevent strips...
#next may not be needed
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")
process.TrackRefitter.TTRHBuilder = cms.string('WithTrackAngle')
#next may not be needed
process.load("RecoTracker.SiTrackerMRHTools.SiTrackerMultiRecHitUpdator_cff")
process.siTrackerMultiRecHitUpdator.TTRHBuilder = cms.string('WithTrackAngle')
#replace with correct component in cloned version (replace with original TTRH producer)
#Where has this gone? idr 29/9/9 : From RecoTracker/Configuration/python/RecoTracker_cff.py
#process.preFilterFirstStepTracks.TTRHBuilder = cms.string('WithTrackAngle')
process.secPixelRecHits.CPE = cms.string('PixelCPEfromTrackAngle')
process.seclayertriplets.BPix.TTRHBuilder = cms.string('WithTrackAngle')
process.seclayertriplets.FPix.TTRHBuilder = cms.string('WithTrackAngle')
process.secMeasurementTracker.PixelCPE = cms.string('PixelCPEfromTrackAngle')
process.secWithMaterialTracks.TTRHBuilder = cms.string('WithTrackAngle')
process.thPixelRecHits.CPE = cms.string('PixelCPEfromTrackAngle')
process.thlayerpairs.BPix.TTRHBuilder = cms.string('WithTrackAngle')
process.thlayerpairs.FPix.TTRHBuilder = cms.string('WithTrackAngle')
process.thMeasurementTracker.PixelCPE = cms.string('PixelCPEfromTrackAngle')
process.thWithMaterialTracks.TTRHBuilder = cms.string('WithTrackAngle')
### produce an ntuple with pixel hits for analysis
process.ReadLocalMeasurement = cms.EDAnalyzer("StdHitNtuplizer",
src = cms.InputTag("siPixelRecHits"),
stereoRecHits = cms.InputTag(""),
rphiRecHits = cms.InputTag(""),
matchedRecHits = cms.InputTag(''),
#trackProducer = cms.InputTag("generalTracks"),
### if using simple (non-iterative) or old (as in 1_8_4) tracking
trackProducer = cms.InputTag("ctfWithMaterialTracks"),
OutputFile = cms.string("stdgrechitfullLB_ntuple.root"),
### for using track hit association
associatePixel = cms.bool(True),
associateStrip = cms.bool(False),
associateRecoTracks = cms.bool(False),
ROUList = cms.vstring('g4SimHitsTrackerHitsPixelBarrelLowTof',
'g4SimHitsTrackerHitsPixelBarrelHighTof',
'g4SimHitsTrackerHitsPixelEndcapLowTof',
'g4SimHitsTrackerHitsPixelEndcapHighTof')
)
### modules to write output navigational information for tracking
#process.Tracer = cms.Service("Tracer",
# indentation = cms.untracked.string('$$')
#)
#process.navigationSchoolAnalyzer = cms.EDAnalyzer("NavigationSchoolAnalyzer",
# navigationSchoolName = cms.string('SimpleNavigationSchool')
#)
process.Timing = cms.Service("Timing")
process.p0 = cms.Path(process.generator+process.pgen)
process.p1 = cms.Path(process.psim)
process.p2 = cms.Path(process.pdigi)
process.p3 = cms.Path(process.L1Emulator)
#process.p4 = cms.Path(process.DigiToRaw)
#process.p5 = cms.Path(process.RawToDigi)
#process.p5 = cms.Path(process.trackerlocalreco)
process.p5 = cms.Path(process.pixeltrackerlocalreco)
process.p6 = cms.Path(process.offlineBeamSpot+process.oldTracking_wtriplets)
#process.p6 = cms.Path(process.offlineBeamSpot+process.recopixelvertexing*process.ckftracks)
#process.p6 = cms.Path(process.reconstruction)
process.p7 = cms.Path(process.cutsTPEffic*process.cutsTPFake*process.multiTrackValidator)
#process.p7 = cms.Path(process.cutsTPEffic*process.cutsTPFake*process.cutsRecoTracks*process.multiTrackValidator)
#process.p7 = cms.Path(process.trackingParticles*process.cutsTPEffic*process.cutsTPFake*process.multiTrackValidator)
#process.p8 = cms.Path(process.writedet)
#process.p8 = cms.Path(process.navigationSchoolAnalyzer)
#process.p8 = cms.Path(process.ReadLocalMeasurement)
process.outpath = cms.EndPath(process.FEVT)
#process.schedule = cms.Schedule(process.p0,process.p1,process.p2,process.p3,process.p4,process.p5,process.p6,process.p7,process.outpath)
#process.schedule = cms.Schedule(process.p0,process.p1,process.p2,process.p3,process.p5,process.outpath)
process.schedule = cms.Schedule(process.p0,process.p1,process.p2,process.p3,process.p5,process.p6,process.p7)
|
import argparse
import sys
# import importlib
# import os
# ROOT_DIR = os.path.abspath(os.curdir)
# sys.path.append(f'{ROOT_DIR}')
# roots = ROOT_DIR.split("/")[-1:]
# print('.'.join(roots))
#
#
# io = importlib.import_module(f"{'.'.join(roots)}.io")
# utility_module = importlib.import_module(f"{'.'.join(roots)}.")
# mbr = importlib.import_module(f"{'.'.join(roots)}.mbr")
from mbr_nmt.io import read_samples_file
from mbr_nmt.utility import parse_utility
from mbr_nmt.mbr import mbr
def translate(args):
    """Run minimum-Bayes-risk decoding over a dataset and print one
    translation per input sequence to stdout.

    :param args: parsed argparse namespace produced by create_parser()
    :raises Exception: on inconsistent candidate/sample arguments or sizes
    """
    out_stream = sys.stdout

    # Validate the argument combination before doing any file I/O.
    if args.candidates and not args.num_candidates:
        raise Exception("Must set --num-candidates if --candidates/-c is given.")

    all_samples = read_samples_file(args.samples, args.num_samples, add_eos=args.add_eos)
    all_candidates = None
    if args.candidates:
        all_candidates = read_samples_file(args.candidates, args.num_candidates, add_eos=args.add_eos)
        if len(all_candidates) != len(all_samples):
            raise Exception("Different dataset size for candidates and samples.")

    utility_fn = parse_utility(args.utility, lang=args.lang)

    # Decode every input sequence independently and emit one line each.
    for idx, sample_set in enumerate(all_samples):
        cands = all_candidates[idx] if all_candidates else None
        prediction = mbr(sample_set, utility_fn,
                         candidates=cands,
                         return_matrix=False,
                         subsample_size=args.subsample_size)
        out_stream.write(" ".join(prediction) + "\n")
def create_parser(subparsers=None):
    """Build the argument parser for the ``translate`` command.

    :param subparsers: optional argparse subparsers object; when given, the
        parser is attached as the ``translate`` sub-command instead of being
        a standalone top-level parser.
    :return: the configured argparse.ArgumentParser
    """
    description = "mbr-nmt translate: pick an optimal translation according to minimum Bayes risk decoding"
    shared_kwargs = {
        "description": description,
        "formatter_class": argparse.ArgumentDefaultsHelpFormatter,
    }
    if subparsers is None:
        parser = argparse.ArgumentParser(**shared_kwargs)
    else:
        parser = subparsers.add_parser("translate", **shared_kwargs)

    # Bind locally so the option declarations below stay compact.
    add = parser.add_argument
    add("--samples", "-s", type=str, required=True,
        help="File containing translation samples, one per line, in order of input sequence.")
    add("--num-samples", "-n", type=int, required=True,
        help="Number of samples per input sequence.")
    add("--utility", "-u", type=str, required=True,
        help="Utility function to maximize.", choices=["unigram-precision", "beer", "meteor"])
    add("--candidates", "-c", type=str,
        help="File containing translation candidates, one per line, in order of input sequence. "
             "If not given, assumed to be equal to --samples/-s.")
    add("--num-candidates", "-m", type=int,
        help="Number of candidates per input sequence, only used if --candidates/-c is set.")
    add("--lang", "-l", type=str, default="en",
        help="Language code used to inform METEOR.")
    add("--subsample-size", type=int,
        help="If set, a smaller uniformly sampled subsample is used to compute expectations "
             "for faster runtime.")
    add("--add-eos", action="store_true",
        help="Add an EOS token to every sample and candidate. "
             "This is useful for dealing with empty sequences.")
    return parser
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run MBR translation.
    parser = create_parser()
    args = parser.parse_args()
    translate(args)
|
from lxml import etree
import re
import nltk.data
import os
import csv
import argparse
from tqdm import tqdm
def get_debate_text(debate_path, filename, data_path):
    '''
    Extract the plain text of every speech in a debate XML file and split it
    into sentences, pairing each sentence with the id of its speaker.
    :param debate_path: path to debate files
    :param filename: filename of debate
    :param data_path: path where the new files should be stored
        (currently unused here; kept for interface compatibility)
    :return: list of (sentence, speaker_id) tuples in document order
    '''
    # get data from debate
    with open(os.path.join(debate_path, filename), 'rb') as file:
        debate = file.read()
    # parser (the debate files are ISO-8859-1 encoded)
    parser = etree.XMLParser(encoding='ISO-8859-1')
    # get root element
    root = etree.fromstring(debate, parser=parser)
    # get speech elements; iterate the element directly instead of calling
    # Element.getchildren(), which is deprecated and removed in recent lxml.
    speech_elems = [child for child in root if child.tag == 'speech']
    # regex for stripping markup tags out of the serialized element
    clean = re.compile('<.*?>')
    # all sentences with speaker id as tuple (sentence, speaker_id)
    all_sent = []
    # sentence detector to separate text into sentences
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    # get the whole speech text without any tags
    for elem in speech_elems:
        # serialize the element and strip all tags to obtain clean text
        elem.text = re.sub(clean, '', etree.tostring(elem).decode("ISO-8859-1"))
        # get speaker id ('nospeaker' when the attribute is absent)
        speaker_id = elem.attrib['speakerid'] if 'speakerid' in elem.attrib else 'nospeaker'
        # list of sentences in this speech tag
        list_sent = sent_detector.tokenize(elem.text.strip())
        # put tuple of (sentence, speaker_id) in list
        for sentence in list_sent:
            all_sent.append((sentence, speaker_id))
    return all_sent
if __name__== "__main__" :
    # CLI: read debate XML files and write a CSV of adjacent-sentence pairs
    # labeled with whether the speaker changed between them.
    parser = argparse.ArgumentParser()
    # NOTE(review): 'dabates' in the help string is a typo ('debates');
    # left unchanged here since help text is runtime-visible output.
    parser.add_argument('--debate_path', type=str, help='Path to dabates xml files.', required=True)
    parser.add_argument('--data_path', type=str, help='Name of folder to which the new file should be written.', default='data')
    args = parser.parse_args()
    # get arguments
    DEBATE_PATH = args.debate_path
    DATA_PATH = args.data_path
    # create new folder for the instances files
    if not os.path.exists(DATA_PATH):
        os.makedirs(DATA_PATH)
    # counters for the two labels, reported at the end
    same = 0
    change = 0
    # write sentences to file
    with open(os.path.join(DATA_PATH, 'debates_sents.csv'), 'w') as csv_file:
        csv_out = csv.writer(csv_file)
        # NOTE(review): this message concatenates without a path separator
        # (e.g. "datadebates_sents.csv"); the actual output path above uses
        # os.path.join and is correct.
        print("Writing instances to file '%s'." % (DATA_PATH + "debates_sents.csv"))
        # write (sentence, next sentence, label) to file
        csv_out.writerow(['sent1', 'sent2', 'label', 'speaker_id'])
        # iterate over debate files
        for file in tqdm(os.listdir(DEBATE_PATH)):
            if file.endswith('.xml'):
                all_sent = get_debate_text(DEBATE_PATH, file, DATA_PATH)
                # pair each sentence with its successor
                for i in range(len(all_sent) - 1):
                    sent = all_sent[i][0]
                    speaker_id = all_sent[i][1]
                    sent_next = all_sent[i + 1][0]
                    speaker_id_next = all_sent[i + 1][1]
                    # label data
                    # 0: same, 1: change
                    # check if there was a change in the speaker
                    if speaker_id == speaker_id_next:
                        label = 0
                        same += 1
                    else:
                        label = 1
                        change += 1
                    # write (sentence, next sentence, label) to file
                    csv_out.writerow((sent, sent_next, label, speaker_id_next))
    print('same: ', same)
    print('change: ', change)
    print('Wrote instances in files to %s.' % DATA_PATH)
|
# -*- coding: utf-8 -*-
import gzip
import chardet
import datetime
import time
import logging
from urllib import request
from io import BytesIO
from urllib.error import HTTPError
from urllib.error import URLError
'''HTTP访问封装模块'''
'''消息头'''
HEAD = {'Accept' : '*/*',
'Accept-Language' : 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
'Connection' : 'keep-alive',
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; rv:25.0) Gecko/20100101 Firefox/25.0'}
def getRequest(url, data = None, headers = HEAD, origin_req_host = None, unverifiable = False, method = 'GET'):
    '''Build and return a urllib request.Request for the given URL, defaulting to the module-level HEAD headers.'''
    return request.Request(url, data = data, headers = headers, origin_req_host = origin_req_host, unverifiable = unverifiable, method=method)
def getResponse(url, data = None, headers = HEAD, origin_req_host = None, unverifiable = False, method = 'GET'):
    '''Open the URL via getRequest() and return the HTTP response object; may raise HTTPError/URLError.'''
    return request.urlopen(getRequest(url, data, headers, origin_req_host, unverifiable, method))
def getResponseData(url, headers = HEAD):
    '''Fetch the response body of *url*, transparently decompressing gzip
    payloads and transcoding the text to UTF-8 bytes.

    Returns UTF-8 encoded bytes, or b'' when the request or the decode fails
    (errors are logged, never raised to the caller).
    '''
    logger = logging.getLogger('GzxSpider')
    try:
        resp = getResponse(url, headers = headers)
        contentEncoding = resp.headers.get('Content-Encoding')
        respData = resp.read()
    except HTTPError as httperror:
        logger.error('%s. The url is %s' % (repr(httperror), url))
        return b''
    except URLError as urlerror:
        logger.error('%s. The url is %s' % (repr(urlerror), url))
        return b''
    except Exception as e:
        logger.error('%s. The url is %s' % (repr(e), url))
        return b''
    # If the payload is gzip-compressed, decompress it from a byte stream.
    if contentEncoding == 'gzip':
        # NOTE(review): dumping the raw payload to 'page.gzip' looks like
        # leftover debug code; kept for behavioral compatibility.
        with open('page.gzip', 'wb') as f:
            f.write(respData)
        respData = gzip.GzipFile(fileobj = BytesIO(respData)).read()
    # Sniff the encoding from a prefix of the payload and normalize GB*
    # variants to gb18030 (a superset of gbk/gb2312).  chardet may return
    # None for the encoding, so fall back to utf-8 before lower-casing.
    stringEncode = chardet.detect(respData[0:2056])['encoding']
    stringEncode = (stringEncode or 'utf-8').lower()
    if stringEncode.startswith('gb'):
        stringEncode = 'gb18030'
    # Bug fix: the original left `data` unbound when decode failed and then
    # executed `return data` inside `finally`, raising UnboundLocalError and
    # masking the logged warning.  Initialize the result and use a plain
    # return instead of returning from `finally`.
    data = b''
    try:
        data = respData.decode(stringEncode).encode('utf-8')
    except UnicodeDecodeError as decodeerror:
        logger.warning('%s. The url is %s' % (repr(decodeerror), url))
    except Exception as e:
        logger.error('%s. The url is %s' % (repr(e), url))
    return data
|
# URL configuration: expose the address API through a DRF DefaultRouter.
from rest_framework.routers import DefaultRouter
from addresses.api import views
router = DefaultRouter()
# Register the address viewset at the app root; reverse-URL names use the 'address' basename.
router.register('', views.AddressApi, basename='address')
# Hand the generated patterns to Django's URL resolver.
urlpatterns = router.urls
|
#!/usr/bin/env python3
# Batteries
from functools import partial
import hashlib
import json
import os
import os.path as osp
import sys
import urllib.request
import shutil
# Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler, Stream
from datetime import datetime
from pprint import pprint
# Local
import model
# loads Twitter credentials from .twitter file
# that is in the same directory as this script
file_dir = os.path.dirname(osp.realpath(__file__))
with open(osp.join(file_dir, '.twitter-pass')) as \
twitter_file:
twitter_cred = json.load(twitter_file)
# authentication from the credentials file above
access_token = twitter_cred['access_token']
access_token_secret = twitter_cred['access_token_secret']
consumer_key = twitter_cred['consumer_key']
consumer_secret = twitter_cred['consumer_secret']
def sign_path(filename, SIGTYPE=hashlib.md5):
    '''
    Hash the contents of a file and return its hex digest.

    input : filename and hash constructor (any hashlib factory, md5 default)
    output : hex-digest string uniquely identifying the file contents
    '''
    digest = SIGTYPE()
    with open(filename, mode='rb') as f:
        # Read in 64 KiB chunks: same digest as the original 128-byte reads,
        # but far fewer syscalls on large files, with bounded memory use.
        for buf in iter(partial(f.read, 65536), b''):
            digest.update(buf)
    return digest.hexdigest()
class StdOutListener(StreamListener):
    '''
    A listener that handles tweets received from the stream.
    Persists each tweet (and any attached photo/video media) to the local
    database via the `model` module and downloads media files into the
    current directory, renamed to their content hash.
    '''
    def __init__(self, filename):
        # NOTE(review): filename is stored but never read in this class —
        # presumably intended for logging tweets to disk; confirm before removing.
        self.filename = filename
    # this is the event handler for new data
    @model.pny.db_session
    def on_data(self, data):
        # Progress marker on stderr for each tweet received.
        print('*', end='', file=sys.stderr)
        tweet = json.loads(data)
        pprint(tweet)
        # assumes Twitter's streaming JSON schema (user.id_str, text, id_str)
        uid_str = tweet['user']['id_str']
        message = tweet['text']
        pid_str = tweet['id_str']
        U = model.User.get(id=uid_str)
        P = model.Post(id=pid_str, text=message, user=U)
        place = tweet['place']
        if place:
            # NOTE(review): geo_id is extracted but never used or stored.
            geo_id = place['id']
        media = tweet.get('extended_entities', dict()).get('media', [])
        for entry in media:
            post_type = entry['type']
            if post_type == 'photo':
                url = entry['media_url']
                Media = model.Image
            elif post_type == 'video':
                # Pick the first variant whose content_type starts with 'video/'.
                urls = entry['video_info']['variants']
                vfilt = lambda x: x.get('content_type', '').find('video/') == 0
                url = next(filter(vfilt, urls))['url']
                Media = model.Video
            else:
                continue
            # Download the media file, rename it to its content hash (keeping
            # the original extension), and record it against the post.
            _, ext = url.rsplit('.', 1)
            filename, headers = urllib.request.urlretrieve(url)
            new_name = sign_path(filename) + '.' + ext
            shutil.move(filename, '.' + osp.sep + new_name)
            Media(id=new_name, post=P)
    # this is the event handler for errors
    def on_error(self, status):
        print(status, file=sys.stderr)
def interface(ifname):
    '''Read a teams JSON file, collect every member as a user id, and start
    streaming their tweets until interrupted.

    :param ifname: path to a JSON file mapping team name -> list of user ids
        (presumably Twitter user id strings — confirm against the data file)
    '''
    with open(ifname) as fd:
        teams = json.load(fd)
    # Flatten all team memberships into a single set of unique user ids.
    users = set()
    for T in teams:
        for U in teams[T]:
            users.add(U)
    listener = StdOutListener(osp.join(file_dir, 'tweets.txt'))
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    print('Use CTRL + C to exit at any time.\n')
    # Blocks here, dispatching incoming tweets to the listener.
    stream = Stream(auth, listener)
    stream.filter(follow=users)
if __name__ == '__main__':
    # Script entry point: expects exactly one CLI argument, the path to the
    # teams JSON file.
    try:
        inpath = sys.argv[1]
    except IndexError:
        # Only a missing argument should trigger the usage message; the
        # original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        print("usage: {} <inpath>".format(
            sys.argv[0]))
        sys.exit(1)
    interface(inpath)
|
import os
import datetime
import jinja2
import webapp2
from google.appengine.ext import ndb
# Jinja2 environment rooted at ./templates next to this file; autoescape is
# enabled so rendered user content is HTML-escaped by default.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape = True)
class Post(ndb.Model):
    """Datastore model for a single blog post."""
    subject = ndb.StringProperty(required = True)  # post title
    content = ndb.TextProperty(required = True)  # post body (unindexed text)
    createdDate = ndb.DateProperty(auto_now_add = True)  # set automatically on first put()
class Handler(webapp2.RequestHandler):
    """Base request handler providing Jinja2 template rendering helpers."""
    def write(self, *a, **kw):
        # Write raw content to the HTTP response body.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render the named template to a string with the given parameters.
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        # Render the template and write the result to the response.
        self.write(self.render_str(template, **kw))
class BlogHandler(Handler):
    """Renders the blog landing page with the ten most recent posts."""
    def get(self):
        # Order newest-first: the original ASC ordering returned the ten
        # *oldest* posts forever, contradicting the "top ten" intent.
        topTenPosts = ndb.gql(
            "SELECT * FROM Post ORDER BY createdDate DESC LIMIT 10").fetch()
        self.render("landing_page.html", posts=topTenPosts)
class NewPostHandler(Handler):
    """Handles the new-post form: GET shows it, POST validates and saves."""
    def get(self):
        # Show an empty form with no validation errors.
        self.render("new_post.html",
                    subject="",
                    subjectError="",
                    content="",
                    contentError="")
    def post(self):
        subject = self.request.get("subject")
        subjectError = ''
        content = self.request.get("content")
        contentError = ''
        if subject and content:
            # Valid input: persist the post and redirect to its permalink.
            newPost = Post(subject=subject,
                           content=content)
            newPostKey = newPost.put()
            postId = newPostKey.id()
            self.redirect("/blog/" + str(postId))
        elif not subject and not content:
            subjectError = 'Subject can not be empty!'
            contentError = 'Content can not be empty!'
        elif not subject:
            subjectError = 'Subject can not be empty!'
        else:
            contentError = 'Content can not be empty!'
        # Re-render the form with any validation errors.
        # NOTE(review): this also runs after the successful redirect above;
        # the extra body is ignored by the client but is wasted work.
        self.render("new_post.html",subject=subject,subjectError=subjectError,
                    content=content,contentError=contentError)
class PostHandler(Handler):
    """Renders a single blog post identified by its numeric datastore id."""
    def get(self, url):
        # `url` is the (\d+) group captured from the /blog/<id> route.
        postId = long(url)
        postKey = ndb.Key('Post', postId)
        post = postKey.get()
        if post is None:
            # Unknown/stale id: return 404 instead of crashing with an
            # AttributeError (500) on the attribute reads below.
            self.error(404)
            return
        subject = post.subject
        content = post.content
        createdDate = post.createdDate
        self.render("post.html",
                    subject=subject,
                    createdDate=createdDate,
                    content=content,
                    postId=postId)
class HomePageHandler(Handler):
    """Renders the site homepage with a link to the blog."""
    def get(self):
        # Single template variable: the blog's root URL.
        self.render("homepage.html", blog="/blog")
app = webapp2.WSGIApplication([('/', HomePageHandler),
('/blog', BlogHandler),
('/blog/newpost', NewPostHandler),
('/blog/(\d+)', PostHandler),
],
debug=True) |
#!/usr/bin/python
#coding:utf-8
"""
Script that saves crawler configuration files into a MySQL database.
2013/9/10 www
"""
import sys
import MySQLdb
import os
import json
import time
import urllib
import codecs
# Python 2 hack: force the default string encoding to UTF-8 so implicit
# str/unicode conversions of the Chinese text below do not raise.
reload(sys)
sys.setdefaultencoding('utf-8')
#con = file('/crontab-all.txt','r')
#total = len(con.readlines())
#con.seek(0,0)
#e = 1
#for i in xrange(total):
# conf = con.readline().split(' ')[-1]
# page = urllib.urlopen(conf[7:])
# confi= page.read()
# page.close()
# fi = codecs.open('/confi/%s.conf'%e,'w','utf-8')
# fi.write(confi)
# fi.close()
# e +=1
#con.close()
# Map numeric 'category' codes from the config files to category names.
di = {1:'news',2:'bbs',3:'blog',4:'mblog'}
values = []
# Walk /configs and build one DB row per *.conf JSON file.
for root,dirs,files in os.walk('/configs'):
    for filepath in files:
        pathname = os.path.join(root,filepath)
        if str(pathname)[-4:] == 'conf':
            fi=file(pathname,'r')
            fil = fi.read()
            try:
                dicts = json.loads(fil)
            except ValueError,e:
                # Malformed JSON: report and skip this file.
                # NOTE(review): fi is not closed on this path.
                print False,str(e),pathname
                continue
            fi.close()
            filename = dicts.get('site',None)
            if isinstance(filename,unicode):
                filename = filename.encode('utf-8')
            domain = dicts.get('domains',None)
            if domain != None:
                # Keep only the first listed domain.
                domain = domain[0]
            filetype = dicts['fields'].get('category',None)
            if filetype != None:
                filetype = int(filetype['value'])
                if filetype >= 5:
                    # Category code outside the known map: report and skip.
                    print domain,filename,pathname
                    continue
                cg = di[filetype]
            # NOTE(review): if 'category' is missing, cg is unbound (first
            # file) or stale from the previous iteration — likely a bug.
            channel = dicts['fields'].get('channel',None)
            if channel != None:
                channel = channel['value']
            else :
                channel = '未创建'
            #if isinstance(channel,unicode):
            #    channel = channel.encode('utf-8')
            # Row layout matches the INSERT column list below; 120 is the
            # priority in seconds, the literal strings are default markers.
            value = (cg,str(domain),filename,channel,str(pathname)[1:],120,time.strftime('%Y-%m-%d %H-%M-%S'),'暂无更新','root')
            values.append(value)
# Bulk-insert all collected rows into webot.blog_config_task.
conn = MySQLdb.connect(user='root',passwd="",host='localhost',charset='utf8')
conn.query("set names 'utf8'")
conn.select_db('webot')
cursor = conn.cursor()
cursor.executemany('insert into blog_config_task (category_name,domain,domain_name,channel,config_path,priority_seconds,create_datetime,update_datetime,user) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)',values)
cursor.close()
conn.close()
print 'ok'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 18-02-2021
"""
from .noise_augmentation import compute_noise_augmented_samples
from .noise_generation import generate_babble, generate_ssn
from .select_noise import select_split_noise_files
from .split_noise import split_noise_files
def compute_noised():
    """Run the full noise-augmentation pipeline.

    Invokes, in order: noise file selection/splitting, speech-shaped-noise
    generation, babble generation, noise splitting, and finally the
    noise-augmented sample computation.
    NOTE(review): the call order appears significant (later stages
    presumably consume the outputs of earlier ones) — confirm before
    reordering.
    """
    select_split_noise_files()
    generate_ssn()
    generate_babble()
    split_noise_files()
    compute_noise_augmented_samples()
if __name__ == "__main__":
    # Allow running the pipeline directly as a script.
    compute_noised()
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import asynctest
import mock
import pytest
from paasta_tools import drain_lib
def test_register_drain_method():
    """A method registered via the decorator is retrievable by its name."""
    # Patch the registry dict so the fake method does not leak into other tests.
    with mock.patch.dict(drain_lib._drain_methods):
        @drain_lib.register_drain_method("FAKEDRAINMETHOD")
        class FakeDrainMethod(drain_lib.DrainMethod):
            pass
        assert (
            type(drain_lib.get_drain_method("FAKEDRAINMETHOD", "srv", "inst", "ns"))
            == FakeDrainMethod
        )
@contextlib.contextmanager
def mock_ClientSession(**fake_session_kwargs):
    """Patch aiohttp.ClientSession so `async with ClientSession()` yields a
    MagicMock configured with the given attributes (e.g. get=..., request=...)."""
    fake_session = asynctest.MagicMock(name="session", **fake_session_kwargs)
    class FakeClientSession:
        def __init__(self, *args, **kwargs):
            ...
        async def __aenter__(*args):
            # Entering the fake session context yields the shared mock.
            return fake_session
        async def __aexit__(*args):
            pass
    with mock.patch("aiohttp.ClientSession", new=FakeClientSession, autospec=False):
        yield
class TestHacheckDrainMethod:
    """Tests for drain_lib.HacheckDrainMethod."""
    # Shared instance under test; registered under two nerve namespaces.
    drain_method = drain_lib.HacheckDrainMethod(
        service="srv",
        instance="inst",
        registrations=["ns_one", "ns_two"],
        hacheck_port=12345,
    )
    async def _async_id(self, x):
        # Identity coroutine used as a trivial `func` for for_each_registration.
        return x
    def test_spool_urls(self):
        fake_task = mock.Mock(host="fake_host", ports=[54321])
        actual = self.drain_method.spool_urls(fake_task)
        # Nerve hits /{mode}/{service}.{namespace}/{port}/status
        expected = [
            f"http://fake_host:12345/spool/{ns}/54321/status"
            for ns in self.drain_method.registrations
        ]
        assert actual == expected
    @pytest.mark.asyncio
    async def test_for_each_registration_with_no_ports(self):
        # A task with no ports has no registrations to act on.
        fake_task = mock.Mock(host="fake_host", ports=[])
        actual = await self.drain_method.for_each_registration(
            task=fake_task, func=self._async_id
        )
        assert actual is None
    @pytest.mark.asyncio
    async def test_for_each_registration(self):
        # With a port, func is applied once per registration spool URL.
        fake_task = mock.Mock(host="fake_host", ports=[54321])
        actual = await self.drain_method.for_each_registration(
            task=fake_task, func=self._async_id
        )
        assert actual == self.drain_method.spool_urls(fake_task)
    @pytest.mark.asyncio
    async def test_is_draining_yes(self):
        # hacheck reports 503 with a "Drained by Paasta" body while draining.
        fake_response = mock.Mock(
            status=503,
            text=asynctest.CoroutineMock(
                return_value="Service service in down state since 1435694078.778886 "
                "until 1435694178.780000: Drained by Paasta"
            ),
        )
        fake_task = mock.Mock(host="fake_host", ports=[54321])
        with mock_ClientSession(
            get=mock.Mock(
                return_value=asynctest.MagicMock(
                    __aenter__=asynctest.CoroutineMock(return_value=fake_response)
                )
            )
        ):
            assert await self.drain_method.is_draining(fake_task) is True
    @pytest.mark.asyncio
    async def test_is_draining_no(self):
        # A 200 with an empty body means the service is not drained.
        fake_response = mock.Mock(
            status=200, text=asynctest.CoroutineMock(return_value="")
        )
        fake_task = mock.Mock(host="fake_host", ports=[54321])
        with mock_ClientSession(
            get=mock.Mock(
                return_value=asynctest.MagicMock(
                    __aenter__=asynctest.CoroutineMock(return_value=fake_response)
                )
            )
        ):
            assert await self.drain_method.is_draining(fake_task) is False
class TestHTTPDrainMethod:
    """Tests for drain_lib.HTTPDrainMethod."""
    def test_get_format_params(self):
        fake_task = mock.Mock(host="fake_host", ports=[54321])
        drain_method = drain_lib.HTTPDrainMethod(
            "fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
        )
        # One dict of URL-template parameters per (port, registration) pair.
        assert drain_method.get_format_params(fake_task) == [
            {
                "host": "fake_host",
                "port": 54321,
                "service": "fake_service",
                "instance": "fake_instance",
                "nerve_ns": "fake_nerve_ns",
            }
        ]
    def test_format_url(self):
        drain_method = drain_lib.HTTPDrainMethod(
            "fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
        )
        url_format = "foo_{host}"
        format_params = {"host": "fake_host"}
        assert drain_method.format_url(url_format, format_params) == "foo_fake_host"
    def test_parse_success_codes(self):
        drain_method = drain_lib.HTTPDrainMethod(
            "fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
        )
        # Single codes, inclusive ranges, and comma-separated mixes.
        assert drain_method.parse_success_codes("200") == {200}
        assert drain_method.parse_success_codes("200-203") == {200, 201, 202, 203}
        # NOTE(review): 305 appears twice below; harmless in a set literal
        # but probably a typo.
        assert drain_method.parse_success_codes("200-202,302,305-306") == {
            200,
            201,
            202,
            302,
            305,
            305,
            306,
        }
        # Bare integers are accepted as well as strings.
        assert drain_method.parse_success_codes(200) == {200}
    def test_check_response_code(self):
        drain_method = drain_lib.HTTPDrainMethod(
            "fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
        )
        # Happy case
        assert drain_method.check_response_code(200, "200-299") is True
        # Sad case
        assert drain_method.check_response_code(500, "200-299") is False
    @pytest.mark.asyncio
    async def test_issue_request(self):
        drain_method = drain_lib.HTTPDrainMethod(
            "fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
        )
        fake_task = mock.Mock(host="fake_host", ports=[54321])
        url_spec = {
            "url_format": "http://localhost:654321/fake/{host}",
            "method": "get",
            "success_codes": "1234",
        }
        fake_resp = mock.Mock(status=1234)
        mock_request = mock.Mock(
            return_value=asynctest.CoroutineMock(return_value=fake_resp)()
        )
        with mock_ClientSession(request=mock_request):
            await drain_method.issue_request(url_spec=url_spec, task=fake_task)
        # The HTTP method is upper-cased and the URL filled from task params.
        mock_request.assert_called_once_with(
            method="GET",
            url="http://localhost:654321/fake/fake_host",
            headers=mock.ANY,
            timeout=15,
        )
|
# Generates a focusing grating coupler (fan-shaped) GDSII layout.
# Dimensions are presumably in µm (the default gdspy user unit) — TODO confirm.
import numpy as np
import gdspy
# STEP 1:
lib = gdspy.GdsLibrary(precision = 1e-10)
# create a new cell to save
gc = lib.new_cell("GC")
ground = lib.new_cell("ground")
tooth = lib.new_cell("tooth")
# define the index of layer
ld_fulletch = {"layer": 1, "datatype": 1}
ld_grating = {"layer": 2, "datatype": 1}
##################################################################
# parameters setting
##################################################################
xp = 32 # xp is the length of first tooth (inner radius of the first grating arc; incremented per period below)
xp2 = 29.84 + 33 # xp2 is the length of end fanshape (outer radius of the fan region)
w_etch = 70 # width of etch
h_etch = 60 # height of etch region
l_wg = 2 # the length of straight waveguide
w_wg = 0.44 # the width of waveguide
a = 0.29 # width of etched tooth
b = 0.350 # width of remaining tooth (a + b = grating pitch)
GC_theta = 10/180*np.pi # half-angle of the fan, radians (10 degrees)
period = 30 # number of grating teeth
##################################################################
# create the rect region
points = [(w_etch, -h_etch/2), (w_etch, h_etch/2), (-l_wg/2, h_etch/2), (-l_wg/2, -h_etch/2)]
rect = gdspy.Polygon(points, **ld_fulletch)
# create the straight waveguide
points = [(l_wg/2, -w_wg/2), (l_wg/2, w_wg/2), (-l_wg/2, w_wg/2), (-l_wg/2, -w_wg/2)]
wg = gdspy.Polygon(points, **ld_fulletch)
# create the fanshape (full sector spanning +/- GC_theta)
arc = gdspy.Round(
    (0, 0),
    xp2,
    inner_radius=0,
    initial_angle= -GC_theta,
    final_angle= GC_theta,
    number_of_points=128,
    **ld_fulletch
)
# union of waveguide and fan = the "ground" (full-etch) shape
gd = gdspy.boolean(wg, arc, 'or', precision=1e-10, max_points=199, layer=1, datatype=1)
ground.add(gd)
# create the teeth: one annular sector per period, stepping outward by the pitch
i=0
while i < period:
    arc = gdspy.Round(
        (0, 0),
        xp+a,
        inner_radius=xp,
        initial_angle= -GC_theta,
        final_angle= GC_theta,
        number_of_points=128,
        **ld_grating
    )
    tooth.add(arc)
    pitch = a + b
    xp += pitch
    i += 1
# invert the ground against the bounding rectangle, then add the teeth
inv = gdspy.boolean(gd, rect, 'xor', precision=1e-10, max_points=199, layer=0, datatype=1)
inv = gdspy.boolean(inv, tooth, 'or', precision=1e-10, max_points=199, layer=0, datatype=1)
gc.add(inv)
gc.add(gd)
gc.add(tooth)
# write the library and open the interactive viewer
lib.write_gds('FocusGC-2.gds')
gdspy.LayoutViewer() |
import datetime
import heapq
import re
from abc import ABC, abstractmethod
from pathlib import Path
from docker.models.services import Service
from docker.errors import NotFound
from docker.types import SecretReference
from typing import Optional, Set, Tuple, Union
from .docker_utils import DockerSecretsUtil
from nwmaas.scheduler.rsa_key_pair import RsaKeyPair
class SshKeyUtil(ABC):
    """
    Abstraction for an object for providing a number of SSH keys for exclusive use.

    Implementations hand out RSA key pairs for exclusive use, track which pairs
    are currently registered (in use), and may pool released pairs for reuse up
    to a bounded number of usages (see ::attribute:`max_reuse`).
    """
    @abstractmethod
    def acquire_ssh_rsa_key(self) -> RsaKeyPair:
        """
        Retrieve, register, and return a previously not-in-use RSA key pair, either from the reuse pool or from being
        newly generated.
        Returns
        -------
        RsaKeyPair
            A previously not-in-use RSA key pair, registered as in-use immediately before being returned
        """
        pass
    @abstractmethod
    def get_existing_keys(self) -> Set[RsaKeyPair]:
        """
        Return all known and managed key pairs for this instance, including all those currently acquired and all those
        in the reuse pool.
        Returns
        -------
        Set[RsaKeyPair]
            The set of all managed ::class:`RsaKeyPair` objects
        """
        pass
    @abstractmethod
    def get_registered_keys(self) -> Set[RsaKeyPair]:
        """
        Return the set of all registered key pairs.
        Return the set of all registered RsaKeyPair objects; i.e., those known to be currently in use.
        Returns
        -------
        Set[RsaKeyPair]
            All currently registered key pairs
        """
        pass
    @property
    @abstractmethod
    def max_reuse(self):
        """
        Get the maximum number of times a managed key pair may be used before being retired.
        Returns
        -------
        int
            The maximum usage count for a managed key pair
        """
        pass
    @abstractmethod
    def register_ssh_rsa_key(self, key_pair: RsaKeyPair, prior_usages: int = 0):
        """
        Manually register an existing key pair as being in-use, also optionally setting the number of prior usages.
        Previously registered key pairs will result in no action being performed.
        Out-of-range ``prior_usage`` values will be replaced with 0.
        Parameters
        ----------
        key_pair
        prior_usages
        """
        pass
    @abstractmethod
    def release_ssh_rsa_key(self, key_pair: RsaKeyPair):
        """
        Release a registered RSA key pair once it is no longer needed for exclusive use, either making it available for
        reuse or retiring the key.
        Parameters
        ----------
        key_pair : RsaKeyPair
            A key pair that is no longer needed for exclusive use
        """
        pass
    @property
    @abstractmethod
    def ssh_keys_directory(self) -> Path:
        """
        Get the path to the directory containing the registered SSH keys' backing files on the file system.
        Returns
        -------
        Path
            The path to the directory containing the registered SSH keys' backing files on the file system.
        """
        pass
class SshKeyUtilImpl(SshKeyUtil):
    """
    An object for providing a number of SSH keys for exclusive use.
    """

    def __init__(self, ssh_keys_directory: Path, reusable_pool_size: int = 0, max_reuse: int = 10):
        """
        Initialize an object.

        The ``ssh_keys_directory`` argument will be used to set the ::attribute:`_ssh_keys_directory` attribute, except
        if there is an existing, non-directory file already at that path. In these cases, the parent directory of such
        a file will be used.

        In cases when the directory at ::attribute:`_ssh_keys_directory` does not exist, it will be created.

        Reuse pool size and max reuse count are limited to the interval [0, 25]. When the argument for setting such an
        attribute is outside the valid range, the attribute will be set to the closest in-range value.

        By default, ``reusable_pool_size`` is set to 0, which results in reuse being disabled. The default for
        ``max_reuse`` is 10.

        Parameters
        ----------
        ssh_keys_directory : Path
            A path to a working directory where actual key files will be maintained.
        reusable_pool_size : int
            The maximum number - in the interval [0, 25] - of previously acquired key pair objects to keep in a pool
            after being released to be reused.
        max_reuse : int
            The maximum number of times - in the interval [0, 25] - a released key pair object may be placed in the
            reuse pool.
        """
        if ssh_keys_directory.exists():
            # A non-directory file at the path means we fall back to its parent directory.
            self._ssh_keys_directory = ssh_keys_directory if ssh_keys_directory.is_dir() else ssh_keys_directory.parent
        else:
            self._ssh_keys_directory = ssh_keys_directory
            # parents=True so a missing intermediate directory does not make creation fail
            self._ssh_keys_directory.mkdir(parents=True)
        # Clamp both settings into the documented [0, 25] interval.
        self._pool_size = 0 if reusable_pool_size < 1 else (25 if reusable_pool_size > 25 else reusable_pool_size)
        self._max_reuse = 0 if max_reuse < 1 else (25 if max_reuse > 25 else max_reuse)
        # TODO: search directory for existing keys and either retire or make available for use
        # A dictionary of currently acquired key pair objects to prior usage counts (i.e., on first use, this is 0)
        self._registered_keys_usage_counts = dict()
        # A min-heap of (usage count, key pair object) tuples; the least-used key is reused first.
        self._reuse_pool = []

    def acquire_ssh_rsa_key(self) -> RsaKeyPair:
        """
        Retrieve, register, and return a previously not-in-use RSA key pair, either from the reuse pool or from being
        newly generated.

        Returns
        -------
        RsaKeyPair
            A previously not-in-use RSA key pair, registered as in-use immediately before being returned
        """
        if len(self._reuse_pool) > 0:
            # Bug fix: heappop() requires the heap as its argument; it was previously called with none,
            # which raised a TypeError whenever the pool was non-empty.
            usages, key_pair = heapq.heappop(self._reuse_pool)
        else:
            # Timestamp-based names keep freshly generated key files unique within the directory.
            timestamp_based_name = '{}_id_rsa'.format(str(datetime.datetime.now().timestamp()))
            key_pair = RsaKeyPair(directory=self._ssh_keys_directory, name=timestamp_based_name)
            usages = 0
        self._registered_keys_usage_counts[key_pair] = usages
        return key_pair

    def get_existing_keys(self) -> Set[RsaKeyPair]:
        """
        Return all known and managed key pairs for this instance, including all those currently acquired and all those
        in the reuse pool.

        Returns
        -------
        Set[RsaKeyPair]
            The set of all managed ::class:`RsaKeyPair` objects
        """
        key_pairs = set(self.get_registered_keys())
        # Pool entries are (usage count, key pair) tuples.
        for t in self._reuse_pool:
            key_pairs.add(t[1])
        return key_pairs

    def get_registered_keys(self) -> Set[RsaKeyPair]:
        """
        Return the set of all registered key pairs.

        Return the set of all registered RsaKeyPair objects; i.e., those known to be currently in use.

        Returns
        -------
        Set[RsaKeyPair]
            All currently registered key pairs
        """
        return set(self._registered_keys_usage_counts.keys())

    @property
    def max_reuse(self):
        """The maximum number of times a released key pair may be placed in the reuse pool."""
        return self._max_reuse

    def register_ssh_rsa_key(self, key_pair: RsaKeyPair, prior_usages: int = 0):
        """
        Manually register an existing key pair as being in-use, also optionally setting the number of prior usages.

        Previously registered key pairs will result in no action being performed.

        Out-of-range ``prior_usages`` values will be replaced with 0.

        Parameters
        ----------
        key_pair
        prior_usages
        """
        if key_pair not in self._registered_keys_usage_counts:
            self._registered_keys_usage_counts[key_pair] = prior_usages if 0 <= prior_usages <= self._max_reuse else 0

    def release_ssh_rsa_key(self, key_pair: RsaKeyPair):
        """
        Release a registered RSA key pair once it is no longer needed for exclusive use, either making it available for
        reuse or retiring the key.

        Parameters
        ----------
        key_pair : RsaKeyPair
            A key pair that is no longer needed for exclusive use
        """
        if key_pair is None:
            # TODO: consider doing something else here
            return
        if key_pair not in self._registered_keys_usage_counts:
            raise RuntimeError("Unexpected key pair released with private key file: {}".format(
                str(key_pair.private_key_file)))
        # Get prior usage and increment by one for this time
        usage = self._registered_keys_usage_counts.pop(key_pair) + 1
        # If pool is full or this key has been reused to the max, just clean it up
        if usage >= self._max_reuse or len(self._reuse_pool) >= self._pool_size:
            key_pair.delete_key_files()
        # Otherwise, add to heap
        else:
            # NOTE(review): on equal usage counts heapq falls back to comparing the RsaKeyPair
            # objects themselves; assumes RsaKeyPair supports ordering — confirm.
            heapq.heappush(self._reuse_pool, (usage, key_pair))

    @property
    def ssh_keys_directory(self) -> Path:
        """The directory containing the managed keys' backing files."""
        return self._ssh_keys_directory
class DecoratingSshKeyUtil(SshKeyUtil, ABC):
    """
    A ::class:`SshKeyUtil` decorator wrapping another instance received at construction time.

    Every abstract method from ::class:`SshKeyUtil` is implemented as a plain pass-through:
    each call is forwarded unchanged to the wrapped ::class:`SshKeyUtil`, and any result is
    returned as-is.  Concrete subclasses therefore only need to override the behavior they
    actually want to change.
    """

    def __init__(self, ssh_key_util: SshKeyUtil):
        # The wrapped instance that receives every forwarded call.
        self._ssh_key_util = ssh_key_util

    def acquire_ssh_rsa_key(self) -> RsaKeyPair:
        """
        Forward to the wrapped util: retrieve, register, and return a previously
        not-in-use RSA key pair (pooled or newly generated).

        Returns
        -------
        RsaKeyPair
            A previously not-in-use RSA key pair, registered as in-use before being returned.
        """
        return self._ssh_key_util.acquire_ssh_rsa_key()

    def get_existing_keys(self) -> Set[RsaKeyPair]:
        """
        Forward to the wrapped util: every managed key pair, whether currently
        acquired or sitting in the reuse pool.

        Returns
        -------
        Set[RsaKeyPair]
            The set of all managed ::class:`RsaKeyPair` objects.
        """
        return self._ssh_key_util.get_existing_keys()

    def get_registered_keys(self) -> Set[RsaKeyPair]:
        """
        Forward to the wrapped util: the key pairs currently registered as in use.

        Returns
        -------
        Set[RsaKeyPair]
            All currently registered key pairs.
        """
        return self._ssh_key_util.get_registered_keys()

    @property
    def max_reuse(self):
        """Forward to the wrapped util: the maximum reuse count per key pair."""
        return self._ssh_key_util.max_reuse

    def register_ssh_rsa_key(self, key_pair: RsaKeyPair, prior_usages: int = 0):
        """
        Forward to the wrapped util: manually register an existing key pair as in-use,
        optionally recording prior usages.  Already-registered pairs are a no-op, and
        out-of-range ``prior_usages`` values are replaced with 0.

        Parameters
        ----------
        key_pair
        prior_usages
        """
        self._ssh_key_util.register_ssh_rsa_key(key_pair=key_pair, prior_usages=prior_usages)

    def release_ssh_rsa_key(self, key_pair: RsaKeyPair):
        """
        Forward to the wrapped util: release a registered key pair for pooling or retirement.

        Parameters
        ----------
        key_pair : RsaKeyPair
            A key pair that is no longer needed for exclusive use.
        """
        self._ssh_key_util.release_ssh_rsa_key(key_pair=key_pair)

    @property
    def ssh_keys_directory(self) -> Path:
        """Forward to the wrapped util: the directory holding the key files."""
        return self._ssh_key_util.ssh_keys_directory
class SshKeyDockerSecretsUtil(DecoratingSshKeyUtil, DockerSecretsUtil):
    """
    An extension (actually a decorator) of ::class:`SshKeyUtil` with additional functionality from the
    ::class:`DockerSecretsUtil` interface for creating and managing Docker secrets for the managed SSH keys.

    This implementation of ::class:`SshKeyUtil` assumes that key pairs registered by this instance will all be created
    having names equal to an associated Docker service id of the service that will use the keys. This invariant can
    be maintained by only using the ::method:`init_key_pair_and_secrets_for_service` to register key pairs.

    Additionally, to ensure no keys are reused, the prior usages of keys registered using
    ::method:`init_key_pair_and_secrets_for_service` will always be set to the max number allowed by the nested
    ::class:`SshKeyUtil` instance. This ensures they are always retired when released.
    """

    @classmethod
    def get_key_pair_secret_names(cls, key_pair: RsaKeyPair) -> Tuple[str, str]:
        """
        Get, as a tuple of strings, the appropriate names for Docker secrets corresponding to the private and public
        keys respectively of the provided ::class:`RsaKeyPair`.

        Parameters
        ----------
        key_pair : RsaKeyPair
            A key pair for which the standardized names for Docker secrets (for the private and public keys) are wanted.

        Returns
        -------
        Tuple[str, str]
            The appropriate names for the secret for the private and public keys respectively, as a tuple of two strings
        """
        return '{}{}'.format(cls.get_private_key_secret_name_prefix(), key_pair.name), \
               '{}{}'.format(cls.get_public_key_secret_name_prefix(), key_pair.name)

    @classmethod
    def get_private_key_secret_name_prefix(cls) -> str:
        """
        Get the standard prefix to use for the names of Docker secrets for ::class:`RsaKeyPair` private keys.

        Returns
        -------
        str
            the standard prefix to use for the names of Docker secrets for ::class:`RsaKeyPair` private keys
        """
        return 'ssh_priv_key_'

    @classmethod
    def get_public_key_secret_name_prefix(cls) -> str:
        """
        Get the standard prefix to use for the names of Docker secrets for ::class:`RsaKeyPair` public keys.

        Returns
        -------
        str
            the standard prefix to use for the names of Docker secrets for ::class:`RsaKeyPair` public keys
        """
        return 'ssh_pub_key_'

    def __init__(self, ssh_key_util: SshKeyUtil, docker_client):
        """
        Initialize, wiring up both parent types.

        The two parent classes do not share a cooperative ``super().__init__`` chain, so each parent
        initializer is invoked explicitly.  (The previous ``super(Cls).__init__(...)`` calls created
        unbound super objects — passing keyword arguments to them raised a ``TypeError`` and neither
        parent initializer ever ran.)

        Parameters
        ----------
        ssh_key_util : SshKeyUtil
            The nested, decorated key util instance.
        docker_client
            A Docker client used for secret and service operations.
        """
        DecoratingSshKeyUtil.__init__(self, ssh_key_util=ssh_key_util)
        DockerSecretsUtil.__init__(self, docker_client=docker_client)

    def _get_key_pair_for_referenced_secret(self, ref_for_secrets: Union[str, SecretReference]) -> RsaKeyPair:
        """
        Return the associated, registered ::class:`RsaKeyPair` object for a Docker secret represented by the given
        implicit reference.

        Return the associated ::class:`RsaKeyPair` object for the Docker secret represented implicitly by the supplied
        argument, where the argument can be either the name of the key pair for the secret, the name of the secret, or
        the secret's ::class:`SecretReference` object.

        If a string is passed in ``ref_for_secrets``, then its format is checked to determine whether it is a secret
        name. If it matches the pattern of a secret name, the key pair name substring is parsed out. If it does not,
        the string is assumed to itself be a key pair name. Whether based on the entire string or the substring, the
        key pair name string is used to obtain the actual registered ::class:`RsaKeyPair` from the results of
        ::method:`get_registered_keys`.

        If a ::class:`SecretReference` is used, then its name is used as an argument in a recursive call to this method,
        the result returned.

        Parameters
        ----------
        ref_for_secrets : Union[str, SecretReference]
            Either the name of a key pair, the name of a key's secret, or a secret reference object for a key secret.

        Returns
        -------
        RsaKeyPair
            The associated key pair object.

        Raises
        -------
        ValueError
            If the value of ``ref_for_secrets`` cannot be used to find an associated, currently-registered
            ::class:`RsaKeyPair` object.
        """
        if isinstance(ref_for_secrets, SecretReference):
            return self._get_key_pair_for_referenced_secret(ref_for_secrets['SecretName'])
        else:
            priv_pattern = re.compile(self.get_private_key_secret_name_prefix() + '(.*)')
            priv_match = priv_pattern.match(ref_for_secrets)
            # Bug fix: the public pattern was previously built from the *private* key prefix,
            # so public-key secret names were never matched by this branch.
            pub_pattern = re.compile(self.get_public_key_secret_name_prefix() + '(.*)')
            pub_match = pub_pattern.match(ref_for_secrets)
            if priv_match is not None:
                key_pair_name = priv_match.group(1)
            elif pub_match is not None:
                key_pair_name = pub_match.group(1)
            else:
                key_pair_name = ref_for_secrets
            kp = self._get_registered_key_pair_by_name(key_pair_name)
            if kp is not None:
                return kp
            raise ValueError("Unrecognized name for SSH key pair or associated Docker Secret used to look up key pair "
                             "object ({})".format(key_pair_name))

    def _get_registered_key_pair_by_name(self, name: str) -> Optional[RsaKeyPair]:
        """
        Get the registered key pair with the given name, or ``None`` if there is none.

        Parameters
        ----------
        name : str

        Returns
        -------
        Optional[RsaKeyPair]
            The registered key pair with the given name, or ``None`` if there is none.
        """
        for kp in self.get_registered_keys():
            if kp.name == name:
                return kp
        return None

    def _lookup_secret_by_name(self, name: str) -> Optional[SecretReference]:
        """
        Look up and return the ::class:`SecretReference` object for the Docker secret having the given name, or
        return ``None`` if no such secret can be found.

        Parameters
        ----------
        name : str
            The name of the Docker secret of interest.

        Returns
        -------
        Optional[SecretReference]
            The ::class:`SecretReference` object for the desired Docker secret, or ``None`` if none is found.
        """
        try:
            return self.docker_client.secrets.get(name)
        except NotFound:
            return None

    def acquire_ssh_rsa_key(self) -> RsaKeyPair:
        """
        An override of the super-method, which should not be called directly for this implementation, and thus results
        in a raised ::class:`RuntimeError`.

        Raises
        -------
        RuntimeError
        """
        raise RuntimeError('Method {} cannot be executed directly by {}; use {} instead'.format(
            'acquire_ssh_rsa_key()',
            self.__class__.__name__,
            'init_key_pair_and_secrets_for_service(Service)'))

    def get_key_pair_for_service(self, service: Service) -> Optional[RsaKeyPair]:
        """
        Helper method to easily get the registered ::class:`RsaKeyPair` object associated with the given Docker service,
        or ``None`` if there is no such registered key pair.

        Parameters
        ----------
        service : Service
            The related Docker service.

        Returns
        -------
        Optional[RsaKeyPair]
            The registered ::class:`RsaKeyPair` object associated with the Docker service, or ``None`` if there is none.
        """
        # Key pairs are created with names equal to the service's name/id (see class docstring).
        return self._get_registered_key_pair_by_name(service.name)

    def init_key_pair_and_secrets_for_service(self, service: Service):
        """
        Create a dedicated ::class:`RsaKeyPair` for use by this service, register the key pair, create Docker secrets
        for the private and public keys, and attach the secrets to the service.

        Additionally, to ensure no keys are reused, the prior usages of keys is set when registering, to the max number
        allowed by the nested ::attribute:`_ssh_key_util`. This ensures keys are always retired when released.

        Parameters
        ----------
        service : Service
            A Docker service.
        """
        # Name the key pair after the service id to maintain the class's lookup invariant.
        key_pair = RsaKeyPair(directory=self.ssh_keys_directory, name=service.id)
        # Register at the nested util's max reuse count so the key is retired on release.
        self._ssh_key_util.register_ssh_rsa_key(key_pair=key_pair, prior_usages=self._ssh_key_util.max_reuse)
        priv_key_secret_name, pub_key_secret_name = self.get_key_pair_secret_names(key_pair)
        private_key_secret_ref = self.create_docker_secret(name=priv_key_secret_name, data=key_pair.private_key_pem)
        public_key_secret_ref = self.create_docker_secret(name=pub_key_secret_name, data=key_pair.public_key)
        self.add_secrets_for_service(service, private_key_secret_ref, public_key_secret_ref)

    @property
    def max_reuse(self):
        """Always 0: keys managed through this util are single-use by design."""
        return 0

    def register_ssh_rsa_key(self, key_pair: RsaKeyPair, prior_usages: int = 0):
        """
        An override of the super-method, which should not be called directly for this implementation, and thus results
        in a raised ::class:`RuntimeError`.

        Parameters
        ----------
        key_pair
        prior_usages

        Raises
        -------
        RuntimeError
        """
        raise RuntimeError('Method {} cannot be executed directly by {}; use {} instead'.format(
            'register_ssh_rsa_key(RsaKeyPair, int)',
            self.__class__.__name__,
            'init_key_pair_and_secrets_for_service(Service)'))

    def release_all_for_stopped_services(self):
        """
        Release any still-registered key pairs for Docker services that are no longer running, cleaning up any
        associated Docker secrets as well.

        The method assumes that key pairs registered by this instance and its spawned services will all be created
        having names equal to the Docker service id of the service that will use the keys. As such, if the service is
        no longer found, it has finished, and the key pair of the same name can be cleaned up.
        """
        for key_pair in self.get_registered_keys():
            try:
                # Probe only; the service object itself is not needed.
                self.docker_client.services.get(key_pair.name)
            except NotFound:
                self.release_ssh_key_and_secrets(lookup_obj=key_pair, assume_service_removed=True)

    def release_ssh_key_and_secrets(self,
                                    lookup_obj: Union[RsaKeyPair, str, SecretReference],
                                    assume_service_removed: bool = False,
                                    should_delete: bool = True):
        """
        Release the appropriate SSH-key-related Docker secrets from use by their service (if it still exists), delete
        the secrets, and release the key pair.

        If a string or ::class:`SecretReference` is passed in ``lookup_obj``, the
        ::method:`_get_key_pair_for_referenced_secret` is used to obtain the appropriate key pair object. Otherwise,
        the ``lookup_obj`` is expected to itself be a ::class:`RsaKeyPair` object.

        If a ::class:`RsaKeyPair` object is passed in ``lookup_obj``, then the names for the private and public key
        secrets can be derived from the name of the key pair. Also, the service id is directly equal to the name of the
        key pair, making the service itself easy to find.

        Parameters
        ----------
        lookup_obj : Union[RsaKeyPair, str, SecretReference]
            Either the related key pair or a means of referencing it that can be used by
            ::method:`_get_key_pair_for_referenced_secret` to find the key pair.
        assume_service_removed : bool
            Whether it is safe to assume the related Docker service has already been removed, and thus it is not
            necessary to look up the service and detach the secrets from it.
        should_delete : bool
            Whether the secret should be deleted/removed, which is ``True`` by default.

        Raises
        -------
        TypeError
            If ``lookup_obj`` is not one of the supported types.
        ValueError
            If a string/reference ``lookup_obj`` cannot be resolved to a registered key pair.
        """
        # First, ensure we have an RsaKeyPair object
        if isinstance(lookup_obj, (SecretReference, str)):
            # Note that this will raise a ValueError here if it can't find a key pair object
            key_pair = self._get_key_pair_for_referenced_secret(lookup_obj)
        elif isinstance(lookup_obj, RsaKeyPair):
            key_pair = lookup_obj
        else:
            raise TypeError(
                "Invalid type passed to release SSH key secrets (was {})".format(lookup_obj.__class__.__name__))
        # Then obtain the secrets based on knowing the key pair.  Use the tolerant lookup helper so an
        # already-deleted secret yields None instead of raising NotFound (the None checks below depend
        # on this; previously the direct secrets.get() call could raise and abort the whole release).
        priv_secret_name, pub_secret_name = self.get_key_pair_secret_names(key_pair)
        private_key_secret_ref = self._lookup_secret_by_name(priv_secret_name)
        public_key_secret_ref = self._lookup_secret_by_name(pub_secret_name)
        # Determine if service still exists and, if so, remove secrets from it
        if not assume_service_removed:
            try:
                service = self.docker_client.services.get(key_pair.name)
                self.remove_secrets_for_service(service, private_key_secret_ref, public_key_secret_ref)
            except NotFound:
                pass
        # Delete the secrets
        if should_delete:
            if private_key_secret_ref is not None:
                private_key_secret_ref.remove()
            if public_key_secret_ref is not None:
                public_key_secret_ref.remove()
        # Finally, release the key pair
        self.release_ssh_rsa_key(key_pair)
|
# Script to process HeroAI data extracts
#
# Loads a previously generated patient-flow CSV and a raw real-time JSON
# extract; the JSON-parsing step appears to be in-progress / exploratory.
import os
import numpy as np
import pandas as pd
from funs_support import find_dir_olu

# Resolve working directories relative to the "olu" data root.
dir_base = os.getcwd()
dir_olu = find_dir_olu()
dir_output = os.path.join(dir_olu, 'output')
dir_flow = os.path.join(dir_output, 'flow')
dir_rt = os.path.join(dir_olu, 'rt')

#############################
# --- STEP 1: LOAD DATA --- #

# Existing output data
df_demo = pd.read_csv(os.path.join(dir_flow,'demo4flow.csv'))
# NOTE(review): qq is defined but not used in this visible portion — presumably
# the columns of interest for a later step; confirm before removing.
qq = ['disposition_selected', 'bed_requested', 'bed_ready', 'Length_of_stay']
# JSON real-time data (lines=True: one JSON record per line)
fn_rt = 'raw_head.json'
df_json = pd.read_json(os.path.join(dir_rt,fn_rt),lines=True)

##############################
# --- STEP 2: PARSE JSON --- #

# Inspect the first record; the expression's value is discarded, so this is
# exploratory only (useful in a REPL/notebook, a no-op as a script).
df_json.loc[0]
|
# Build script for the "smatch" package and its C extension module.
#
# NOTE(review): distutils is deprecated and removed in Python 3.12; migrating
# to setuptools would be a drop-in replacement but is left unchanged here.
import distutils
from distutils.core import setup, Extension, Command
import os
import numpy

# The C sources use the NumPy C API, so the NumPy header directory must be
# attached to the extension itself.  (Bug fix: previously a bare string was
# passed as include_dirs to setup(), which distutils ignores with an
# "Unknown distribution option" warning — and include_dirs must be a list.)
ext = Extension(
    "smatch._smatch",
    ["smatch/smatch.c",
     "smatch/vector.c",
     "smatch/tree.c",
     "smatch/healpix.c"],
    include_dirs=[numpy.get_include()],
)

setup(
    name="smatch",
    packages=['smatch'],
    version="0.1",
    ext_modules=[ext],
)
|
from PIL import ImageGrab
import win32gui, win32api, win32con
import time as t
class Window:
"""
Class for interacting with the Minesweeper game itself. Used for
extracting information from it by calling system level functions
and interacting with the application itself.
"""
_window_name = "Minesweeper"
_hwnd = None
def __init__(self):
if self.is_open():
self.update_window_handle()
def is_open(self):
"""
Function for checking if the Minesweeper window is actually
openend. Must be used since otherwise the window specific
actions throw exceptions.
"""
open = True
# Test wheter the window is actually open.
try:
self.update_window_handle()
hwnd = self.get_window_handle()
bounds = self.get_window_bounds()
pass
except Exception:
open = False
return open
def update_window_handle(self):
"""
Function for updating the window handle to the Minesweeper game.
"""
self._hwnd = self.get_window_handle();
def get_window_bounds(self):
"""
Function for retrieving the game's bounds. These are returned in
a Tuple as follows:
(topleft, topright, bottomright, bottomleft)
"""
rect = win32gui.GetWindowRect(self._hwnd)
return rect[0], rect[1], rect[2], rect[3]
def get_window_handle(self):
"""
Function for retrieving the window handle to the Minesweeper
window.
"""
return win32gui.FindWindow(None, self._window_name)
def focus_window(self):
"""
Function for focusing the Minesweeper game's window.
"""
win32gui.SetForegroundWindow(self._hwnd)
def get_window_image(self):
"""
Function for retrieving an image of the Minesweeper game's
window in the PIL (Pillow) format.
"""
bounds = self.get_window_bounds()
self.focus_window()
# Time needed for the window to appear on top.
t.sleep(0.001)
image = ImageGrab.grab(bbox=(bounds))
return image
def move_mouse(self, pos, is_relative = True, x_offset = -8, y_offset = -48):
"""
Function for moving the mouse on the screen.
"""
if is_relative:
# Magic numbers include the offset created by taking an image of
# the whole application window, which also includes the upper
# menu bar. The windows client to screen function on the other
# hand does not take these in consideration, which is why a
# manual offset must be applied here.
pos = win32gui.ClientToScreen(self._hwnd, (pos[0] + x_offset, pos[1] + y_offset))
win32api.SetCursorPos(pos)
def click_mouse(self, pos):
"""
Function for performing a mouseclick on the screen.
"""
self.move_mouse(pos)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, *pos, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, *pos, 0, 0) |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# !! added compatibility to dynamic computational graphs
# !! Modified and MOVED from tensorflow master to be able to use convolutional LSTM cells
"""Module for constructing RNN Cells."""
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
class ConvLSTMCell(rnn_cell_impl.RNNCell):
    """Convolutional LSTM recurrent network cell.

    https://arxiv.org/pdf/1506.04214v1.pdf
    """

    def __init__(self,
                 conv_ndims,
                 input_shape,
                 output_channels,
                 kernel_shape,
                 use_bias=True,
                 skip_connection=False,
                 forget_bias=1.0,
                 initializers=None,
                 name="conv_lstm_cell"):
        """Construct ConvLSTMCell.

        Args:
          conv_ndims: Convolution dimensionality (1, 2 or 3).
          input_shape: Shape of the input as int tuple, excluding the batch size.
          output_channels: int, number of output channels of the conv LSTM.
          kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).
          use_bias: Use bias in convolutions.
          skip_connection: If set to `True`, concatenate the input to the
            output of the conv LSTM. Default: `False`.
          forget_bias: Forget bias.
          initializers: Accepted for interface compatibility but never read by
            this implementation.
          name: Name of the module.

        Raises:
          ValueError: If `skip_connection` is `True` and stride is different from 1
            or if `input_shape` is incompatible with `conv_ndims`.
        """
        super(ConvLSTMCell, self).__init__(name=name)
        # input_shape excludes the batch dimension, so a valid shape has
        # conv_ndims spatial dimensions plus one trailing channel dimension.
        if conv_ndims != len(input_shape)-1:
            raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
                input_shape, conv_ndims))
        self._conv_ndims = conv_ndims
        self._input_shape = input_shape
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._use_bias = use_bias
        self._forget_bias = forget_bias
        self._skip_connection = skip_connection

        # With a skip connection the input channels are concatenated onto
        # the output, enlarging the reported output channel count.
        self._total_output_channels = output_channels
        if self._skip_connection:
            self._total_output_channels += self._input_shape[-1]

        # State keeps the spatial dims of the input but with output channels.
        state_size = tensor_shape.TensorShape(self._input_shape[:-1]
                                              + [self._output_channels])
        self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
        self._output_size = tensor_shape.TensorShape(self._input_shape[:-1]
                                                     + [self._total_output_channels])

    @property
    def output_size(self):
        # Per-timestep output shape (excluding batch).
        return self._output_size

    @property
    def state_size(self):
        # LSTMStateTuple of (cell state shape, hidden state shape).
        return self._state_size

    def call(self, inputs, state, scope=None):
        # state is an LSTMStateTuple: (cell state, hidden state).
        cell, hidden = state
        # A single convolution over [inputs, hidden] produces all four gate
        # pre-activations at once (hence 4 * output_channels).
        new_hidden = _conv([inputs, hidden],
                           self._kernel_shape,
                           4*self._output_channels,
                           self._use_bias)
        # Split along the channel axis (last axis: conv_ndims spatial axes + 1).
        gates = array_ops.split(value=new_hidden,
                                num_or_size_splits=4,
                                axis=self._conv_ndims+1)

        input_gate, new_input, forget_gate, output_gate = gates
        # Standard LSTM update with convolutional gates; forget_bias shifts the
        # forget gate toward 1 early in training.
        new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
        new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
        output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)

        if self._skip_connection:
            # Concatenate raw inputs onto the output along the channel axis.
            output = array_ops.concat([output, inputs], axis=-1)
        new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
        return output, new_state
class Conv1DLSTMCell(ConvLSTMCell):
    """1D Convolutional LSTM recurrent network cell.

    https://arxiv.org/pdf/1506.04214v1.pdf
    """

    def __init__(self, name="conv_1d_lstm_cell", **kwargs):
        """Construct Conv1DLSTM. See `ConvLSTMCell` for more details."""
        # Bug fix: forward `name` explicitly — previously it was accepted but
        # silently dropped, so the base-class default "conv_lstm_cell" was
        # always used instead.
        super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
    """2D Convolutional LSTM recurrent network cell.

    https://arxiv.org/pdf/1506.04214v1.pdf
    """

    def __init__(self, name="conv_2d_lstm_cell", **kwargs):
        """Construct Conv2DLSTM. See `ConvLSTMCell` for more details."""
        # Bug fix: forward `name` explicitly — previously it was accepted but
        # silently dropped, so the base-class default "conv_lstm_cell" was
        # always used instead.
        super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
    """3D Convolutional LSTM recurrent network cell.

    https://arxiv.org/pdf/1506.04214v1.pdf
    """

    def __init__(self, name="conv_3d_lstm_cell", **kwargs):
        """Construct Conv3DLSTM. See `ConvLSTMCell` for more details."""
        # Bug fix: forward `name` explicitly — previously it was accepted but
        # silently dropped, so the base-class default "conv_lstm_cell" was
        # always used instead.
        super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)
def _conv(args,
          filter_size,
          num_features,
          bias,
          bias_start=0.0):
    """convolution:

    Args:
      args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,
      batch x n, Tensors.
      filter_size: int tuple of filter height and width.
      num_features: int, number of features.
      bias: bool, whether to add a learned bias term to the result.
      bias_start: starting value to initialize the bias; 0 by default.

    Returns:
      A 3D, 4D, or 5D Tensor with shape [batch ... num_features]

    Raises:
      ValueError: if some of the arguments has unspecified or wrong shape.
    """
    # Calculate the total size of arguments on dimension 1.
    total_arg_size_depth = 0
    shapes = [a.get_shape().as_list() for a in args]
    shape_length = len(shapes[0])
    # Validate ranks: every arg must be 3D/4D/5D and all must match; the
    # channel (last) dimensions are summed since args get concatenated.
    for shape in shapes:
        if len(shape) not in [3,4,5]:
            raise ValueError("Conv Linear expects 3D, 4D or 5D arguments: %s" % str(shapes))
        if len(shape) != len(shapes[0]):
            raise ValueError("Conv Linear expects all args to be of same Dimensiton: %s" % str(shapes))
        else:
            total_arg_size_depth += shape[-1]
    dtype = [a.dtype for a in args][0]

    # determine correct conv operation by input rank
    # (rank checks above guarantee one of these branches is taken)
    if shape_length == 3:
        conv_op = nn_ops.conv1d
        strides = 1
    elif shape_length == 4:
        conv_op = nn_ops.conv2d
        strides = shape_length*[1]
    elif shape_length == 5:
        conv_op = nn_ops.conv3d
        strides = shape_length*[1]

    # Now the computation.
    # Kernel shape: spatial filter dims + [input channels, output channels].
    kernel = vs.get_variable(
        "kernel",
        filter_size + [total_arg_size_depth, num_features],
        dtype=dtype)
    if len(args) == 1:
        res = conv_op(args[0], kernel, strides, padding='SAME')
    else:
        # Concatenate along the channel axis and convolve once.
        input_tensor = array_ops.concat(axis=shape_length-1, values=args)
        inshape = tf.shape(input_tensor)
        depth_batch_size = inshape[0]
        # NOTE(review): outshape has only 3 entries regardless of rank — the
        # zero-batch branch presumably targets the 3D/conv1d case; confirm for
        # 4D/5D inputs.
        outshape = [inshape[0], inshape[1], tf.shape(kernel)[-1]]
        # Guard against an empty (zero-sized) batch for dynamic graphs.
        res = tf.cond(tf.equal(depth_batch_size, 0),
                      lambda: tf.zeros(outshape),
                      lambda: conv_op(input_tensor, kernel, strides, padding='SAME'))
    if not bias:
        return res
    bias_term = vs.get_variable(
        "biases", [num_features],
        dtype=dtype,
        initializer=init_ops.constant_initializer(
            bias_start, dtype=dtype))
    return res + bias_term
|
# built in packages
from itertools import islice
import math
import re
import sys
import os
import csv
from collections import Counter
# dependencies
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import distance
import json
from scipy.stats import spearmanr
# import scikits.statsmodels as sm
# from statsmodels.distributions.empirical_distribution import ECDF
from nltk.tokenize import sent_tokenize as nltk_sent_tokenize
from nltk.stem import WordNetLemmatizer
# ucca
UCCA_DIR = '/home/borgr/ucca/ucca'
ASSESS_DIR = os.path.dirname(os.path.realpath(__file__)) + os.sep
# TUPA_DIR = '/cs/labs/oabend/borgr/tupa'
# UCCA_DIR = TUPA_DIR + '/ucca'
sys.path.append(UCCA_DIR + '/scripts/distances')
sys.path.append(UCCA_DIR + '/ucca')
sys.path.append(UCCA_DIR)
import align
# constants
lemmatizer = WordNetLemmatizer()

# Regex character classes for sentence-ending punctuation.  A period may also
# end an abbreviation, so it is kept apart from the "definite" enders.
ENDERS_DEFINITELY = r"\?\!\;"
ENDERS = r"\." + ENDERS_DEFINITELY
SENTENCE_NOT_END = "[^" + ENDERS + "]"
SENTENCE_END = "[" + ENDERS + "]"
# Matches "... X. <word>" where X is a single letter — used to distinguish
# periods after initials/abbreviations from real sentence boundaries.
NOT_ABBREVIATION_PATTERN = re.compile(r"(.*?\s+\w\s*\.)(\s*\w\w.*)")
# Splits at punctuation that definitely ends a sentence (?, !, ;).
SENTENCE_DEFINITELY_PATTERN = re.compile(
    r"(.+\s*[" + ENDERS_DEFINITELY + r"]\s*)(.+)")
# NOTE(review): the next two patterns use non-raw strings containing \w, \s
# and \. — they work, but raw strings would avoid invalid-escape warnings.
SENTENCE_ENDS_WITH_NO_SPACE_PATTERN = re.compile(
    "(.*?\w\w" + SENTENCE_END + ")(\w+[^\.].*)")
SPACE_BEFORE_SENTENCE_PATTERN = re.compile(
    "(.*?\s" + SENTENCE_END + "(\s*\")?)(.*)")
# Abbreviations like "i.e.", "e.g." and " c. " whose periods must not be
# treated as sentence ends; SPECIAL_WORDS_REPLACEMENTS is index-aligned.
SPECIAL_WORDS_PATTERNS = [re.compile(r"i\s*\.\s*e\s*\.", re.IGNORECASE), re.compile(
    r"e\s*\.\s*g\s*\.", re.IGNORECASE), re.compile(r"\s+c\s*\.\s+", re.IGNORECASE)]
SPECIAL_WORDS_REPLACEMENTS = ["ie", "eg", " c."]
MAX_SENTENCES = 1400  # accounts for the maximum number of lines to get from the database
# Thresholds used by the alignment/conservatism heuristics.
MAX_DIST = 2
SHORT_WORD_LEN = 4
CHANGING_RATIO = 5
PATH = ASSESS_DIR + r"/data/paragraphs/"
# Identifiers for the supported simplification measures.
SARI = "sari"
MAX = "max"
BLEU = "BLEU"
SIMPLIFICATION_MEASURES = [SARI, BLEU, MAX]
# Labels describing how a pair of sentences was aligned between text versions.
ORDERED = "original order"
FIRST_LONGER = "sentence splitted"
SECOND_LONGER = "sentence concatenated"
ORDERED_ALIGNED = "ORDERED with align"
FIRST_LONGER_ALIGNED = "first longer with align"
SECOND_LONGER_ALIGNED = "second longer with align"
REMOVE_LAST = "remove last"
PARAGRAPH_END = "paragraph end"
COMMA_REPLACE_FIRST = ", in second sentence became the end of a new sentence (first longer)"
COMMA_REPLACE_SECOND = ", in first sentence became the end of a new sentence (second longer)"
NO_ALIGNED = ""
# Suffix used to distinguish output of different experiment runs.
trial_name = ""
def main():
    """Entry point: run the currently selected conservatism experiment."""
    # alternative experiments, enable as needed:
    # UCCASim_conservatism()
    # outputs_conservatism()
    # reranking_simplification_conservatism("moses")
    # reranking_simplification_conservatism()
    # reranking_simplification_conservatism("moses", measure=MAX)
    # reranking_simplification_conservatism(measure=MAX)
    # reranking_simplification_conservatism("moses", measure=BLEU)
    # reranking_simplification_conservatism(measure=BLEU)
    ranking_conservatism()
def outputs_conservatism():
    """Compare the conservatism of each CoNLL-2014 system output (and the
    gold reference) against the original learner text.

    Results are cached in a JSON backup file keyed by system name; cached
    entries are reused instead of recomputed.  At the end the collected
    results are plotted and converted to csv.

    The original version repeated an identical compare-or-load block once
    per system; this version drives the same logic from a single table,
    preserving the original order of results.
    """
    change_date = "160111"
    filename = "results/results" + change_date + ".json"
    learner_file = "conll.tok.orig"
    gold_file = "corrected_official-2014.0.txt.comparable"
    from fce import CORRECTED_FILE as fce_gold_file
    from fce import LEARNER_FILE as fce_learner_file
    # (result name, output file) per system, in the order the results
    # should appear in res_list / the plot
    system_files = [
        ("cuui", "CUUI"),
        ("camb", "CAMB"),
        ("amu", "AMU"),
        ("RoRo", "conll14st.output.1cleaned"),  # ACL2016 Rozovskaya & Roth
        ("jmgr", "JMGR"),
        ("Char", "filtered_test.txt"),  # character-based system
        ("rac", "RAC"),
        ("umc", "UMC"),
        ("sjtu", "SJTU"),
        ("iitb", "IITB"),
        ("ipn", "IPN"),
        ("nthu", "NTHU"),
        ("pku", "PKU"),
        ("post", "POST"),
        ("ufc", "UFC"),
    ]
    origin = read_paragraph(learner_file, preprocess_paragraph)
    gold = read_paragraph(gold_file, preprocess_paragraph_minimal)
    # all system outputs are read up front, like the original flow, so a
    # missing file fails early even when its result is cached
    systems = [(name, read_paragraph(fl)) for name, fl in system_files]
    # fce corpora are read for parity with the original flow; currently only
    # used by commented-out comparisons
    fce_gold = read_paragraph(fce_gold_file)
    fce_learner = read_paragraph(fce_learner_file)
    fce_learner_full = read_paragraph(
        fce_learner_file, preprocess_paragraph_minimal)
    fce_gold_full = read_paragraph(fce_gold_file, preprocess_paragraph_minimal)
    res_list = []
    old_res = read(filename) if filename else {}
    # cached entries were dumped without their name; restore it in place
    for (name, res) in old_res.items():
        res.append(name)
    dump(res_list, filename)
    # compare gold to origin — special tokenizers (gold is line-aligned)
    name = "gold"
    print(name)
    if name not in old_res:
        broken, words_differences, index_differences, spearman_differences, aligned_by = compare_paragraphs(
            origin, gold, sent_tokenize_default, sent_token_by_char)
        res_list.append((broken, words_differences, index_differences,
                         spearman_differences, aligned_by, name))
        dump(res_list, filename)
    else:
        print(len(old_res[name][1]))
        # unpack the cached result and rebuild the sentence lists; kept for
        # the (commented-out) per-sentence inspection printing
        broken, word_differences, index_differences, spearman_differences, aligned_by, name = old_res[
            name]
        origin_sentences = list(get_sentences_from_endings(origin, broken[0]))
        corrected_sentences = list(get_sentences_from_endings(gold, broken[1]))
        res_list.append(old_res[name])
    # compare each system output to origin, reusing cached results
    for name, content in systems:
        print(name)
        if name not in old_res:
            broken, words_differences, index_differences, spearman_differences, aligned_by = compare_paragraphs(
                origin, content)
            res_list.append((broken, words_differences, index_differences,
                             spearman_differences, aligned_by, name))
            dump(res_list, filename)
        else:
            res_list.append(old_res[name])
    dump(res_list, filename)
    plot_comparison(res_list)
    convert_file_to_csv(filename)
def reranking_simplification_conservatism(k_best="nisioi", measure=SARI):
    """Compare the conservatism of simplification re-ranking outputs.

    k_best - which k-best source the rank files were produced from
    measure - one of SARI, BLEU or MAX; selects both the origin file and
              the results backup file

    Raises ValueError for an unknown measure (the original silently fell
    through and crashed later with NameError).
    """
    change_date = "011126"
    # mutually exclusive cases: use elif (the original used independent ifs)
    if measure == MAX:
        complex_file = "simplification_rank_results_" + "max_" + k_best + "_origin"
        filename = "results/simplification_reranking_results" + \
            "max_" + k_best + change_date + ".json"
    elif measure == BLEU:
        complex_file = "simplification_rank_results_" + "BLEU" + k_best + "_origin"
        filename = "results/" + "simplification_reranking_results_" + "BLEU" + \
            k_best + change_date + ".json"
    elif measure == SARI:
        complex_file = "simplification_rank_results_" + k_best + "_origin"
        filename = "results/simplification_reranking_results" + \
            k_best + change_date + ".json"
    else:
        raise ValueError("unknown simplification measure: %r" % (measure,))
    (path, dirs, files) = next(os.walk(PATH))
    filenames = []
    names = []
    # filenames.append("test.8turkers.tok.simp")
    # names.append("gold")
    for fl in files:
        if "simplification" in fl and "origin" not in fl and k_best in fl:
            # SARI files carry no measure tag, so for SARI accept only files
            # without any measure name in them (renamed the generator
            # variable, which shadowed the `measure` parameter)
            if (measure == SARI and (all(m not in fl for m in SIMPLIFICATION_MEASURES))) or measure in fl:
                filenames.append(fl)
                names.append(fl[-5:])  # last 5 chars identify the variant
                if "gold" in fl:
                    names[-1] = "gold"
    argsort = np.argsort(names)
    names = np.array(names)[argsort]
    filenames = np.array(filenames)[argsort]
    origin = read_text(complex_file)
    compare(filenames, names, filename, origin,
            read_text, compare_aligned_paragraphs)
def ranking_conservatism():
    """Compare the conservatism of re-ranking outputs produced with
    different numbers of reference subsets, sorted by subset size."""
    change_date = "170531"
    filename = "results/reranking_results" + change_date + ".json"
    all_file = "first_rank_resultsALL"
    BN_file = "first_rank_resultsBN"
    # NUCLEA/NUCLE variants intentionally left out
    path, dirs, files = next(os.walk(PATH))
    filenames = [fl for fl in files if "subset" in fl]
    names = [name[18:].replace("subset", " refs") for name in filenames]
    # the leading one- or two-digit number of each name is its subset size
    nums = []
    for name in names:
        digits = 2 if name[1].isdigit() else 1
        nums.append(int(name[:digits]))
    nums = nums + [15, 10]
    filenames = filenames + [all_file, BN_file]
    names = names + ["all", "BN"]
    order = np.argsort(nums)
    names = np.array(names)[order]
    filenames = np.array(filenames)[order]
    print(names, filenames)
    learner_file = "conll.tok.orig"
    origin = read_paragraph(learner_file, preprocess_paragraph)
    compare(filenames, names, filename, origin)
def UCCASim_conservatism():
    """Compare the conservatism of UCCA-similarity re-ranked outputs (one
    file per interpolation weight in [0, 1]) plus three baselines."""
    change_date = "170531"
    all_file = "first_rank_resultsALL"
    NUCLEA_file = "first_rank_resultsNUCLEA"
    ACL2016RozovskayaRothOutput_file = "conll14st.output.1cleaned"
    base_rerank = "uccasim_rank_results"
    filenames = [all_file, ACL2016RozovskayaRothOutput_file, NUCLEA_file]
    names = ["all", "RoRo", "NUCLEA"]
    # one reranked file per interpolation base weight
    for base in np.linspace(0, 1, 11):
        filenames.append(str(base) + "_" + base_rerank)
        names.append(str(base) + "combined")
    filename = "results/ucca_reranking_results" + change_date + ".json"
    learner_file = "conll.tok.orig"
    origin = read_paragraph(learner_file, preprocess_paragraph)
    compare(filenames, names, filename, origin)
###########################################################
#### GENEERAL NLP ###
###########################################################
def is_word(w):
    """Return True when *w* is a real word token: not the aligner's empty
    placeholder and containing at least one word character."""
    # raw string avoids the invalid "\w" escape of the original;
    # `is not None` replaces the `True if ... else False` anti-idiom
    return w != align.EMPTY_WORD and re.search(r"\w", w) is not None
def split_by_pattern(tokens, p, first=1, second=2):
    """ gets a list of tokens and splits tokens by a compiled regex pattern
    param:
    tokens - list of strings representing sentence or sentences
    p - compiled regex pattern or object containing method match() that returns match object
    first - the group number that represents the first token found
    second - the group number that represents the second token found"""
    res = []
    # (dropped the unused enumerate index of the original)
    for token in tokens:
        matched = p.match(token)
        while matched:
            # the two groups must reassemble the token exactly, otherwise
            # splitting would lose or duplicate text
            assert(matched.group(first) + matched.group(second) == token)
            res.append(matched.group(first))
            # keep splitting the remainder until the pattern stops matching
            token = matched.group(second)
            matched = p.match(token)
        if token.strip():
            res.append(token)
    return res
def concat_empty(tokens):
    """Merge contentless tokens into their predecessor.

    A token with no two consecutive letters (punctuation, digits,
    whitespace) is appended to the previous kept token; leading
    contentless tokens are dropped."""
    merged = []
    for piece in tokens:
        if re.search(r"[A-Za-z][A-Za-z]", piece) is not None:
            merged.append(piece)
        elif merged:
            # glue the contentless piece onto the last real token
            merged[-1] += piece
    return merged
def sent_token_by_char(s, char="\n"):
    """Split *s* on *char* and merge contentless pieces into their
    predecessor (see concat_empty)."""
    pieces = s.split(char)
    return concat_empty(pieces)
def sent_tokenize_default(s):
    """Split text into sentences: NLTK first, then regex repair passes for
    sentence breaks NLTK tends to miss."""
    # (pattern, first-group, second-group) repair passes, applied in order
    passes = (
        (SENTENCE_DEFINITELY_PATTERN, 1, 2),
        (SENTENCE_ENDS_WITH_NO_SPACE_PATTERN, 1, 2),
        (SPACE_BEFORE_SENTENCE_PATTERN, 1, 3),
        (NOT_ABBREVIATION_PATTERN, 1, 2),
    )
    tokens = nltk_sent_tokenize(s)
    for pattern, first, second in passes:
        tokens = split_by_pattern(tokens, pattern, first, second)
    return concat_empty(tokens)
def word_tokenize(s):
    """Tokenize a sentence into a list of word tokens, dropping
    punctuation-only and placeholder tokens (see is_word)."""
    return list(filter(is_word, align.word_tokenize(s)))
def preprocess_paragraph_minimal(p):
    """Strip a single trailing newline from *p*, if present.

    Unlike the original (`p[-1]`), an empty string is returned unchanged
    instead of raising IndexError."""
    if p.endswith("\n"):
        p = p[:-1]
    return p
def preprocess_paragraph(p):
    """Preprocess a paragraph for sentence alignment: canonicalize dotted
    abbreviations, add missing sentence-final periods before newlines,
    collapse whitespace runs and duplicate sentence enders."""
    # canonicalize i.e. / e.g. / c. so their dots don't split sentences
    for pattern, replacement in zip(SPECIAL_WORDS_PATTERNS,
                                    SPECIAL_WORDS_REPLACEMENTS):
        p = re.sub(pattern, replacement, p)
    # p = re.sub(r"\s+\.\s+", r".", p)
    # a line not ending with a sentence ender gets a period before its \n
    # (raw/escaped strings replace the original's invalid "\s" escapes)
    p = re.sub(r"(" + SENTENCE_NOT_END + r")(\s*\n)", r"\1.\2", p)
    # drop a period directly following a quote-closed sentence end
    p = re.sub("(\\.\\s*['\"])\\s*\\.", r"\1", p)
    # collapse all whitespace to single spaces
    p = re.sub(r"\s+", r" ", p)
    # collapse consecutive sentence enders into one
    p = re.sub(r"(" + SENTENCE_END + r"\s*)" + SENTENCE_END, r"\1", p)
    return p
def preprocess_word(w):
    """Drop one trailing non-alphanumeric character (e.g. final
    punctuation), then defer to the aligner's word normalization."""
    trimmed = w[:-1] if w and not w[-1].isalnum() else w
    return align.preprocess_word(trimmed)
def approximately_same_word(w1, w2):
    """ returns if both words are considered the same word with a small fix or not"""
    # the alignment placeholder never counts as a word match
    # suggestion: should "the" "a" etc be considered in a different way?
    # maybe they should not but not in this function
    # suggestion: words such as in at on etc, might be considered all equal
    # to each other and to the empty_word for our purpose
    if w1 == align.EMPTY_WORD or w2 == align.EMPTY_WORD:
        return False
    lemma1 = lemmatizer.lemmatize(w1)
    lemma2 = lemmatizer.lemmatize(w2)
    # long words tolerate MAX_DIST edits, short ones only a single edit
    both_long = len(lemma1) > SHORT_WORD_LEN and len(lemma2) > SHORT_WORD_LEN
    allowed_dist = MAX_DIST if both_long else 1
    return distance.levenshtein(lemma1, lemma2) <= allowed_dist
def _choose_ending_position(sentences, endings, i):
    """ i - sentence number
    sentences - list of sentences
    endings - list of sentences positions endings
    return position, last word in the i'th sentence"""
    # scan the sentence backwards for the last "real" word (more than one
    # character after normalization) to serve as the ending anchor word
    for word in reversed(word_tokenize(sentences[i])):
        word = preprocess_word(word)
        if len(word) > 1:
            return endings[i], word
    # no multi-character word found: report the context and fail loudly
    print("sentence contains no words:\n\"", sentences[i], "\"")
    print("sentence before", sentences[i - 1])
    print("sentence after", sentences[i + 1])
    assert(False)
    # reached only when asserts are disabled (python -O): fall back to the
    # last token even if it is short
    return endings[i], preprocess_word(word_tokenize(sentences[i])[-1])
def index_diff(s1, s2):
    """Count word pairs aligned out of order between two sentences.

    Pairs are sorted by their position in the second sentence; every
    real-word pair whose first-sentence index steps backwards counts as
    one reordering."""
    alignment, indexes = align_sentence_words(s1, s2, True)
    pairs = [(w1, w2, i1, i2)
             for (w1, w2), (i1, i2) in zip(alignment, indexes)]
    pairs.sort(key=lambda item: item[3])
    highest = -1
    reorderings = 0
    for w1, w2, i1, i2 in pairs:
        if not (is_word(w1) and is_word(w2)):
            continue
        if i1 < highest:
            # an out-of-order pair must be a real alignment on both sides
            assert (i1 != -1 and i2 != -1)
            reorderings += 1
        highest = i1
    return reorderings
def spearman_diff(s1, s2):
    """Spearman rank correlation between the positions of aligned real-word
    pairs in the two sentences (returns the scipy spearmanr result)."""
    alignment, indexes = align_sentence_words(s1, s2, True)
    pairs = sorted(
        ((w1, w2, i1, i2)
         for (w1, w2), (i1, i2) in zip(alignment, indexes)),
        key=lambda item: item[3])
    positions1 = []
    positions2 = []
    for w1, w2, i1, i2 in pairs:
        if is_word(w1) and is_word(w2):
            positions1.append(i1)
            positions2.append(i2)
    return spearmanr(np.asarray(positions1), np.asarray(positions2))
def word_diff(s1, s2):
    """ counts the number of aligned words that are not considered approximately the same word in 2 sentences"""
    # (dropped the unused `indexes` result and unused enumerate index)
    alignment, _ = align_sentence_words(s1, s2, True)
    return sum(
        not approximately_same_word(preprocess_word(w1), preprocess_word(w2))
        for w1, w2 in alignment
        if is_word(w1) or is_word(w2))
def diff_words(s1, s2):
    """ returns the aproximately different words in the two sentences"""
    # (dropped the unused `indexes` result and unused enumerate index)
    alignment, _ = align_sentence_words(s1, s2, True)
    return [(w1, w2)
            for w1, w2 in alignment
            if (is_word(w1) or is_word(w2)) and
            not approximately_same_word(preprocess_word(w1),
                                        preprocess_word(w2))]
def calculate_endings(sentences, paragraph):
    """Return the ending position of each sentence within *paragraph*.

    After each sentence's characters, trailing non-alphanumeric characters
    (punctuation, whitespace) are swallowed into that sentence's ending."""
    endings = []
    pos = 0
    total = len(paragraph)
    for sentence in sentences:
        pos += len(sentence)
        # extend past punctuation/whitespace up to the next alphanumeric
        while pos < total and not paragraph[pos].isalnum():
            pos += 1
        endings.append(pos)
    return endings
def align_sentence_words(s1, s2, isString, empty_cache=False):
    """aligns words from sentence s1 to s2, allows caching
    returns arrays of word tuples and indexes tuples

    When empty_cache is True the cache is cleared and nothing is returned;
    the other arguments are ignored in that case."""
    if empty_cache:
        align_sentence_words.cache = {}
        return
    if (s1, s2, isString) in align_sentence_words.cache:
        return align_sentence_words.cache[(s1, s2, isString)]
    elif (s2, s1, isString) in align_sentence_words.cache:
        # NOTE(review): results are stored under the reversed key and served
        # for either argument order — this assumes align.align(s1, s2) and
        # align.align(s2, s1) are interchangeable; confirm against the
        # aligner's contract
        return align_sentence_words.cache[(s2, s1, isString)]
    else:
        res = align.align(s1, s2, isString)
        align_sentence_words.cache[(s2, s1, isString)] = res
        return res
# function-attribute memoization cache, reset via empty_cache=True
align_sentence_words.cache = {}
###########################################################
#### WORDS CHANGED ###
###########################################################
def aligned_ends_together(shorter, longer, reg1, reg2, addition="", force=False):
    """ checks if two sentences, ending in two regularized words ends at the same place.

    shorter, longer - the two sentence texts (longer gets *addition* appended)
    reg1, reg2 - the regularized (preprocessed) ending words of each side
    force - skip the requirement that an ending aligns to the empty word
    """
    sentence1 = shorter
    sentence2 = longer + addition
    # NOTE(review): addition_words is computed but never used below —
    # appears to be leftover from an earlier version
    addition_words = word_tokenize(addition) if addition else word_tokenize(longer)[
        len(word_tokenize(shorter)):]
    addition_words = set(preprocess_word(w) for w in addition_words)
    tokens1 = [preprocess_word(w) for w in word_tokenize(sentence1)]
    tokens2 = [preprocess_word(w) for w in word_tokenize(sentence2)]
    count1 = Counter()
    # if words appear more than once make each word unique by order of
    # appearence
    # NOTE(review): Counter.update(token) iterates the string's characters,
    # so multi-character duplicates are never renamed; tokens1/tokens2 are
    # only used for their lengths below, so this has no effect on the result
    for i, token in enumerate(tokens1):
        if count1[token] > 0:
            tokens1[i] = str(count1[token]) + token
        if is_word(token):
            count1.update(token)
    count2 = Counter()
    for i, token in enumerate(tokens2):
        if count2[token] > 0:
            tokens2[i] = str(count2[token]) + token
        if is_word(token):
            count2.update(token)
    slen1 = len(tokens1)
    slen2 = len(tokens2)
    # sentences whose lengths differ too much cannot share an ending
    if abs(slen1 - slen2) > min(slen1, slen2) / CHANGING_RATIO:
        return False
    aligned, indexes = align_sentence_words(sentence1, sentence2, True)
    aligned = set(
        map(lambda x: (preprocess_word(x[0]), preprocess_word(x[1])), aligned))
    mapping = dict(aligned)
    rev = dict(align.reverse_mapping(aligned))
    empty = preprocess_word(align.EMPTY_WORD)
    # the endings match when one side's ender aligns to the empty word (or
    # force is set) and the other side's ender aligns to a similar word
    if force or ((reg1, empty) in aligned):
        if approximately_same_word(reg2, rev[reg2]):
            return True
    if force or ((empty, reg2) in aligned):
        if approximately_same_word(reg1, mapping[reg1]):
            return True
    return False
def break2common_sentences(p1, p2, sent_tokenize1, sent_tokenize2):
    """finds the positions of the common sentence ending
    Breaking is done according to the text of both passages
    returns two lists each containing positions of sentence endings
    guarentees same number of positions is acquired and the last position is the passage end
    return:
        positions1, positions2 - lists of indexes of the changed """
    aligned_by = []
    s1 = sent_tokenize1(p1)
    s2 = sent_tokenize2(p2)
    # calculate sentence endings positions
    endings1 = calculate_endings(s1, p1)
    endings2 = calculate_endings(s2, p2)
    # find matching endings to match
    positions1 = []
    positions2 = []
    i = 0
    j = 0
    inc = False
    force = False
    # two-pointer sweep; `inc` defers the joint i/j advance to the next
    # iteration so every matching branch can simply `continue`
    while i < len(s1) and j < len(s2):
        one_after1 = "not_initialized"
        one_after2 = "not_initialized"
        # create a for loop with two pointers
        if inc:
            i += 1
            j += 1
            inc = False
            continue
        inc = True
        position1, reg1 = _choose_ending_position(s1, endings1, i)
        position2, reg2 = _choose_ending_position(s2, endings2, j)
        # easiest case: both sentences end with (approximately) the same word
        if approximately_same_word(reg1, reg2):
            aligned_by.append(ORDERED)
            positions1.append(position1)
            positions2.append(position2)
            continue
        # deal with addition or subtraction of a sentence ending
        slen1 = len(word_tokenize(s1[i]))
        slen2 = len(word_tokenize(s2[j]))
        if i + 1 < len(s1) and slen1 < slen2:
            pos_after1, one_after1 = _choose_ending_position(
                s1, endings1, i + 1)
            if approximately_same_word(one_after1, reg2):
                aligned_by.append(FIRST_LONGER)
                positions1.append(pos_after1)
                positions2.append(position2)
                i += 1
                continue
        if j + 1 < len(s2) and slen2 < slen1:
            pos_after2, one_after2 = _choose_ending_position(
                s2, endings2, j + 1)
            if approximately_same_word(reg1, one_after2):
                aligned_by.append(SECOND_LONGER)
                positions1.append(position1)
                positions2.append(pos_after2)
                j += 1
                continue
        # no alignment found with 2 sentences
        # check if a word was added to the end of one of the sentences
        if aligned_ends_together(s1[i], s2[j], reg1, reg2):
            aligned_by.append(ORDERED_ALIGNED)
            positions1.append(position1)
            positions2.append(position2)
            continue
        # if no match is found twice and we had ORDERED match, it might have
        # been a mistake
        if (positions1 and positions2 and
                aligned_by[-1] == NO_ALIGNED and aligned_by[-2] == NO_ALIGNED):
            # backtrack: undo the last accepted match and retry with force
            removed_pos1 = positions1.pop()
            removed_pos2 = positions2.pop()
            aligned_by.append(REMOVE_LAST)
            i -= 3
            j -= 3
            position1, reg1 = _choose_ending_position(s1, endings1, i)
            position2, reg2 = _choose_ending_position(s2, endings2, j)
            pos_after1, one_after1 = _choose_ending_position(
                s1, endings1, i + 1)
            pos_after2, one_after2 = _choose_ending_position(
                s2, endings2, j + 1)
            pos_2after1, two_after1 = _choose_ending_position(
                s1, endings1, i + 2)
            pos_2after2, two_after2 = _choose_ending_position(
                s2, endings2, j + 2)
            force = True
            # check if a word was added to the end of one of the sentences
            # Also, deal with addition or subtraction of a sentence ending
            if i + 1 < len(s1) and slen1 < slen2:
                if aligned_ends_together(s2[j], s1[i], reg2, one_after1, addition=s1[i + 1], force=force):
                    aligned_by.append(FIRST_LONGER_ALIGNED)
                    positions1.append(pos_after1)
                    positions2.append(position2)
                    i += 1
                    continue
            if j + 1 < len(s2) and slen2 < slen1:
                if aligned_ends_together(s1[i], s2[j], reg1, one_after2, addition=s2[j + 1], force=force):
                    aligned_by.append(SECOND_LONGER_ALIGNED)
                    positions1.append(position1)
                    positions2.append(pos_after2)
                    j += 1
                    continue
            # removing last yielded no consequences keep in regular way
            if aligned_by[-1] == REMOVE_LAST:
                # try 3 distance
                if i + 2 < len(s1) and slen1 < slen2:
                    if aligned_ends_together(s2[j], s1[i], reg2, two_after1, addition=s1[i + 1] + s1[i + 2], force=force):
                        aligned_by.append(FIRST_LONGER_ALIGNED)
                        aligned_by.append(FIRST_LONGER_ALIGNED)
                        positions1.append(pos_2after1)
                        positions2.append(position2)
                        i += 2
                        continue
                if j + 2 < len(s2) and slen2 < slen1:
                    if aligned_ends_together(s1[i], s2[j], reg1, two_after2, addition=s2[j + 1] + s2[j + 2], force=force):
                        aligned_by.append(SECOND_LONGER_ALIGNED)
                        aligned_by.append(SECOND_LONGER_ALIGNED)
                        positions1.append(position1)
                        positions2.append(pos_2after2)
                        j += 2
                        continue
                # fallback was unnecesary
                positions1.append(removed_pos1)
                positions2.append(removed_pos2)
                i += 2
                j += 2
        # check if a , was replaced by a sentence ender
        if positions1 and slen2 < slen1:
            splitter = reg2 + ","
            comma_index = s1[i].find(splitter)
            if comma_index == -1:
                splitter = reg2 + " ,"
                comma_index = s1[i].find(splitter)
            if comma_index != -1:
                # split s1[i] at the comma and record the synthetic ending
                comma_index += len(splitter)
                aligned_by.append(COMMA_REPLACE_SECOND)
                positions1.append(positions1[-1] + comma_index)
                positions2.append(position2)
                s1 = s1[:i] + [s1[i][:comma_index],
                               s1[i][comma_index:]] + s1[i + 1:]
                endings1 = endings1[
                    :i] + [endings1[i - 1] + comma_index] + endings1[i:]
                continue
        if positions2 and slen1 < slen2:
            splitter = reg1 + ","
            comma_index = s2[j].find(splitter)
            if comma_index == -1:
                splitter = reg1 + " ,"
                comma_index = s2[j].find(splitter)
            if comma_index != -1:
                comma_index += len(splitter)
                aligned_by.append(COMMA_REPLACE_FIRST)
                positions2.append(positions2[-1] + comma_index)
                positions1.append(position1)
                s2 = s2[:j] + [s2[j][:comma_index],
                               s2[j][comma_index:]] + s2[j + 1:]
                endings2 = endings2[
                    :j] + [endings2[j - 1] + comma_index] + endings2[j:]
                continue
        aligned_by.append(NO_ALIGNED)
    # add last sentence in case skipped
    position1, reg1 = _choose_ending_position(s1, endings1, -1)
    position2, reg2 = _choose_ending_position(s2, endings2, -1)
    if (not positions1) or (not positions2) or (
            positions1[-1] != position1 and positions2[-1] != position2):
        positions1.append(endings1[-1])
        positions2.append(endings2[-1])
        aligned_by.append(PARAGRAPH_END)
    elif positions1[-1] != position1 and positions2[-1] == position2:
        positions1[-1] = endings1[-1]
        aligned_by.append(PARAGRAPH_END)
    elif positions1[-1] == position1 and positions2[-1] != position2:
        positions2[-1] = endings2[-1]
        aligned_by.append(PARAGRAPH_END)
    return positions1, positions2, aligned_by
def get_sentences_from_endings(paragraph, endings):
    """Yield the sentences of *paragraph* delimited by the given ending
    positions (each ending is exclusive and starts the next sentence)."""
    start = 0
    for end in endings:
        yield paragraph[start:end]
        start = end
def calculate_conservatism(origin_sentences, corrected_sentences):
    """Compute the three per-sentence-pair conservatism measures —
    reordering count, Spearman rank correlation and changed-word count —
    and print pairs that changed many words (dif > 10) for inspection."""
    print("calculating conservatism")
    pairs = list(zip(origin_sentences, corrected_sentences))
    index_differences = [index_diff(orig, cor) for orig, cor in pairs]
    spearman_differences = [spearman_diff(orig, cor)[0] for orig, cor in pairs]
    word_differences = [word_diff(orig, cor) for orig, cor in pairs]
    print("comparing done, printing interesting results")
    for i, dif in enumerate(word_differences):
        # threshold of 10 changed words marks a pair worth inspecting
        if dif > 10:
            print("-------\nsentences:\n",
                  corrected_sentences[i], "\norignal:\n", origin_sentences[i])
            print("word dif:", dif)
            print("match num:", i)
    return word_differences, index_differences, spearman_differences
def compare_aligned_paragraphs(origin, corrected, break_sent1=sent_token_by_char, break_sent2=sent_token_by_char):
    """Compare two paragraphs that are already sentence-aligned (one
    sentence per line), skipping the expensive ending-alignment step."""
    origin_sentences = break_sent1(origin)
    corrected_sentences = break_sent2(corrected)
    # newline positions stand in for the matched sentence endings
    newlines1 = [idx for idx, ch in enumerate(origin) if ch == "\n"]
    newlines2 = [idx for idx, ch in enumerate(corrected) if ch == "\n"]
    word_differences, index_differences, spearman_differences = calculate_conservatism(
        origin_sentences, corrected_sentences)
    assert len(origin_sentences) == len(corrected_sentences)
    return [newlines1, newlines2], word_differences, index_differences, spearman_differences, [ORDERED_ALIGNED] * len(origin_sentences)
def compare_paragraphs(origin, corrected, break_sent1=sent_tokenize_default, break_sent2=sent_tokenize_default):
    """ compares two paragraphs
    return:
    broken - the sentence endings indexes
    differences - difference measures corresponding to the indexes in broken
    aligned_by - the way the sentences were aligned"""
    print("comparing paragraphs")
    # flush the word-alignment cache before a fresh comparison
    align_sentence_words(None, None, None, True)
    print("aligning sentences")
    endings1, endings2, aligned_by = break2common_sentences(
        origin, corrected, break_sent1, break_sent2)
    broken = [endings1, endings2]
    print("assesing differences")
    origin_sentences = list(get_sentences_from_endings(origin, broken[0]))
    corrected_sentences = list(
        get_sentences_from_endings(corrected, broken[1]))
    word_differences, index_differences, spearman_differences = calculate_conservatism(
        origin_sentences, corrected_sentences)
    return broken, word_differences, index_differences, spearman_differences, aligned_by
def preprocess_simplification(s):
    """Undo tokenizer artifacts in simplification output: bracket tokens,
    HTML entities, then collapse runs of spaces/tabs."""
    replacements = (
        ("-rrb-", " "),
        ("-lrb-", ""),
        ("&quot;", '"'),
        ("&apos", "'"),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    return re.sub(r"[ \t]+", r" ", s)
def read_text(filename, process=preprocess_simplification):
    """Read PATH + filename in full and run *process* over the content."""
    with open(PATH + filename, "r") as fl:
        raw = fl.read()
    return process(raw)
def read_paragraph(filename, process=preprocess_paragraph):
    """Read at most MAX_SENTENCES lines of PATH + filename and run
    *process* over the joined text."""
    with open(PATH + filename) as fl:
        head = "".join(islice(fl, MAX_SENTENCES))
    return process(head)
def extract_aligned_by_dict(a):
    """ takes aligned_by list and creates a counter of ordered, first longer and second longer sentences"""
    raw = Counter(a)
    merged = Counter()
    # fold each "... with align" label into its plain counterpart
    for plain, aligned in ((ORDERED, ORDERED_ALIGNED),
                           (FIRST_LONGER, FIRST_LONGER_ALIGNED),
                           (SECOND_LONGER, SECOND_LONGER_ALIGNED)):
        merged[plain] = raw[plain] + raw[aligned]
    return merged
def compare(filenames, names, backup, origin, read_paragraph=read_paragraph, compare_paragraphs=compare_paragraphs):
    """ compares the conservatism of an iterable of files to an origin text
    filenames - iterable containing file names of sentences
                that correspond to the sentences in origin file.
                One sentence per line.
    names - iterable of names to call each file
    backup - cache file
    origin - paragran with original sentences (not a filename)
    """
    contents = [read_paragraph(fl) for fl in filenames]
    res_list = []
    old_res = read(backup) if backup else {}
    # cached entries were dumped without their name; restore it in place
    for name, res in old_res.items():
        res.append(name)
    dump(res_list, backup)
    for name, content in zip(names, contents):
        if name in old_res:
            res_list.append(old_res[name])
            continue
        broken, words_differences, index_differences, spearman_differences, aligned_by = compare_paragraphs(
            origin, content)
        res_list.append((broken, words_differences, index_differences,
                         spearman_differences, aligned_by, name))
        # checkpoint after every fresh comparison
        dump(res_list, backup)
    dump(res_list, backup)
    plot_comparison(res_list)
    convert_file_to_csv(backup)
###########################################################
#### VISUALIZATION ###
###########################################################
def create_hist(l, top=30, bottom=0):
    """Convert an iterable of ints into a dense histogram list.

    Index 0 of the result corresponds to the value *bottom*; values outside
    [bottom, top] are dropped.  Returns [0] when nothing falls in range.

    Fixes: the original called max() on all keys, so it crashed on empty
    input and padded trailing zeros out past *top* whenever larger values
    were present (even though those values were excluded from the counts).
    """
    count = Counter(l)
    # keep only the keys inside the requested window
    kept = [key for key in count if bottom <= key <= top]
    if not kept:
        return [0]
    hist = [0] * (max(kept) - bottom + 1)
    for key in kept:
        hist[key - bottom] = count[key]
    return hist
def plot_ygrid(magnitude, ymin=None, ymax=None, ax=None, alpha=0.3):
    """Draw light dashed horizontal grid lines at every multiple of
    *magnitude* within the (ymin, ymax) range of *ax* (defaults taken from
    the current axes limits).

    Fix: the downward walk previously *incremented* its counter, which both
    duplicated the positive lines and looped forever whenever ymin < 0.
    It now steps through zero and the negative multiples.
    """
    ax = init_ax(ax)
    ymin, ymax = init_ylim(ymin, ymax, ax)
    # not efficient if far from 0
    # walk up through the positive multiples of magnitude
    i = 1
    while magnitude * i < ymax:
        y = magnitude * i
        i += 1
        if y > ymin:
            plt.axhline(y=y, lw=0.5, color="black",
                        alpha=alpha, linestyle='--')
    # walk down through zero and the negative multiples
    i = 0
    while magnitude * i > ymin:
        y = magnitude * i
        i -= 1
        if y < ymax:
            plt.axhline(y=y, lw=0.5, color="black",
                        alpha=alpha, linestyle='--')
def remove_spines(ax=None):
    """Hide all four axis spines on *ax* (default: current axes)."""
    ax = init_ax(ax)
    for side in ("top", "bottom", "right", "left"):
        ax.spines[side].set_visible(False)
def init_ax(ax=None):
    """Return *ax* unchanged, falling back to the current pyplot axes."""
    return ax if ax is not None else plt.gca()
def init_ylim(ymin=None, ymax=None, ax=None):
    """Fill in whichever of ymin/ymax is missing from the axes' y-limits."""
    ax = init_ax(ax)
    if ymin is None or ymax is None:
        ax_ymin, ax_ymax = ax.get_ylim()
        if ymin is None:
            ymin = ax_ymin
        if ymax is None:
            ymax = ax_ymax
    return ymin, ymax
def beautify_heatmap(colorbar=None, magnitude=None, ymin=None, ymax=None, ax=None, fontsize=14):
    """De-junk a heatmap: drop spines and tick marks, enlarge tick labels,
    and resize the colorbar labels when one is supplied.
    magnitude/ymin/ymax are accepted for signature parity with
    beautify_lines_graph but are unused here."""
    ax = init_ax(ax)
    remove_spines(ax)
    # keep ticks only on the bottom/left edges
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    plt.tick_params(axis="both", which="both", bottom="off", top="off",
                    labelbottom="on", left="off", right="off", labelleft="on")
    if colorbar:
        colorbar.ax.tick_params(labelsize=fontsize)
def beautify_lines_graph(magnitude, ymin=None, ymax=None, ax=None, fontsize=14, ygrid_alpha=None):
    """De-junk a line plot: remove spines/tick marks, enlarge tick labels,
    and draw a light y-grid at multiples of *magnitude* via plot_ygrid."""
    ax = init_ax(ax)
    remove_spines(ax)
    # Ensure that the axis ticks only show up on the bottom and left of the plot.
    # Ticks on the right and top of the plot are generally unnecessary
    # chartjunk.
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    # # Limit the range of the plot to only where the data is.
    # # Avoid unnecessary whitespace.
    # plt.ylim(ymin, ymax)
    # plt.xlim(xmin, xmax)
    # Provide tick lines across the plot to help your viewers trace along
    # the axis ticks. Make sure that the lines are light and small so they
    # don't obscure the primary data lines.
    # Remove the tick marks; they are unnecessary with the tick lines we
    # just plotted.
    plt.tick_params(axis="both", which="both", bottom="off", top="off",
                    labelbottom="on", left="off", right="off", labelleft="on")
    # forward only the explicitly-provided values so plot_ygrid's own
    # defaults apply for the rest
    data = {"ymin": ymin, "ymax": ymax,
            "magnitude": magnitude, "alpha": ygrid_alpha, "ax": ax}
    data = dict((k, v) for k, v in data.items() if v is not None)
    plot_ygrid(**data)
def many_colors(labels, colors=cm.rainbow):
    """Map each distinct label to a color.

    Exactly two distinct labels get the fixed ("blue", "orange") pair;
    otherwise colors are sampled evenly from the *colors* colormap.
    Palette references:
    https://matplotlib.org/users/colormaps.html
    http://colorbrewer2.org/#type=diverging&scheme=PuOr&n=4
    https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
    """
    distinct = set(labels)
    if len(distinct) == 2:
        return dict(zip(distinct, ("blue", "orange")))
    samples = colors(np.linspace(0, 1, len(distinct)))
    return dict(zip(distinct, samples))
def plot_words_relative_differences_hist(l, ax):
    """Histogram of words changed, drawn relative to the first system (bar 0)."""
    WORDS_DIFFERENCES = 1  # position of words_differences in each result tuple
    plot_differences_hist(l, ax, WORDS_DIFFERENCES, "words", 0, relative_bar=0)
def plot_words_differences_hist(l, ax):
    """Absolute histogram of the number of words changed per sentence."""
    WORDS_DIFFERENCES = 1  # position of words_differences in each result tuple
    plot_differences_hist(l, ax, WORDS_DIFFERENCES, "words", 0)
def plot_index_differences_hist(l, ax):
    """Histogram of index differences, counted from 1."""
    INDEX_DIFFERENCES = 2  # position of index_differences in each result tuple
    plot_differences_hist(l, ax, INDEX_DIFFERENCES, "index", 1)
def plot_spearman_differences(l, ax):
    """Box plot of Spearman-rho values per system."""
    SPEARMAN_DIFFERENCES = 3  # position of spearman_differences in each result tuple
    boxplot_differences(l, ax, SPEARMAN_DIFFERENCES, r"$\rho$", 1)
def plot_spearman_ecdf(l, ax):
    """Empirical CDF of Spearman-rho values, restricted to [0.7, 1)."""
    SPEARMAN_DIFFERENCES = 3  # position of spearman_differences in each result tuple
    plot_ecdf(l, ax, SPEARMAN_DIFFERENCES, r"$\rho$", 0.7, 1)
def plot_words_differences(l, ax):
    """Cumulative curve of sentences by number of words changed."""
    WORDS_DIFFERENCES = 1  # position of words_differences in each result tuple
    plot_differences(l, ax, WORDS_DIFFERENCES, "words", 2)
def plot_words_heat(l, ax):
    """Heatmap of words-changed counts, binned at 0-5, 10 and 20."""
    WORDS_DIFFERENCES = 1  # position of words_differences in each result tuple
    plot_differences_heatmap(l, ax, WORDS_DIFFERENCES, "words", [
        0, 1, 2, 3, 4, 5, 10, 20])
def plot_index_differences(l, ax):
    """Cumulative curve of sentences by index differences."""
    INDEX_DIFFERENCES = 2  # position of index_differences in each result tuple
    plot_differences(l, ax, INDEX_DIFFERENCES, "index", 1)
def plot_ecdf(l, ax, pivot, diff_type, bottom, top):
    """Plot the empirical CDF of tuple field *pivot* for every result tuple
    in *l*.  Only points in [bottom, top) are kept; gold/fce systems are
    drawn with a dashed line."""
    # ys = []
    name = -1  # the system name is the last element of each result tuple
    colors = many_colors(range(len(l)))
    for i, tple in enumerate(l):
        x = np.sort(tple[pivot])
        x = [point for point in x if point < top and point >= bottom]
        # empirical CDF: the i-th smallest point gets probability i/len(x)
        yvals = np.arange(len(x)) / float(len(x))
        # ys.append((x, tple[name], colors[i]))
        if tple[name] == "gold" or "fce" in tple[name]:
            ax.plot(x, yvals, "--", color=colors[i], label=tple[name])
        else:
            ax.plot(x, yvals, color=colors[i], label=tple[name])
    plt.ylim(ymax=0.6)
    # for y, name, color in ys:
    #     x = np.linspace(min(sample), max(sample))
    #     y = ecdf(x)
    #     ax.step(x, y, olor=color, label=name)
    # ax.boxplot(x, labels=names, showmeans=True)
    plt.ylabel("probability")
    plt.xlabel(diff_type)
    # plt.title("empirical distribution of " + diff_type + " changes")
    plt.legend(loc=6, fontsize=10, fancybox=True, shadow=True)
def boxplot_differences(l, ax, pivot, diff_type, bottom):
    """Box plot (with means) of tuple field *pivot* for every system in *l*.
    diff_type and bottom are accepted for parity with the sibling plot
    helpers but are unused here."""
    # ys = []
    x = []
    names = []
    name = -1  # the system name is the last element of each result tuple
    # max_len = 0
    colors = many_colors(range(len(l)))  # NOTE(review): computed but unused
    for i, tple in enumerate(l):
        y = tple[pivot]
        x.append(y)
        names.append(tple[name])
    plt.autoscale(enable=True, axis='x', tight=False)
    ax.boxplot(x, labels=names, showmeans=True)
    # plt.title("box plot of " + diff_type + " changes")
    plt.legend(loc=7, fontsize=10, fancybox=True, shadow=True)
def plot_differences_hist(l, ax, pivot, diff_type, bottom, bins=None, relative_bar=-1):
    """ gets a list of (broken, words_differences, index_differences, spearman_differences, aligned_by, name) tuples and plot the plots

    Draws side-by-side histogram bars, one group of bars per system.  When
    *bins* is given the raw histogram is re-bucketed into those ranges; when
    relative_bar >= 0 every bar is drawn relative to that system's bars.
    """
    total_width = 1
    width = total_width / len(l)
    name = -1  # the system name is the last element of each result tuple
    if bins != None:
        # shift bin edges into histogram index space (index 0 == value
        # `bottom`); the +1 makes each named bin upper-inclusive — confirm
        bins = np.array(bins) + 1 - bottom
    relative = 0
    ys = []
    names = []
    for i, tple in enumerate(l):
        full_hist = create_hist(tple[pivot], bottom=bottom)
        # print(full_hist)
        if bins == None:
            y = full_hist
        else:
            y = [sum(full_hist[:bins[0]])]
            # print(full_hist[:bins[0]])
            for j in range(1, len(bins)):
                print(full_hist[bins[j - 1]:bins[j]])
                y.append(sum(full_hist[bins[j - 1]:bins[j]]))
            # NOTE(review): relies on loop variable j surviving the loop;
            # breaks with NameError when len(bins) < 2
            y.append(sum(full_hist[bins[j]:]))
        relative_text = "relative_bar to column number " + \
            str(relative_bar) if relative_bar >= 0 else ""
        y = np.array(y)
        ys.append(y)
        if i == relative_bar:
            relative = y
        names.append(tple[name])
    print(ys)
    colors = many_colors(range(len(l)))
    for i, (y, name) in enumerate(zip(ys, names)):
        # NOTE(review): "y - relative_bar" subtracts the column *index*;
        # "y - relative" (the reference histogram) looks intended — confirm
        print(diff_type + " hist", relative_text,
              "results", name, ":", y - relative_bar)
        if relative_bar >= 0:
            # zero-pad the shorter of y/relative so they can be subtracted
            longer_shape = y.shape if len(y) > len(
                relative) else relative.shape
            print("unpadded", y, relative, longer_shape)
            y = np.lib.pad(y, (0, longer_shape[0] - y.shape[0]), "constant")
            relative = np.lib.pad(
                relative, (0, longer_shape[0] - relative.shape[0]), "constant")
            print("padded", y, relative)
        x = np.array(range(len(y)))
        x = x + i * width - 0.5 * total_width
        ax.bar(x, y - relative, width=width,
               color=colors[i], align='center', label=name, edgecolor=colors[i])
    plt.autoscale(enable=True, axis='x', tight=False)
    ylabel = "amount" if relative_bar < 0 else "amount relative to " + \
        names[relative_bar]
    plt.ylabel(ylabel)
    plt.xlim(xmin=0 - 0.5 * total_width)
    plt.xlabel("number of " + diff_type + " changed")
    # plt.title("number of " + diff_type + " changed by method of correction")
    plt.legend(loc=7, fontsize=10, fancybox=True, shadow=True)
    # plt.tight_layout()
    # #old version
    # width = 1/len(l)
    # name = -1
    # for i, tple in enumerate(l):
    #     y = create_hist(tple[pivot], bottom=bottom)
    #     x = np.array(range(len(y)))
    #     print(diff_type + " hist results ",tple[name],":",y)
    #     colors = many_colors(range(len(l)))
    #     ax.bar(x + i*width, y, width=width, color=colors[i], align='center', label=tple[name], edgecolor=colors[i])
    # plt.autoscale(enable=True, axis='x', tight=False)
    # plt.ylabel("amount")
    # plt.xlim(xmin=0)
    # plt.xlabel("number of " + diff_type + " changed")
    # # plt.title("number of " + diff_type + " changed by method of correction")
    # plt.legend(loc=7, fontsize=10, fancybox=True, shadow=True)
    # # plt.tight_layout()
def plot_aligned_by(l, ax):
    """ gets a list of (broken, words_differences, index_differences, spearman_differences, aligned_by, name) tuples and plot
    grouped bars of how sentences aligned: split / ordered / concatenated."""
    broken, words_differences, index_differences, spearman_differences, aligned_by, name = list(
        range(6))  # tuple structure
    width = 1 / len(l)
    for i, tple in enumerate(l):
        y = extract_aligned_by_dict(tple[aligned_by])
        # three bars: first-longer (split), ordered, second-longer (concat)
        y = [y[FIRST_LONGER] + y[COMMA_REPLACE_FIRST], y[ORDERED],
             y[SECOND_LONGER] + y[COMMA_REPLACE_SECOND]]
        print("first ordered and second longer", tple[name], ":", y)
        x = np.array(range(len(y)))
        colors = many_colors(range(len(l)))  # NOTE(review): loop-invariant, could be hoisted
        ax.bar(x + i * width, y, width=width,
               color=colors[i], align='center', label=tple[name], edgecolor=colors[i])
    ax.autoscale(tight=True)
    plt.ylabel("amount")
    plt.xlabel("number of sentence changes of that sort")
    # plt.title("number of sentence changes by method of correction")
    plt.xticks(x + width, ("sentences split",
                           ORDERED, "sentences concatanated"))
    plt.legend(loc=7, fontsize=10, fancybox=True, shadow=True)
    # plt.tight_layout()
def plot_not_aligned(l, ax):
    """ gets a list of (broken, words_differences, index_differences, spearman_differences, aligned_by, name) tuples and plot the bars
    for the two misalignment cases only (split / concatenated); gold and fce
    systems are hatched."""
    broken, words_differences, index_differences, spearman_differences, aligned_by, name = list(
        range(6))  # tuple structure
    width = 1 / len(l)
    start = 1 + 2 / 5  # horizontal offset for the bar groups
    for i, tple in enumerate(l):
        y = extract_aligned_by_dict(tple[aligned_by])
        # NOTE(review): the double assignment "y = y =" is redundant
        y = y = [y[FIRST_LONGER] + y[COMMA_REPLACE_FIRST],
                 y[SECOND_LONGER] + y[COMMA_REPLACE_SECOND]]
        x = np.array(range(len(y)))
        colors = many_colors(range(len(l)))
        if tple[name] == "gold" or "fce" in tple[name]:
            # hatched bars for reference systems; the extra near-zero-width
            # white bar looks like a drawing workaround — confirm intent
            bar = ax.bar(x + (start + i) * width, y, width=width, color=colors[
                i], align='center', label=tple[name], edgecolor="black", hatch="\\")
            ax.bar(x + i * width, y, width=width /
                   2000000, edgecolor="w", color="w")
        else:
            bar = ax.bar(x + (start + i) * width, y, width=width,
                         color=colors[i], align='center', label=tple[name], edgecolor=colors[i])
    ax.autoscale(tight=True)
    plt.ylim(ymax=40)
    plt.ylabel("amount")
    plt.xlabel("number of sentence changes of that sort")
    # plt.title("number of sentence changes by method of correction")
    plt.xticks(x + width * (len(l) / 2 - 1),
               ("sentences split", "sentences concatanated"))
    plt.legend(loc=7, fontsize=10, fancybox=True, shadow=True)
    # plt.tight_layout()
# cm.coolwarm_r
def plot_differences_heatmap(l, ax, pivot, diff_type, bins, colors=cm.bone_r):
    """ gets a list of (broken, words_differences, index_differences, spearman_differences, aligned_by, name) tuples and plot the plots
    One heatmap row per system, one column per bin of the metric at *pivot*;
    a final column collects everything above the last bin."""
    width = 1 / len(l)  # NOTE(review): computed but unused
    name = -1  # the system name is the last element of each result tuple
    mesh = []
    names = []
    top_bins = np.array(bins) + 1  # upper-inclusive bin edges in index space
    for i, tple in enumerate(l):
        names.append(tple[-1])
        full_hist = create_hist(tple[pivot], bottom=0)
        y = [sum(full_hist[:top_bins[0]])]
        for j in range(1, len(top_bins)):
            y.append(sum(full_hist[top_bins[j - 1]:top_bins[j]]))
        y.append(sum(full_hist[top_bins[j]:]))  # overflow column past the last bin
        print(diff_type + " heatmap results ", tple[name], ":", y)
        mesh.append(y)
        # ax.bar(x + i*width, y, width=width, color=colors[i], align='center', label=tple[name], edgecolor=colors[i])
    x = np.array(range(len(y)))
    mesh = np.array(mesh)
    ax.set_frame_on(False)
    heatmap = ax.pcolormesh(mesh, cmap=colors)
    plt.autoscale(enable=True, axis='x', tight=False)
    # plt.ylabel("")
    plt.xlim(xmin=0)
    plt.xlabel("number of " + diff_type + " changed")
    # human-readable labels for each bin range; note the i == 0 iteration
    # compares against bins[-1] via Python's negative indexing — harmless
    # for the bin lists used here, but confirm for new callers
    bin_names = ["0" if bins[0] == 0 else "0-" + str(bins[0])]
    for i in range(len(bins)):
        if bins[i] > bins[i - 1] + 1:
            bin_names.append(str(bins[i - 1] + 1) + "-" + str(bins[i]))
        elif bins[i] == bins[i - 1] + 1:
            bin_names.append(str(bins[i]))
    bin_names.append(str(bins[-1]) + "+")
    ax.set_yticks(np.arange(len(names)) + 0.5)
    ax.set_yticklabels(names)
    ax.set_xticks(x + 0.5)
    ax.set_xticklabels(bin_names, minor=False)
    colorbar = plt.colorbar(heatmap)
    beautify_heatmap(colorbar=colorbar)
    # plt.title("number of " + diff_type + " changed by method of correction")
    # plt.tight_layout()
def plot_differences(l, ax, pivot, diff_type, bottom):
    """ gets a list of (broken, words_differences, index_differences, spearman_differences, aligned_by, name) tuples and plot the plots
    as cumulative counts of sentences by metric value; gold/fce systems are
    drawn dashed."""
    broken, words_differences, index_differences, spearman_differences, aligned_by, name = list(
        range(6))  # tuple structure
    ys = []
    max_len = 0
    colors = many_colors(range(len(l)))
    for i, tple in enumerate(l):
        y = create_hist(tple[pivot], bottom=bottom)
        ys.append((y, tple[name], colors[i]))
        max_len = max(max_len, len(y))
    x = np.array(range(bottom, max_len + bottom))
    for y, name, color in ys:
        y = y + [0] * (max_len - len(y))  # right-pad so all curves share x
        if name == "gold" or "fce" in name:
            ax.plot(x, np.cumsum(y), "--", color=color, label=name)
        else:
            ax.plot(x, np.cumsum(y), color=color, label=name)
    plt.autoscale(enable=True, axis='x', tight=False)
    plt.ylabel("amount")
    plt.xlabel("number of " + diff_type + " changed")
    # plt.xlim(xmin=-x[-1]/5)
    # plt.xticks([10*i for i in range(math.ceil(x[-1]/10) + 1)])
    # plt.legend(loc=6, fontsize=10, fancybox=True, shadow=True)
    plt.legend(loc=7, fontsize=10, fancybox=True, shadow=True)
    # plt.title("accumulative number of sentences by " + diff_type + " changed")
def plot_comparison(l):
    """gets a list of tuple parameters and plots them

    Lays out a 2x2 overview figure first (cleared, never saved), then
    re-plots every panel on its own axes and saves each under ./plots/
    with the module-level trial_name suffix."""
    data = []  # NOTE(review): assigned twice, never used
    ax = plt.subplot(221)
    plot_spearman_differences(l, ax)
    ax = plt.subplot(222)
    plot_spearman_ecdf(l, ax)
    ax = plt.subplot(223)
    plot_aligned_by(l, ax)
    ax = plt.subplot(224)
    plot_not_aligned(l, ax)
    plt.clf()  # NOTE(review): the 2x2 figure is discarded without show/savefig — confirm intended
    data = []
    dirname = "./plots/"
    ax = plt.subplot(111)
    plot_spearman_differences(l, ax)
    plt.savefig(dirname + r"spearman_differences" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_spearman_ecdf(l, ax)
    plt.savefig(dirname + r"spearman_ecdf" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_words_differences(l, ax)
    plt.savefig(dirname + r"words_differences" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_words_differences_hist(l, ax)
    plt.savefig(dirname + r"words_differences_hist" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_words_relative_differences_hist(l, ax)
    plt.savefig(dirname + r"words_relative_differences_hist" +
                trial_name + ".png", bbox_inches='tight')
    plt.show()  # only this figure is shown interactively
    plt.clf()
    ax = plt.subplot(111)
    plot_words_heat(l, ax)
    plt.savefig(dirname + r"words_differences_heat" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_index_differences(l, ax)
    plt.savefig(dirname + r"index_differences" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_index_differences_hist(l, ax)
    plt.savefig(dirname + r"index_differences_hist" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_aligned_by(l, ax)
    plt.savefig(dirname + r"aligned_all" +
                trial_name + ".png", bbox_inches='tight')
    plt.clf()
    ax = plt.subplot(111)
    plot_not_aligned(l, ax)
    plt.savefig(dirname + r"aligned" + trial_name +
                ".png", bbox_inches='tight')
    plt.clf()
###########################################################
#### UTIL ###
###########################################################
def convert_file_to_csv(filename):
    """Flatten the JSON results cache *filename* into a sibling .csv file.

    Row 1 centres each system name over its group of metric columns, row 2
    repeats the metric column names, and every following row holds the
    values at one list index (blank-padded where a list is shorter).
    """
    l = read(filename)
    filename = os.path.splitext(filename)[0] + ".csv"
    col_names = ["words_differences", "index_differences",
                 "spearman_differences", "aligned_by"]
    names = l.keys()
    names_row = []
    # pad each name with blanks so it sits centred over its metric columns
    spacing_left = int(len(col_names) / 2) * [""]
    spacing_right = (int((len(col_names) + 1) / 2) - 1) * [""]
    for name in names:
        names_row += spacing_left + [name] + spacing_right
    # NOTE(review): this repeats the metric names len(names_row) times,
    # i.e. len(names) * len(col_names) copies; "* len(names)" looks like
    # the intended factor — confirm against the produced CSV
    col_names = col_names * len(names_row)
    max_len = 0
    for value in l.values():
        for lst in value:
            lst = lst[1:]  # remove sentence breaks
            max_len = max(max_len, len(lst))
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(names_row)
        writer.writerow(col_names)
        for i in range(max_len):
            row = []
            for value in l.values():
                # NOTE(review): drops the first stored field here but the
                # max_len pass above sliced each inner list instead — the
                # two passes disagree; confirm which slicing is intended
                value = value[1:]
                for lst in value:
                    if len(lst) > i:
                        row.append(lst[i])
                    else:
                        row.append("")
            writer.writerow(row)
def read(filename):
    """Load a JSON object from *filename*.

    Returns an empty dict (after logging) when the file is missing or holds
    invalid JSON.
    """
    try:
        with open(filename, "r+") as handle:
            data = json.load(handle)
    except FileNotFoundError as err:
        print(err, "The file was not found, creating it instead")
        return dict()
    except json.decoder.JSONDecodeError as err:
        print("json decoder error in ", filename, ":", err)
        return dict()
    return data
def dump(l, filename):
    """Merge the named result tuples in *l* into the JSON cache *filename*.

    Each element's last item is its name; names already present in the
    cache are kept as-is, new ones are stored with the name stripped off.
    """
    cache = read(filename)
    for entry in l:
        key = entry[-1]
        payload = entry[:-1]
        if key not in cache:
            print(key, " name")
            cache[key] = payload
    with open(filename, "w+") as handle:
        json.dump(cache, handle)
# Entry point: run the full comparison pipeline when executed as a script.
if __name__ == '__main__':
    main()
|
# Created by MechAviv
# Map ID :: 807040000
# Momijigaoka : Unfamiliar Hillside
# One-time directed event: runs only while quest record 57375 does not yet
# contain "1" and the character's job is still 4001; levels to 10, advances
# the job to 4100 and grants the starting skill set via the script manager
# (`sm`).
if "1" not in sm.getQRValue(57375) and sm.getChr().getJob() == 4001:
    sm.curNodeEventEnd(True)
    sm.setTemporarySkillSet(0)
    sm.setInGameDirectionMode(True, True, False, False)
    sm.sendDelay(1000)
    sm.levelUntil(10)
    # stamp the quest record so this sequence never replays
    sm.createQuestWithQRValue(57375, "1")
    sm.removeSkill(40010001)
    sm.setJob(4100)
    sm.resetStats()
    # Unhandled Stat Changed [HP] Packet: 00 00 00 04 00 00 00 00 00 00 CB 00 00 00 FF 00 00 00 00
    # Unhandled Stat Changed [MHP] Packet: 00 00 00 08 00 00 00 00 00 00 C2 00 00 00 FF 00 00 00 00
    # Unhandled Stat Changed [MMP] Packet: 00 00 00 20 00 00 00 00 00 00 71 00 00 00 FF 00 00 00 00
    sm.addSP(6, True)
    # Unhandled Stat Changed [MHP] Packet: 00 00 00 08 00 00 00 00 00 00 BC 01 00 00 FF 00 00 00 00
    # Unhandled Stat Changed [MMP] Packet: 00 00 00 20 00 00 00 00 00 00 D5 00 00 00 FF 00 00 00 00
    # [INVENTORY_GROW] [01 1C ]
    # [INVENTORY_GROW] [02 1C ]
    # [INVENTORY_GROW] [03 1C ]
    # [INVENTORY_GROW] [04 1C ]
    sm.giveSkill(40010000, 1, 1)
    sm.giveSkill(40010067, 1, 1)
    sm.giveSkill(40011288, 1, 1)
    sm.giveSkill(40011289, 1, 1)
    # re-grant 40011227 at level 1 after removing any existing copy
    sm.removeSkill(40011227)
    sm.giveSkill(40011227, 1, 1)
    # Unhandled Stat Changed [HP] Packet: 00 00 00 04 00 00 00 00 00 00 D2 01 00 00 FF 00 00 00 00
    # Unhandled Stat Changed [MP] Packet: 00 00 00 10 00 00 00 00 00 00 DF 00 00 00 FF 00 00 00 00
    # Unhandled Stat Changed [WILL_EXP] Packet: 00 00 00 00 40 00 00 00 00 00 20 2B 00 00 FF 00 00 00 00
    # Unhandled Message [INC_NON_COMBAT_STAT_EXP_MESSAGE] Packet: 14 00 00 40 00 00 00 00 00 20 2B 00 00
    sm.setTemporarySkillSet(0)
    sm.setInGameDirectionMode(False, True, False, False)
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from pools.models import Pool
from pools.serializers import PoolSerializer
@csrf_exempt
def pool_list(request):
    """
    List all pools (GET) or create a new pool (POST).

    GET returns the serialized queryset as a JSON array; POST validates the
    request body and answers 201 with the saved object or 400 with the
    validation errors.  Other methods fall through and return None.
    """
    if request.method == 'GET':
        queryset = Pool.objects.all()
        serialized = PoolSerializer(queryset, many=True)
        return JsonResponse(serialized.data, safe=False)
    elif request.method == 'POST':
        payload = JSONParser().parse(request)
        serialized = PoolSerializer(data=payload)
        if not serialized.is_valid():
            return JsonResponse(serialized.errors, status=400)
        serialized.save()
        return JsonResponse(serialized.data, status=201)
@csrf_exempt
def pool_detail(request, pk):
    """
    Retrieve (GET), update (PUT) or delete (DELETE) a single Pool.

    Returns 404 when *pk* does not exist, 400 on invalid PUT data and 204
    after deletion.  Other methods fall through and return None.
    """
    try:
        pool = Pool.objects.get(pk=pk)
    except Pool.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        serializer = PoolSerializer(pool)
        return JsonResponse(serializer.data)
    elif request.method == 'PUT':
        data = JSONParser().parse(request)
        # binding to the existing instance makes save() an update, not a create
        serializer = PoolSerializer(pool, data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=400)
    elif request.method == 'DELETE':
        pool.delete()
        return HttpResponse(status=204)
import torch
from torch.utils import data
from torchvision import datasets, transforms
# MNIST training split with random rotation augmentation (up to 30 degrees);
# the test split gets plain tensors so evaluation stays deterministic.
train = datasets.MNIST('', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.RandomRotation(30)
                       ]))
test = datasets.MNIST('', train=False, download=True,
                      transform=transforms.Compose([
                          transforms.ToTensor()
                      ]))
# Shuffle only during training; keep evaluation order fixed.
trainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
testset = torch.utils.data.DataLoader(test, batch_size=10, shuffle=False)
|
# Copyright (c) 2016 The Foundry Visionmongers Ltd. All Rights Reserved.
import os.path
import nuke
import nukescripts
import re
import PySide.QtGui
# Label of the single text field in the script-command panel.
kCommandField = 'Command:'
# Remembered across invocations so the panel re-offers the last command.
last_cmd = ''
def script_command(default_cmd):
    """Pop up a one-field panel asking for a script command and execute it.

    The last accepted command is kept in the module-global ``last_cmd`` and
    offered as the default when *default_cmd* is empty or None.
    """
    global last_cmd
    panel = nuke.Panel("Nuke")
    use_cmd = default_cmd if default_cmd else last_cmd
    panel.addScriptCommand(kCommandField, use_cmd)
    panel.addButton("Cancel")
    panel.addButton("OK")
    if panel.show() == 1:
        last_cmd = panel.value(kCommandField)
        panel.execute(kCommandField)
def findMaxVersionForFileName(filename):
    """Helper function for finding the max version of a particular
    script in its current directory.

    Returns the next free version number: one past the highest version
    found among sibling files sharing this file's base name, and at least
    the current version + 1.

    Note that a file in the current directory counts towards the version
    set if the string before its v number matches the current file's.

    Fix: the on-disk version (a string from nukescripts.version_get) was
    compared directly against the int maxV; under Python 2 a str always
    compares greater than an int, so maxV ended up tracking the *last*
    matching file instead of the maximum.  The comparison now uses ints.
    """
    # Get the maximum version number based on the current files on disk
    (basePath, fileNameCurrent) = os.path.split(filename)
    (prefixCurrent, vCurrent) = nukescripts.version_get(fileNameCurrent, "v")
    # Set maxV to the current version + 1
    maxV = int(vCurrent) + 1
    # Get the base name for the current file version,
    # i.e. the bit of the filename before the version number.
    baseNameRegex = "(.*)" + str(prefixCurrent)
    baseMatch = re.match(baseNameRegex, fileNameCurrent, re.IGNORECASE)
    if not baseMatch:
        return maxV
    baseNameCurrent = baseMatch.group(1)
    # Iterate the files in the current directory
    for fileName in os.listdir(basePath):
        # get the base name of each file.
        match = re.match(baseNameRegex, fileName, re.IGNORECASE)
        if not match:
            continue
        baseNameFile = match.group(1)
        # Check whether the base name is the same as the current file
        if baseNameFile == baseNameCurrent:
            # Compare the v numbers as ints and update maxV if needed.
            (prefix, version) = nukescripts.version_get(fileName, "v")
            maxV = max(maxV, int(version) + 1)
    return maxV
class VersionHelper(object):
    """Helper class for storing the new version information.
    Intended to be created per rootname.

    Parses the "v<N>" token out of *rootname* and pre-computes the next
    free on-disk version via findMaxVersionForFileName.
    """
    def __init__(self, rootname):
        # version_get raises ValueError when no version token is present
        (prefix, v) = nukescripts.version_get(rootname, "v")
        self._rootname = rootname
        self._prefix = prefix    # text preceding the version digits
        self._currentV = int(v)  # version parsed from rootname
        self._maxV = findMaxVersionForFileName(rootname)
    def hasVersion(self):
        # NOTE(review): _currentV is always an int by this point
        # (version_get raises instead of returning None), so this can only
        # return True — confirm callers rely on the constructor exception.
        return self._currentV is not None
    def nextVersion(self):
        """Current version + 1 (may collide with an existing file)."""
        return self._currentV + 1
    def maxVersion(self):
        """One past the highest version found on disk."""
        return self._maxV
    def currentVersionString(self):
        return self._rootname
    def nextVersionString(self):
        return self.versionString(self.nextVersion())
    def maxVersionString(self):
        return self.versionString(self.maxVersion())
    def versionString(self, version):
        """Return rootname with its version token replaced by *version*."""
        return nukescripts.version_set(self._rootname, self._prefix, self._currentV, version)
#End VersionHelper
class VersionConflictDialog(PySide.QtGui.QDialog):
    """Dialog which gives the user options for resolving version conflicts:
    overwrite the existing version, save as the max version, or save as an
    explicitly chosen version number."""
    def __init__(self,versionHelper,parent=None):
        super(VersionConflictDialog, self).__init__(parent)
        self._newPath = None     # resolved file path, set by showDialog()
        self._newVersion = None  # resolved version number, set by showDialog()
        self._eButtonIds = {
            "overwrite": 0,
            "saveAsMax": 1,
            "saveAsVersion": 2,
        }
        self._versionHelper = versionHelper
        self.setWindowTitle("Version Conflict")
        self.setSizePolicy( PySide.QtGui.QSizePolicy.Expanding, PySide.QtGui.QSizePolicy.Fixed )
        self.setMinimumWidth(450)
        layout = PySide.QtGui.QVBoxLayout()
        layout.setSpacing(0)
        filename = versionHelper.nextVersionString()
        text = PySide.QtGui.QLabel("Unable to save script. Version:\n%s\nalready exists. \n\nWould you like to:" % filename)
        layout.addWidget(text)
        # one radio button per resolution strategy, grouped so only one
        # can be checked at a time
        self._buttonGroup = PySide.QtGui.QButtonGroup(self)
        overwriteButton = PySide.QtGui.QRadioButton("Overwrite existing version")
        self._buttonGroup.addButton(overwriteButton)
        self._buttonGroup.setId(overwriteButton, self._eButtonIds["overwrite"])
        overwriteButton.setChecked(True)
        saveAsmaxVersionButton = PySide.QtGui.QRadioButton("Save as max version (%s)" % versionHelper._maxV)
        self._buttonGroup.addButton(saveAsmaxVersionButton)
        self._buttonGroup.setId(saveAsmaxVersionButton, self._eButtonIds["saveAsMax"])
        saveAsVersionButton = PySide.QtGui.QRadioButton("Save as version: ")
        self._buttonGroup.addButton(saveAsVersionButton)
        self._buttonGroup.setId(saveAsVersionButton, self._eButtonIds["saveAsVersion"])
        self._saveAsVersionSpin = PySide.QtGui.QSpinBox()
        self._saveAsVersionSpin.setValue(versionHelper._maxV)
        self._saveAsVersionSpin.setButtonSymbols(PySide.QtGui.QAbstractSpinBox.NoButtons)
        self._saveAsVersionSpin.setFixedWidth(30)
        self._saveAsVersionSpin.setContentsMargins(0,0,0,0)
        # Negative versions are not allowed, so set min valid version to 1
        versionValidator = PySide.QtGui.QIntValidator()
        versionValidator.setBottom(1)
        self._saveAsVersionSpin.lineEdit().setValidator(versionValidator)
        saveAsVerionLayout = PySide.QtGui.QHBoxLayout()
        saveAsVerionLayout.setSpacing(0)
        saveAsVerionLayout.setContentsMargins(0,0,0,0)
        saveAsVerionLayout.setAlignment(PySide.QtCore.Qt.AlignLeft)
        saveAsVerionLayout.addWidget(saveAsVersionButton)
        saveAsVerionLayout.addWidget(self._saveAsVersionSpin)
        layout.addWidget(overwriteButton)
        layout.addWidget(saveAsmaxVersionButton)
        layout.addLayout(saveAsVerionLayout)
        # Standard buttons for Add/Cancel
        buttonbox = PySide.QtGui.QDialogButtonBox(PySide.QtGui.QDialogButtonBox.Ok | PySide.QtGui.QDialogButtonBox.Cancel)
        buttonbox.accepted.connect(self.accept)
        buttonbox.rejected.connect(self.reject)
        layout.addWidget(buttonbox)
        self.setLayout(layout)
    def showDialog(self):
        """Run the dialog modally; on accept, resolve and stash the chosen
        path/version and return the truthy exec_ result."""
        result = self.exec_()
        if result:
            buttonId = self._buttonGroup.checkedId()
            if(buttonId < 0 ):
                return None
            #Get the correct path for that button ID
            # NOTE(review): 'is' on ints works here only because CPython
            # interns small integers — '==' would be the safe comparison.
            if buttonId is self._eButtonIds["overwrite"]:
                self._newPath = self._versionHelper.nextVersionString()
                self._newVersion = self._versionHelper.nextVersion()
            elif buttonId is self._eButtonIds["saveAsMax"]:
                self._newPath = self._versionHelper.maxVersionString()
                self._newVersion = self._versionHelper.maxVersion()
            elif buttonId is self._eButtonIds["saveAsVersion"]:
                self._newVersion = self._saveAsVersionSpin.value()
                self._newPath = self._versionHelper.versionString(self._newVersion )
        return result
    def getNewFilePath(self):
        # Path chosen during showDialog(); None until then
        return self._newPath
    def getNewVersionNumber(self):
        return self._newVersion
#End VersionDialog
def set_fileknob_version(knob, version):
    """Rewrite the version token in *knob*'s file path to *version*.

    Raises ValueError (from nukescripts.version_get) when the path carries
    no version token; empty paths are left untouched.
    """
    current = knob.value()
    if not current:
        return
    prefix, v = nukescripts.version_get(current, "v")
    knob.setValue(nukescripts.version_set(current, prefix, int(v), version))
def timeline_write_version_set(version):
    """Set the version number in the file and proxy paths of the write node
    named by the root knob 'timeline_write_node', when both exist."""
    knob = nuke.root().knob("timeline_write_node")
    if knob is None:
        return
    write_node = nuke.toNode(knob.getText())
    if write_node is None:
        return
    # update both the main output path and the proxy path
    set_fileknob_version(write_node['file'], version)
    set_fileknob_version(write_node['proxy'], version)
def script_version_up():
    """ Increments the versioning in the script name and the path of the timeline
    write nodes, then saves the new version.

    When the target version already exists on disk, a VersionConflictDialog
    lets the user overwrite, jump to the max version, or pick one. """
    # Set up the version helper
    root_name = nuke.toNode("root").name()
    versionHelper = VersionHelper(root_name)
    if not versionHelper.hasVersion():
        print "Version: Cannot version up as version string not found in filename. %s" % root_name
        return
    newFileName = versionHelper.nextVersionString()
    newVersion = versionHelper.nextVersion()
    # If the next version number already exists we need to ask the user how to proceed
    newVersionExists = os.path.exists( newFileName )
    if newVersionExists:
        versionDialog = VersionConflictDialog(versionHelper)
        cancelVersionUp = not versionDialog.showDialog()
        if cancelVersionUp:
            return
        else:
            newFileName = versionDialog.getNewFilePath()
            newVersion = versionDialog.getNewVersionNumber()
    # Get the Studio write Node and version up before saving the script
    timeline_write_version_set(newVersion)
    #Make the new directory if needed
    dirName = os.path.dirname( newFileName )
    if not os.path.exists( dirName ):
        os.makedirs( dirName )
    #Save the script and add to the bin
    nuke.scriptSaveAs(newFileName)
    if nuke.env['studio']:
        # Studio-only: register the new version in the project bin
        from hiero.ui.nuke_bridge.nukestudio import addNewScriptVersionToBin
        addNewScriptVersionToBin(root_name, newFileName)
def script_and_write_nodes_version_up():
    # Just calls script_version_up; presumably kept as a separate named
    # entry point for existing menu/callback registrations — confirm.
    script_version_up()
def get_script_data():
    """Build a human-readable summary of the current Nuke script: node
    count, proxy state, read elements, channel usage, cache usage and the
    per-layer channel breakdown."""
    activechans = nuke.Root.channels()
    totchan = len(activechans)
    root = nuke.toNode("root")
    rez = root.knob("proxy").value()
    numnodes = len(nuke.allNodes())
    chaninuse = totchan  # kept for parity with the original (unused)
    chanleft = 1023-totchan
    memusage = nuke.cacheUsage()/1024/1024
    parts = ["Script : " + root.name() + "\n"]
    parts.append("Total nodes: " + str(numnodes) + "\n")
    if rez:
        parts.append("\nResolution : --PROXY--\n")
    else:
        parts.append("\nResolution : **FULL RES**\n")
    parts.append("\nElements:\n" + nukescripts.get_reads("long"))
    parts.append("\nChannels in use: " + str(totchan) + "\n")
    parts.append("Channels left: " + str(chanleft) + "\n")
    parts.append("\nCache Usage: " + str(memusage) + " mb\n")
    parts.append("\nChannel Data :\n")
    layers = nuke.Root.layers()
    for layer in layers:
        parts.append("\n" + layer.name() + "\n")
        # list only the layer channels that are actually in use
        for chan in layer.channels():
            if chan in activechans:
                parts.append("\t" + chan + "\n")
    return "".join(parts)
def script_data():
    # Show the script summary in a Nuke display panel; the expression
    # string is evaluated by nuke.display against the root node.
    nuke.display("nukescripts.get_script_data()", nuke.root())
def script_directory():
    # Thin pass-through to nuke's own script_directory helper.
    return nuke.script_directory()
|
from __future__ import absolute_import
import unicurses
import curses
from time import sleep
from ..db import queries
from pprint import pprint
from tabulate import tabulate
def dict2tab(ary_dict):
    """Convert a list of execution dicts into a tabulate-ready structure.

    Only the rows belonging to the three most recent exec_ids are kept.

    :param ary_dict: list of dicts, each with at least the keys
        exec_id, job_name, project_name and status
    :returns: dict with "headers" (column names) and "table" (row lists)
    """
    headers = ["exec_id", "job_name", "project_name", "status"]
    if not ary_dict:
        return {"headers": [], "table": []}
    # Three most recent execution ids (sorted() already returns a list,
    # so the original list(set(...)) wrapper was redundant).
    recent = sorted({int(row['exec_id']) for row in ary_dict})[-3:]
    table = [[row[k] for k in headers]
             for row in ary_dict if row['exec_id'] in recent]
    return {"headers": headers, "table": table}
def recolor(sc, print_data, result):
    """Redraw the tabulated job output with per-status colors.

    The first three lines (the table header) are drawn in yellow; every
    following line is colored using color pair ``status + 1``, where
    status is the last column of the corresponding row.

    :param sc: curses window to draw on
    :param print_data: rendered table text as produced by tabulate
    :param result: dict from dict2tab(); the last column of each row
        must be numeric (it selects the color pair)
    """
    curses.start_color()
    curses.init_pair(10, curses.COLOR_YELLOW, curses.COLOR_BLACK)  # header color
    curses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
    curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
    # NOTE(review): pair 3 is initialised twice; this second call wins.
    curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_WHITE)
    curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
    curses.init_pair(6, curses.COLOR_BLUE, curses.COLOR_BLACK)
    # Header: the first three rendered lines.
    sc.addstr('\n'.join(print_data.split('\n')[0:3]) + '\n',
              curses.color_pair(10))
    # Body: each table row spans two rendered lines (content + separator),
    # so idx/2 maps a text line back to its row in result["table"].
    # Fix: removed a leftover debug `print` statement that wrote to stdout
    # in the middle of the curses session, corrupting the display.
    for idx, lines in enumerate(print_data.split('\n')[3:]):
        sc.addstr(lines + '\n',
                  curses.color_pair(int(result["table"][int(idx / 2)][-1]) + 1))
def printscr(sc):
    """Main curses loop: render the job table once per second until 'q'.

    :param sc: curses screen provided by curses.wrapper
    :returns: 0 on normal exit
    """
    sc.nodelay(1)  # make getch() non-blocking so the loop keeps refreshing
    while True:
        result = dict2tab(queries.db_get_all())
        rendered = tabulate(
            tabular_data=result["table"],
            headers=result['headers'],
            tablefmt="fancy_grid",
        ).encode('utf-8')
        recolor(sc=sc, print_data=rendered, result=result)
        sc.refresh()
        if sc.getch() == ord('q'):
            break
        sleep(1)
        sc.clear()
    return 0
def display():
    """Entry point: run the curses UI (wrapper handles init/teardown)."""
    curses.wrapper(printscr)
if __name__=='__main__':
    pass
# coding: utf-8
from functools import reduce
from annoying.decorators import render_to
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from fuzzywuzzy import fuzz
from scoop.analyze.abstract.classifiable import ClassifiableModel
from scoop.core.abstract.content.picture import PicturableModel
from scoop.core.abstract.core.data import DataModel
from scoop.core.abstract.core.datetime import DatetimeModel
from scoop.core.abstract.user.ippoint import IPPointableModel, IPPointModel
from scoop.core.templatetags.text_tags import truncate_ellipsis
from scoop.core.util.data.dateutil import now
from scoop.core.util.model.model import SingleDeleteManager, search_query
from scoop.core.util.shortcuts import addattr
from scoop.messaging.util.signals import mailable_event, message_check_spam, message_pre_send, message_sent, message_set_spam
from scoop.messaging.util.text import format_message
__all__ = ['Message']
class MessageManager(SingleDeleteManager):
    """ Manager for Message objects """

    # Maximum length, in characters, of a message body after formatting
    MESSAGE_SIZE_LIMIT = 1024

    def get_queryset(self):
        """ Return the manager's default queryset """
        return super(MessageManager, self).get_queryset()

    # Actions
    def _add(self, thread, author, body, request=None, strip_tags=False, as_mail=True):
        """
        Add a message to a discussion thread

        :param thread: thread the message is added to
        :param author: author of the message
        :param body: message body
        :param request: HttpRequest object, used in particular to record the client IP
        :param strip_tags: strip every HTML tag from the body
        :param as_mail: also queue a notification mail for the recipients
        :returns: the new message, or raises PermissionDenied
        :rtype: Message | PermissionDenied
        """
        from scoop.messaging.models import Recipient
        from scoop.user.access.models import IP
        from scoop.user.models import User
        # A None author defaults to the bot account (or the request user)
        if author is None:
            author = User.objects.get_bot() if (request is None or request.user.is_anonymous()) else request.user
        # Send a pre-send check signal
        results = message_pre_send.send(sender=Message, author=author, thread=thread, request=request)
        # Abort if any signal receiver vetoed the send
        if any([result[1] is not True for result in results]):
            messages = [str(message) for message in reduce(lambda x, y: x + y, [result[1]['messages'] for result in results if result[1] is not True])]
            raise PermissionDenied(*messages)  # The payload is a list of messages
        # Format the message body.
        # Fix: request may legitimately be None (bot/system messages, see
        # above), so only consult request.user.is_staff when a request is
        # actually available instead of raising AttributeError.
        staff_sender = request is not None and request.user.is_staff
        body = format_message(body, limit=MessageManager.MESSAGE_SIZE_LIMIT, strip_tags=strip_tags and not staff_sender)
        # Create the message and flag it unread for the recipients
        message = Message(thread=thread, author=author, name=author.username, text=body, ip=IP.objects.get_by_request(request))
        message.save(force_insert=True)
        Recipient.objects.set_unread_by_message(message)
        # Notify listeners that a message was sent
        message_sent.send(sender=Message, author=author, message=message, request=request)
        # Fetch the recipients (everyone in the thread except the author)
        recipients = thread.get_users(exclude=author)
        # Queue notification mails for the recipients
        mailtype = 'messaging.message.new' if not author.is_staff else 'messaging.message.staff'
        if as_mail is True and thread.deleted is False:
            for recipient in recipients:
                mailable_event.send(sender=Message, mailtype=mailtype, recipient=recipient, data={'sender': [author], 'message': [message.text]})
        # Update the thread's updater and message counter
        if not author.is_bot() and author.is_active:
            thread.updater = author
            thread.counter += 1
            thread.save()
        return message

    # Getters
    def for_user(self, user, sorting=None):
        """ Return the messages addressed to a user """
        messages = self.select_related('author', 'thread', 'ip').filter(thread__recipient__user=user).exclude(author=user)
        messages = messages.order_by(sorting) if isinstance(sorting, str) else messages
        return messages

    def last_messages(self, minutes=30, **kwargs):
        """ Return the messages of the last n minutes """
        stamp = now() - minutes * 60
        messages = self.filter(time__gt=stamp, **kwargs)
        return messages

    def last_user_messages(self, user, minutes=30):
        """ Return a user's messages for the last n minutes """
        messages = self.last_messages(minutes, author=user)
        return messages

    def user_messages(self, user, **kwargs):
        """ Return the messages sent by a user """
        return user.messages_sent.filter(**kwargs)

    def get_last_user_message(self, user, ghost=False):
        """ Return the last message sent by a user (ghost=True includes deleted ones) """
        filtering = {} if ghost is True else {'deleted': False}
        return user.messages_sent.filter(**filtering).order_by('-id').first()

    def get_user_message_count(self, user, **kwargs):
        """ Return the number of messages sent by a user """
        return user.messages_sent.only('pk').filter(**kwargs).count()

    def text_search(self, expression, **kwargs):
        """ Return the messages containing an expression """
        return search_query(expression, ['text'], self.filter(**kwargs))

    def get_spam_data(self, user):
        """ Return spam statistics (total, spam count and percentage) for a user """
        data = {'total': self.get_user_message_count(user), '%': 0, 'spam': self.get_user_message_count(user, spam__gt=Message.SPAM_THRESHOLD)}
        data['%'] = (100.0 * data['spam'] / data['total']) if data['total'] > 0 else 0
        return data
class Message(IPPointableModel, DatetimeModel, PicturableModel, DataModel, ClassifiableModel):
    """ A single message in a discussion thread """

    # Constants
    DATA_KEYS = {'pasted', 'similar', 'delete_time'}  # keys allowed by the data model
    SPAM_THRESHOLD = 0.83  # spam score at or above which a message counts as spam
    classifications = {'spam': ('1', '0'), 'level': ('l', 'm', 'h')}

    # Fields
    author = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, related_name="messages_sent", on_delete=models.SET_NULL, verbose_name=_("Author"))
    name = models.CharField(max_length=32, verbose_name=_("Name"))
    thread = models.ForeignKey("messaging.Thread", on_delete=models.CASCADE, related_name='messages', null=False, verbose_name=_("Thread"))
    text = models.TextField(blank=False, verbose_name=_("Text"))
    spam = models.FloatField(default=0.0, validators=[MaxValueValidator(1.0), MinValueValidator(0.0)], verbose_name=_("Spam level"))
    deleted = models.BooleanField(default=False, db_index=True, verbose_name=pgettext_lazy('message', "Deleted"))
    objects = MessageManager()

    # Getters
    @render_to("messaging/message/body.html")
    def get_text_html(self):
        """ Return the HTML rendering of the message """
        return {'message': self}

    def get_geoip(self):
        """ Return the GeoIP information of the message, or None without an IP """
        return self.ip.get_geoip() if self.ip is not None else None

    def get_recipients(self, only_active=True):
        """ Return the recipients of the message """
        return self.thread.get_recipients(only_active=only_active)

    def get_recipients_to(self, only_active=True):
        """ Return the recipients targeted by the message (everyone but the author) """
        return self.thread.get_recipients(exclude=self.author, only_active=only_active)

    def get_similarity(self, message):
        """ Return the similarity score between this message and another (0..100) """
        return fuzz.token_sort_ratio(self.text, message.text)

    def get_similar_user_message_count(self, limit=100, ratio=0.8):
        """ Return the number of similar recent messages from the same user """
        # NOTE(review): get_similarity() is on a 0..100 scale, so the default
        # ratio=0.8 matches nearly everything -- confirm whether 80 was meant.
        messages = Message.objects.user_messages(self.author).order_by('-id')[0:limit]
        return self._get_similar_messages(messages, ratio=ratio)

    def get_similar_message_count(self, minutes=60, limit=100, ratio=0.8):
        """ Return the number of similar recent messages from all users """
        # Fix: last_messages() is defined on the manager, so it must be called
        # via Message.objects; Message.last_messages raised AttributeError.
        messages = Message.objects.last_messages(minutes=minutes).order_by('-id')[0:limit]
        return self._get_similar_messages(messages, ratio=ratio)

    def check_spam(self):
        """ Ask signal listeners to evaluate whether this message is spam """
        message_check_spam.send(None, message=self)

    @addattr(boolean=True, short_description=_("Spam"))
    def is_spam(self):
        """ Return whether the message is considered spam """
        return self.spam >= Message.SPAM_THRESHOLD

    # Setters
    def set_spam(self, value=None, save=True):
        """ Set the spam level of the message (clamped to [0, 1]; None means 1.0) """
        value = 1.0 if value is None else 0.0 if value < 0 else 1.0 if value > 1 else value
        message_set_spam.send(None, message=self)
        self.update(spam=value, save=save)

    def remove(self):
        """ Delete the message, and its thread too if the thread becomes empty """
        self.delete()
        self.thread.delete() if self.thread.counter == 0 else self.thread.save(force_update=True)

    # Private
    def _get_similar_messages(self, messages, ratio=0.8):
        """ Return the number of messages in a queryset similar to this one """
        return len([True for message in messages if self.get_similarity(message) >= ratio])  # TODO: DataModel similar

    def get_document(self):
        """ Return the tokenizable text representation used for classification """
        return self.text

    # Properties
    recipients = property(get_recipients)
    geoip = property(get_geoip)
    html = property(get_text_html)
    ip_address = property(IPPointModel.get_ip_address, IPPointModel.set_ip)

    # Overrides
    def save(self, *args, **kwargs):
        """ Save the message to the database (strips the text, fills the name, rechecks spam) """
        self.text = self.text.strip()
        if self.name == '' and self.author is not None:
            self.name = self.author.username
        self.check_spam()
        super(Message, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """ Delete the message (soft delete unless clear=True is passed) """
        if kwargs.pop('clear', False) is True:
            super(Message, self).delete(*args, **kwargs)
        elif self.deleted is False:
            self.thread.counter -= 1
            self.thread.save()
            self.deleted = True
            self.set_data('delete_time', timezone.now())
            self.save()
        return True

    def undelete(self):
        """ Restore the message if it was soft-deleted """
        if self.deleted is True:
            self.thread.counter += 1
            self.thread.save()
            self.deleted = False
            self.save()

    def __str__(self):
        """ Return a unicode representation of the object """
        return """Message #{thread:010} "{message}" """.format(thread=self.thread_id, message=truncate_ellipsis(self.text, 24))

    def get_absolute_url(self):
        """ Return the URL of the object (its thread's URL) """
        return self.thread.get_absolute_url()

    # Metadata
    class Meta:
        verbose_name = _("message")
        verbose_name_plural = _("messages")
        permissions = (("force_send_message", "Can force send messages"),
                       ("broadcast_message", "Can broadcast messages"),
                       )
        app_label = "messaging"
|
from __future__ import absolute_import, division, print_function, \
unicode_literals
from pyaudio import PyAudio
from pymimic import *
from os.path import realpath, dirname
from time import sleep
# Path to the bundled flitevox voice file, next to this script.
voice_path = dirname(realpath(__file__)) + '/cmu_us_rms.flitevox'

if __name__ == '__main__':
    v = Voice(voice_path)
    s = Speak('Hello there! This is python!', v)
    p = PyAudio()
    stream = p.open(format=2, channels=s.channels,
                    rate=s.sample_rate, output=True)
    try:
        stream.write(s.bin())
        sleep(0.5)
    finally:
        # Fix: release the audio device instead of leaking the stream and
        # the PyAudio instance.
        stream.stop_stream()
        stream.close()
        p.terminate()
|
# SQL statement templates for the Axiom object store.  The literal token
# *DATABASE* is substituted with the attached database name before use.
# DELETE_OBJECT = 'DELETE FROM axiom_objects WHERE oid = ?'
CREATE_OBJECT = 'INSERT INTO *DATABASE*.axiom_objects (type_id) VALUES (?)'
CREATE_TYPE = 'INSERT INTO *DATABASE*.axiom_types (typename, module, version) VALUES (?, ?, ?)'
# NOTE(review): SQLite PRAGMA statements generally do not accept bound
# parameters -- confirm the caller substitutes the table name before executing.
GET_TABLE_INFO = 'PRAGMA *DATABASE*.table_info(?)'
# The storeID for an object must be unique over the lifetime of the store.
# Since the storeID is allocated by inserting into axiom_objects, we use
# AUTOINCREMENT so that oids/rowids and thus storeIDs are never reused.
# The column is named "oid" instead of "storeID" for backwards compatibility
# with the implicit oid/rowid column in old Stores.
CREATE_OBJECTS = """
CREATE TABLE *DATABASE*.axiom_objects (
oid INTEGER PRIMARY KEY AUTOINCREMENT,
type_id INTEGER NOT NULL
CONSTRAINT fk_type_id REFERENCES axiom_types(oid)
)
"""
# Index to speed up lookups of all objects of a given type.
CREATE_OBJECTS_IDX = """
CREATE INDEX *DATABASE*.axiom_objects_type_idx
ON axiom_objects(type_id);
"""
# One row per (typename, version) pair known to the store.
CREATE_TYPES = """
CREATE TABLE *DATABASE*.axiom_types (
oid INTEGER PRIMARY KEY AUTOINCREMENT,
typename VARCHAR,
module VARCHAR,
version INTEGER
)
"""
# Per-type attribute schema: one row per attribute of each type version.
CREATE_ATTRIBUTES = """
CREATE TABLE *DATABASE*.axiom_attributes (
type_id INTEGER,
row_offset INTEGER,
indexed BOOLEAN,
sqltype VARCHAR,
allow_none BOOLEAN,
pythontype VARCHAR,
attribute VARCHAR,
docstring TEXT
)
"""
# Statements run, in order, to create a fresh store schema.
BASE_SCHEMA = [
CREATE_OBJECTS, CREATE_OBJECTS_IDX, CREATE_TYPES, CREATE_ATTRIBUTES]
# Resolve the (typename, module, version) triple of an object by its oid.
TYPEOF_QUERY = """
SELECT *DATABASE*.axiom_types.typename, *DATABASE*.axiom_types.module, *DATABASE*.axiom_types.version
FROM *DATABASE*.axiom_types, *DATABASE*.axiom_objects
WHERE *DATABASE*.axiom_objects.oid = ?
AND *DATABASE*.axiom_types.oid = *DATABASE*.axiom_objects.type_id
"""
# Check sqlite_master for the existence of a table/index by type and name.
HAS_SCHEMA_FEATURE = ("SELECT COUNT(oid) FROM *DATABASE*.sqlite_master "
"WHERE type = ? AND name = ?")
# Attribute layout of a type, in storage order.
IDENTIFYING_SCHEMA = ('SELECT indexed, sqltype, allow_none, attribute '
'FROM *DATABASE*.axiom_attributes WHERE type_id = ? '
'ORDER BY row_offset')
ADD_SCHEMA_ATTRIBUTE = (
'INSERT INTO *DATABASE*.axiom_attributes '
'(type_id, row_offset, indexed, sqltype, allow_none, attribute, docstring, pythontype) '
'VALUES (?, ?, ?, ?, ?, ?, ?, ?)')
ALL_TYPES = 'SELECT oid, module, typename, version FROM *DATABASE*.axiom_types'
LATEST_TYPES = 'SELECT typename, MAX(version) FROM *DATABASE*.axiom_types GROUP BY typename'
GET_GREATER_VERSIONS_OF_TYPE = ('SELECT version FROM *DATABASE*.axiom_types '
'WHERE typename = ? AND version > ?')
SCHEMA_FOR_TYPE = ('SELECT indexed, pythontype, attribute, docstring '
'FROM *DATABASE*.axiom_attributes '
'WHERE type_id = ?')
CHANGE_TYPE = 'UPDATE *DATABASE*.axiom_objects SET type_id = ? WHERE oid = ?'
# Reclaim placeholder rows (type_id == -1) while keeping the highest oid so
# the AUTOINCREMENT sequence never reuses a storeID.
APP_VACUUM = 'DELETE FROM *DATABASE*.axiom_objects WHERE (type_id == -1) AND (oid != (SELECT MAX(oid) from *DATABASE*.axiom_objects))'
|
"""Approximate Euler's number e via the partial series sum of 1/k!."""
from math import factorial

n = int(input("Numero de Termos: "))
# e = sum(1/k!) for k = 0..n-1; factorial and sum replace the manual
# accumulator loop (the star import hid where factorial came from).
acum = sum(1 / factorial(k) for k in range(n))
print(round(acum, 8))
#!/usr/bin/env python
"""Compute the mass-weighted center of the densest gas in a GIZMO snapshot."""
from argparse import ArgumentParser
from pathlib import Path
import numpy as np
import yt
parser = ArgumentParser()
parser.add_argument('-s', '--snapshot')
parser.add_argument('-o', '--output', type=Path, default='center.txt')
# Fix: without type=float a user-supplied percentile arrives as a string
# and breaks np.percentile; the default stays 80.
parser.add_argument('-p', '--percentile', type=float, default=80)
args = parser.parse_args()
# Load snapshot
ds = yt.frontends.gizmo.GizmoDataset(args.snapshot)
# Determine center: mass-weighted mean position of the gas cells above
# the requested density percentile.
ad = ds.all_data()
pt = 'gas'
dens = ad[pt, 'density'].to_value('code_density')
mask = dens > np.percentile(dens, args.percentile)
pos = ad[pt, 'position'][mask]
m = ad[pt, 'mass'][mask]
center = np.average(pos, weights=m, axis=0)
# Save output (in code units), creating the target directory if needed
center = center.to_value('code_length')
args.output.parent.mkdir(parents=True, exist_ok=True)
np.savetxt(args.output, center, fmt='%.3f')
|
import pickle
import string
import time
from argparse import ArgumentParser
import nltk
import numpy as np
from grpc.beta import implementations
from tensorflow.contrib.util import make_tensor_proto
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
def parse_args():
    """Parse the command-line options of the classification client.

    :returns: (host, port, text, dictionary_path) tuple; host and port
        come from splitting the --server value on ':'.
    """
    parser = ArgumentParser(description='Request a TensorFlow server for text classification')
    parser.add_argument('-s', '--server', dest='server',
                        default='172.17.0.2:9000',
                        help='Service host:port')
    parser.add_argument('-t', '--text', dest='text', required=True,
                        help='Text to classify')
    parser.add_argument('-d', '--dictionary', dest='word2id', required=True,
                        help='Translation table')
    args = parser.parse_args()
    host, port = args.server.split(':')
    return host, port, args.text, args.word2id
def main():
    """Tokenize the input text, encode it and query the TF Serving model."""
    host, port, text, word2id_dir = parse_args()
    with open(word2id_dir, 'rb') as reader:
        word2id = pickle.load(reader)

    # Tokenize, drop punctuation tokens, lowercase the survivors.
    punctuations = list(string.punctuation) + ['``', "''"]
    tokens = [tok.lower() for tok in nltk.word_tokenize(text)
              if tok not in punctuations]

    # Map words to ids, falling back to the <unk> id for unknown words.
    ids = []
    for word in tokens:
        if word in word2id:
            ids.append(word2id[word])
        else:
            ids.append(word2id['<unk>'])

    # Pad with <pad> / truncate to a fixed length of 40 tokens.
    if len(ids) < 40:
        ids = ids + [word2id['<pad>']] * (40 - len(ids))
    else:
        ids = ids[:40]
    batch = np.expand_dims(ids, axis=0).astype(np.int32)

    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    start = time.time()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'cls'
    request.model_spec.signature_name = 'prediction'
    request.inputs['sentence'].CopyFrom(make_tensor_proto(batch))
    result = stub.Predict(request, 60.0)  # 60 s deadline
    elapsed = time.time() - start

    print(result)
    print('Time elapsed: {}'.format(elapsed))
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3.8
from flask import Flask, Response
import flask
import time
import os
SECRET = os.environ["SECRET"]
HOST = os.environ.get("HOST", "127.0.0.1")
PORT = int(os.environ.get("PORT", "5000"))
app = Flask(__name__)
balance = 0
hold_till = 0
@app.route('/check-allowed/activity')
def check_allowed_acitivity():
    """Return 'true' while a hold is active, 'false' otherwise."""
    # NOTE(review): function name keeps the original spelling -- it defines
    # the Flask endpoint name, so renaming it would change url_for lookups.
    return 'true' if hold_till > time.time() else 'false'
@app.route('/form/hold', methods=["GET", "POST"])
def form_hold():
    """Move minutes from the balance into the hold timer.

    GET renders the form; POST converts the submitted minutes to seconds,
    debits the balance and extends hold_till.
    """
    global balance
    global hold_till
    if flask.request.method == "POST":
        value = int(flask.request.form.get('value', 0)) * 60
        # Fix: these checks were `assert` statements, which are stripped
        # under `python -O` and would silently disable the validation.
        # Raising AssertionError explicitly keeps the observable behavior.
        if value <= 0:
            raise AssertionError("holding must be positive")
        if balance - value < 0:
            raise AssertionError("holding more than balance is not allowed")
        balance -= value
        now = int(time.time())
        # Restart the hold window if the previous one already expired.
        if hold_till < now:
            hold_till = now
        hold_till += value
        return flask.redirect("")
    return f"""
    <p>Balance: {balance / 60}</p>
    <p>Hold left: {max(0, (hold_till - time.time()) / 60)} minutes</p>
    <form method=post>
    <p><input type=number name=value value="20" /></p>
    <p><input type=submit value=Hold></p>
    </form>
    """
@app.route('/ts')
def ts():
    """Return the current Unix timestamp as a plain string."""
    now = int(time.time())
    return str(now)
@app.route('/form/add', methods=["GET", "POST"])
def form_add():
    """Credit (or, with a negative value, debit) the balance.

    POST requires the shared secret; GET renders the form, which caches
    the secret in localStorage client-side.
    """
    import hmac  # local import: constant-time comparison below
    global balance
    if flask.request.method == "POST":
        # Fix: compare_digest avoids leaking the secret through comparison
        # timing, and an explicit raise (instead of `assert`) keeps the
        # check active under `python -O`.
        if not hmac.compare_digest(flask.request.form["secret"], SECRET):
            raise AssertionError()
        # negative add is allowed
        balance += int(flask.request.form.get("value", 0)) * 60
        # negative balance is not allowed
        if balance < 0:
            balance = 0
        return flask.redirect("")
    return f"""
    <p>Balance: {balance / 60}</p>
    <form method=post>
    <p><input placeholder=secret id=addsecret type=password name=secret /></p>
    <p><input type=number name=value value="10" /></p>
    <p><input type=submit value=Add></p>
    </form>
    <script>
    addsecret.onkeyup = e => localStorage.addsecret = addsecret.value
    addsecret.value = localStorage.addsecret
    </script>
    """
@app.route("/form/clear", methods=["GET", "POST"])
def form_clear():
global hold_till
global balance
if flask.request.method == "POST":
hold_till = balance = 0
return flask.redirect("")
return f"""
<p>Balance: {balance / 60}</p>
<p>Hold left: {max(0, (hold_till - time.time()) / 60)} minutes</p>
<form method=post>
<p><input type=submit value=CLEAR></p>
</form>
"""
@app.route("/")
def main():
return """
<a href=/form/add>ADD</a>
|
<a href=/form/hold>HOLD</a>
|
<a href=/form/clear>CLEAR</a>
"""
app.run(host=HOST, port=PORT, debug=True)
|
import json
import pprint
# Inline JSON document used as a decoding example (JSON true -> Python True,
# JSON null -> Python None).
encodedMovie = ('{"title": "Ale ja nie bede tego robil!", "release_year": 1969, "won_oscar": true, "actors": ["Arkadiusz Wlodarczyk", "Wiolleta Wlodarczyk"], "budget": null, "credits": {"director": "Arkadiusz Wlodarczyk", "writer": "Alan Burger", "animator": "Anime Animatrix"}}')
# Decoded for demonstration only; the value is not used further below.
decodedMovie = json.loads(encodedMovie)
with open("sample.json", encoding = "UTF-8") as file:
    wynik = json.load(file)
print(wynik)
# pprint renders the nested structure with one key per line for readability.
pprint.pprint(wynik)
|
import os
import glob
import time
import torch
import torch.nn as nn
from shutil import copyfile, move
from model import load_model
from util import pil_loader, prepare_image
class EnsembleModel(nn.Module):
    """Average the class predictions of several models.

    Each wrapped model must expose a ``classify(x)`` method; the forward
    pass returns the element-wise mean of their outputs.
    """

    def __init__(self, models, input_size, num_to_cat):
        super(EnsembleModel, self).__init__()
        self.models = models          # models providing .classify(x)
        self.input_size = input_size  # expected image input size
        self.num_to_cat = num_to_cat  # class index -> category name

    def forward(self, x):
        stacked = torch.stack([member.classify(x) for member in self.models])
        return torch.mean(stacked, dim=0)
def make_ensemble(paths, device):
    """Load the checkpoints in *paths* and wrap them in an EnsembleModel.

    The first checkpoint defines the reference category mapping and input
    size; later checkpoints whose categories differ are reported and skipped.
    """
    print(" * Loading ensemble ...")
    members = []
    for idx, path in enumerate(paths):
        candidate = load_model(path, device)
        if idx == 0:
            print("Categories :", candidate.num_to_cat)
            reference = candidate.num_to_cat
            input_size = candidate.input_size
        if reference == candidate.num_to_cat:
            print("Adding {}".format(path))
            candidate.to(device)
            members.append(candidate)
        else:
            print("Categories do not match : {}".format(path))
    print("Input Size :", input_size)
    ensemble = EnsembleModel(members, input_size, reference)
    print(" * Ensemble loaded.")
    return ensemble
def sort_folder(model, device, root, num=None):
    """Classify images in *root* and move each into its predicted category folder.

    :param model: classifier; model(img) returns per-class scores
    :param device: torch device the prepared images are sent to
    :param root: folder containing the unsorted images
    :param num: maximum number of images to process (None = all of them)
    """
    print(" * Sorting folder : {} ...".format(root))
    # Create one destination folder per category
    class_folder_paths = []  # Absolute path to destination folder
    for cat in model.num_to_cat.values():
        cat_path = os.path.join(root, cat)
        class_folder_paths.append(cat_path)
        if not os.path.exists(cat_path):
            os.mkdir(cat_path)
    # Collect the images to classify
    image_types = ["*.jpg", "*.png", "*.jpeg"]
    images = [f for ext in image_types for f in glob.glob(os.path.join(root, ext))]
    print("{} total images.".format(len(images)))
    # Fix: the default num=None made min(None, len(images)) raise TypeError;
    # None now means "process every image".
    max_count = len(images) if num is None else min(num, len(images))
    print(" * Sorting {} ...".format(max_count))
    counts = [0]*len(model.num_to_cat)
    start_time = time.time()
    for i in range(max_count):
        img_color = pil_loader(images[i])
        img = prepare_image(img_color, model.input_size).to(device)
        yclass = model(img)
        class_prob, class_num = torch.max(yclass, dim=1)
        counts[int(class_num)] += 1
        try:
            move(images[i], class_folder_paths[int(class_num)])
        except Exception:
            # Keep sorting even if one file fails to move (locked, name clash, ...)
            print("Failed to move {}".format(images[i]))
        # Progress report every 50 images with a rough time estimate
        if (i+1) % 50 == 0:
            count = i+1
            t2 = time.time() - start_time
            rate = count/t2
            est = t2/count * (max_count-count)
            print("{}/{} images. {:.2f} seconds. {:.2f} images per seconds. {:.2f} seconds remaining.".format(count, max_count, t2, rate, est))
    print("Labels per class :", counts)
    print("Distribution of Labels:", [x / max_count for x in counts])
    duration = time.time() - start_time
    print(" * Sort Complete")
    print(" * Duration {:.2f} Seconds".format(duration))
    print(" * {:.2f} Images per Second".format(max_count/duration))
if __name__ == "__main__":
root = "images/unsorted_memes"
root = r"C:\Users\LUKE_SARGEN\projects\classifier\data\nah_unsorted"
num = 1000
model_paths = [
# "runs/demo.pth",
# "runs/save/run00218_final.pth",
# "runs/save/run00231_final.pth",
"runs/save/run00244_final.pth",
]
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("Device : {}".format(device))
model = make_ensemble(model_paths, device)
sort_folder(model, device, root, num)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-25 09:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the choices/default/max_length of
    # Notification.action (Norwegian display strings for team join events).
    dependencies = [
        ('notification', '0003_notification_foreignpk'),
    ]
    operations = [
        migrations.AlterField(
            model_name='notification',
            name='action',
            field=models.CharField(choices=[('none', 'none'), ('team_req_join', 'ønsker å bli med i ditt team'), ('team_req_acc', 'har godtatt ditt team forespørsel'), ('team_req_dec', 'har avslått ditt team forespørsel')], default='none', max_length=25),
        ),
    ]
|
from . import constants
from . import helpers
import os
import time
import datetime
from subprocess import Popen, PIPE
import numpy as np
import pandas as pd
UINT32 = np.dtype(np.uint32)
FLOAT32 = np.dtype(np.float32)
def decompress_bi5(filename):
    """Decompress a .bi5 tick file and return the raw record bytes.

    Empty files (hours with no ticks) yield b''.  Uses the stdlib lzma
    module instead of shelling out to an external `lzma` binary, so the
    converter no longer depends on a system tool being installed.

    :param filename: path to the compressed .bi5 file
    :raises lzma.LZMAError: if the file is not valid LZMA data
    """
    import lzma  # local import keeps this fix self-contained
    if os.path.getsize(filename) == 0:
        return b''
    with open(filename, 'rb') as handle:
        compressed = handle.read()
    # .bi5 payloads are legacy LZMA ("alone") streams; lzma.decompress
    # auto-detects both that format and .xz containers.
    return lzma.decompress(compressed)
def bi5_to_dataframe(filepath):
    """Convert a .bi5 tick file into a pandas DataFrame.

    Each record holds five 32-bit big-endian values: millisecond offset
    within the hour, integer ask and bid prices, and float ask/bid volumes.

    :param filepath: path relative to constants.OUT_DIR
    :returns: DataFrame with Timestamp/Ask/Bid/AskVolume/BidVolume columns
    """
    filename = constants.OUT_DIR + filepath
    try:
        uncompressed = decompress_bi5(filename)
        # Records are big-endian on disk; byteswap converts them to native
        # order.  NOTE(review): this assumes a little-endian host -- verify
        # before running on big-endian hardware.
        raw_be = np.frombuffer(uncompressed, dtype=UINT32).reshape(-1,5)
        raw_le = raw_be.byteswap()
        int_data = raw_le[:,0:3]
        flt_data = raw_le[:,3:5].view(dtype=FLOAT32)
        data = {
            'Timestamp' : int_data[:,0],
            'Ask' : int_data[:,1],
            'Bid' : int_data[:,2],
            'AskVolume' : flt_data[:,0],
            'BidVolume' : flt_data[:,1]
        }
    except Exception:
        # Report which file failed, then re-raise for the caller.
        # (Narrowed from a bare `except:` so Ctrl-C is not intercepted.)
        print('Problem reading file {}'.format(filename))
        raise
    # Fix: removed a dead `if True: ... else: df = None` scaffold -- the
    # else branch could never run, so a DataFrame is always returned.
    return pd.DataFrame(data)
def convert_ticks(start_date, end_date, instr_type=None, instr_name=None):
    """Convert the downloaded tick files between two dates into one DataFrame.

    :param start_date: first date (inclusive)
    :param end_date: last date (inclusive)
    :param instr_type: instrument type; defaults to helpers.instrument_type
    :param instr_name: instrument name; defaults to helpers.instrument_name
    """
    if instr_type is None:
        instr_type = helpers.instrument_type
    if instr_name is None:
        instr_name = helpers.instrument_name
    instrument = constants.INSTRUMENTS[instr_type][instr_name]
    frames = []
    t_start = time.perf_counter()
    current = start_date
    while current <= end_date:
        for hour in range(24):
            hour_start = current + datetime.timedelta(hours=hour)
            filepath = helpers.get_filepath(instrument, current, hour)
            hourly = bi5_to_dataframe(filepath)
            if hourly is not None:
                # File timestamps are millisecond offsets within the hour;
                # rebase them onto absolute datetimes.
                hourly['Timestamp'] = hourly['Timestamp'].map( lambda t : hour_start + datetime.timedelta(milliseconds=t) )
                frames.append(hourly)
        # Progress log on the first day of every fourth month.
        if current.day == 1 and not current.month % 4:
            helpers.log_output('Completed {}'.format(current), 3)
        current += datetime.timedelta(days=1)
    df = pd.concat(frames, ignore_index=True)
    elapsed = time.perf_counter() - t_start
    helpers.log_output('took {:.1f} s'.format(elapsed), 3)
    return df
#coding=utf-8
from framework.base_page import Basepage
class SportsNewsPage(Basepage):
    """Page object for the sports news landing page."""
    # Locator for the NBA news entry link
    nba_link='xpath=>/html/body/div[3]/div[2]/div[1]/div[2]/div/div[2]/div/ul/li[1]/a'
    def click_nba_link(self):
        """Click the NBA link, then wait 5 seconds for the page to settle."""
        self.click(self.nba_link)
        self.wait(5)
|
# Copyright 2016 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import kingbirdclient
from kingbird.tests.tempest.scenario.quota_management. \
client_tests import base
from kingbird.tests import utils
DEFAULT_CLASS = "default"
DEFAULT_QUOTAS = base.DEFAULT_QUOTAS
QUOTA_CLASS_FORMAT = base.DEFAULT_QUOTAS.copy()
class KingbirdQuotaClassTestJSON(base.BaseKingbirdTest):
    """API tests for Kingbird quota-class create/read/update/delete."""

    @classmethod
    def setup_clients(cls):
        # Fix: classmethods now use the conventional `cls` parameter name.
        super(KingbirdQuotaClassTestJSON, cls).setup_clients()

    def tearDown(self):
        super(KingbirdQuotaClassTestJSON, self).tearDown()

    @classmethod
    def resource_cleanup(cls):
        super(KingbirdQuotaClassTestJSON, cls).resource_cleanup()
        cls.delete_resources()

    @classmethod
    def resource_setup(cls):
        super(KingbirdQuotaClassTestJSON, cls).resource_setup()
        cls.create_resources()

    def _delete_quota_values(self, class_name):
        """Return True once the class quota has reverted to the default."""
        quota_value = self.get_quota_for_class(class_name)
        resource_value = quota_value['cores']
        return resource_value == DEFAULT_QUOTAS['cores']

    def test_kb_quota_class_put_method(self):
        new_quota = {"instances": 15, "cores": 10}
        actual_value = self.update_quota_for_class(
            self.class_name, new_quota)
        # Fix: copy() -- mutating the shared module-level dict leaked
        # expected values between test methods.
        expected_value = QUOTA_CLASS_FORMAT.copy()
        expected_value['cores'] = 10
        expected_value['instances'] = 15
        expected_value['class_name'] = self.class_name
        self.assertEqual(expected_value, actual_value)
        self.delete_quota_for_class(self.class_name)
        utils.wait_until_true(
            lambda: self._delete_quota_values(self.class_name),
            exception=RuntimeError("Timed out "))

    def test_kb_quota_class_get_method(self):
        new_quota = {"instances": 15, "cores": 10}
        self.update_quota_for_class(self.class_name, new_quota)
        actual_value = self.get_quota_for_class(self.class_name)
        expected_value = QUOTA_CLASS_FORMAT.copy()
        expected_value['cores'] = 10
        expected_value['instances'] = 15
        expected_value['class_name'] = self.class_name
        self.assertEqual(expected_value, actual_value)
        self.delete_quota_for_class(self.class_name)
        utils.wait_until_true(
            lambda: self._delete_quota_values(self.class_name),
            exception=RuntimeError("Timed out "))

    def test_kb_quota_class_delete_method(self):
        new_quota = {"instances": 15, "cores": 15}
        self.update_quota_for_class(self.class_name, new_quota)
        self.delete_quota_for_class(self.class_name)
        utils.wait_until_true(
            lambda: self._delete_quota_values(self.class_name),
            exception=RuntimeError("Timed out "))
        # After deletion the class must fall back to the defaults.
        quota_after_delete = self.get_quota_for_class(self.class_name)
        self.assertNotEqual(quota_after_delete['cores'], 15)
        self.assertNotEqual(quota_after_delete['instances'], 15)

    def test_kb_quota_class_wrong_input(self):
        # Misspelled resource name must be rejected by the API.
        new_quota = {"instanc": 15, "cores": 10}
        self.assertRaises(kingbirdclient.exceptions.APIException,
                          self.update_quota_for_class, self.class_name,
                          new_quota)

    def test_kb_quota_default_class_get_method(self):
        actual_value = self.get_quota_for_class(DEFAULT_CLASS)
        expected_value = DEFAULT_QUOTAS.copy()
        expected_value['class_name'] = DEFAULT_CLASS
        self.assertEqual(actual_value, expected_value)

    def test_kb_quota_class_get_method_for_random_class_name(self):
        # An unknown class name yields the default quotas.
        actual_value = self.get_quota_for_class("random_class")
        expected_value = DEFAULT_QUOTAS.copy()
        expected_value['class_name'] = "random_class"
        self.assertEqual(actual_value, expected_value)

    def test_delete_quota_for_random_class(self):
        self.assertRaisesRegex(kingbirdclient.exceptions.APIException, "404 *",
                               self.delete_quota_for_class, 'random_class')
|
"""Sentiment analysis of Amazon baby-product reviews with turicreate."""
import turicreate as tc

reviews = tc.SFrame.read_csv('../data/amazon_baby.csv')
# Drop neutral ratings; label ratings >= 4 as positive sentiment.
reviews = reviews[reviews['rating'] != 3]
reviews['sentiment'] = reviews['rating'] >= 4
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible',
                  'bad', 'terrible', 'awful', 'wow', 'hate']
reviews['word_count'] = tc.text_analytics.count_words(reviews['review'])
# Per-review count column for each selected word, plus corpus-wide totals.
selected_words_count = {}
for word in selected_words:
    # Fix: bind `word` as a default argument so the lambda does not rely on
    # late binding of the loop variable if evaluation is deferred.
    reviews[str(word)] = reviews['word_count'].apply(
        lambda row, word=word: row[word] if word in row else 0)
    selected_words_count[word] = reviews[word].sum()
sorted_selected_words = sorted(selected_words_count.items(), key=lambda x: x[1], reverse=True)
train_data, test_data = reviews.random_split(.8, seed=0)
selected_words_model = tc.logistic_classifier.create(dataset=train_data, features=selected_words,
                                                     validation_set=test_data,
                                                     target='sentiment')
coefficients = selected_words_model.__getattribute__('coefficients')
coefficients = coefficients.sort('value')
evaluation_results = selected_words_model.evaluate(test_data)
diaper_champ_reviews = reviews[reviews['name'] == 'Baby Trend Diaper Champ']
sentiment_model = tc.logistic_classifier.create(dataset=train_data, target='sentiment', features=['word_count'],
                                                validation_set=test_data)
evaluation_results_sentiment = sentiment_model.evaluate(test_data)
diaper_champ_reviews['predicted_sentiment'] = sentiment_model.predict(diaper_champ_reviews, output_type='probability')
# Fix: the original line ended with a stray "\" continuation that glued it
# to the next statement and made the whole script a SyntaxError.
diaper_champ_reviews = diaper_champ_reviews.sort('predicted_sentiment', ascending=False)
result = selected_words_model.predict(diaper_champ_reviews[0:1], output_type='probability')
|
import math


def funcao(val):
    """Function whose root is sought (f(x) = sin x)."""
    return math.sin(val)


def falsa_posicao(f, x1, x2, n, verbose=False):
    """Approximate a root of ``f`` in [x1, x2] by the false-position method.

    Runs exactly ``n`` iterations and returns the last estimate.  When
    ``verbose`` is true, prints the estimate after every iteration (the
    original script's behavior).

    Bug fix: when f(_x) * f(x1) >= 0 the original swapped the endpoints
    (x1 = _x, x2 = old x1), which put two same-signed points in the
    bracket and destroyed the method's sign-change invariant.  The correct
    update keeps x2 and replaces x1 with the new estimate.

    :param f: callable, the function to find a root of
    :param x1: left endpoint of the initial interval
    :param x2: right endpoint of the initial interval
    :param n: number of iterations to perform
    :param verbose: print each iteration's estimate when True
    :return: the final root estimate (x1 if n == 0)
    """
    _x = x1  # defined even for n == 0 (original raised NameError then)
    for _ in range(n):
        f1 = f(x1)
        f2 = f(x2)
        # Secant / false-position update.
        _x = (x1 * f2 - x2 * f1) / (f2 - f1)
        if f(_x) * f1 < 0:
            # Root lies in [x1, _x]: shrink the right endpoint.
            x2 = _x
        else:
            # Root lies in [_x, x2]: shrink the left endpoint.
            x1 = _x
        if verbose:
            print(_x)
    return _x


if __name__ == "__main__":
    x1 = float(input("Intervalo: "))
    x2 = float(input())
    z = int(input("Numero de iteracoes:\n"))
    falsa_posicao(funcao, x1, x2, z, verbose=True)
|
import datetime
from enum import Enum
import jwt
from aiohttp import web
from aiohttp_session import get_session
from playhouse.postgres_ext import *
from error import _error
from model.base import *
from model.tariff import Tariff
class UserStatus(Enum):
    """Account lifecycle / privilege level of a User.

    NOTE(review): ``User.status`` defaults to 0, which matches none of
    these members — confirm whether that is intended.
    """
    UNCONFIRMED = 1
    CONFIRMED = 2
    ADMINISTRATOR = 3
class User(BaseModel):
    """Application user persisted via peewee, with JWT-based auth helpers.

    SECURITY NOTE(review): passwords are stored and compared in plain
    text in this model — they should be hashed (e.g. bcrypt) before
    storage; flagged here rather than silently changed because the stored
    data format would have to migrate too.
    """
    first_name = TextField(null=True)
    last_name = TextField(null=True)
    email = TextField(unique=True)
    password = TextField()  # plain text — see security note above
    phone_number = TextField(unique=True)
    # Default 0 does not correspond to any UserStatus member (they start
    # at 1) — presumably "no status yet"; TODO confirm.
    status = IntegerField(default=0, choices=UserStatus)
    tariff = ForeignKeyField(Tariff, null=True)
    links = JSONField(null=True)
    created_at = DateTimeField(default=datetime.datetime.now)

    def encode_auth_token(self):
        """
        Generates the Auth Token

        :return: signed JWT (bytes under PyJWT < 2, str under PyJWT >= 2);
            callers .decode("utf-8") the result
        """
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
            'iat': datetime.datetime.utcnow(),
            'sub': self.id,
        }
        # `config` is expected to come from the star import of model.base.
        return jwt.encode(
            payload=payload,
            key=config.SECRET_KEY,
            algorithm='HS256')

    @staticmethod
    def decode_auth_token(auth_token):
        """
        Decodes the auth token

        :param auth_token: JWT previously produced by encode_auth_token
        :return: the user id (``sub`` claim), or False when the token is
            expired or invalid
        """
        try:
            # Pin the accepted algorithms: required by PyJWT >= 2 and
            # prevents algorithm-confusion attacks on older versions.
            payload = jwt.decode(auth_token, config.SECRET_KEY,
                                 algorithms=['HS256'])
            return payload['sub']
        except jwt.ExpiredSignatureError:
            return False
        except jwt.InvalidTokenError:
            return False

    @staticmethod
    def get_user_by_token(auth_token):
        """Return the User identified by a valid token, or False."""
        user_id = User.decode_auth_token(auth_token)
        if user_id:
            return User.get(id=user_id)
        else:
            return False

    @staticmethod
    async def handle_registration_form(data, request):
        """Create a user from a registration form and start a session.

        :param data: mapping with 'phone', 'password' and optional 'email'
        :param request: aiohttp request (used to obtain the session)
        :return: JSON response carrying the auth token, or an error response
        """
        session = await get_session(request)
        # Bug fix: the original condition was `'phone' and 'password' in
        # data`, which only tested for 'password' because the literal
        # 'phone' is always truthy.
        if 'phone' in data and 'password' in data:
            phone = data['phone']
            password = data['password']
            email = data['email'] if 'email' in data else None
            if phone and password:
                user = User.create(phone_number=phone, email=email,
                                   password=password)
                auth_token = user.encode_auth_token().decode("utf-8")
                session['auth_token'] = auth_token
                return web.json_response({'auth_token': auth_token})
        return _error.error_response(_error, _error.EMPTY_FIELD)

    @staticmethod
    async def handle_authorization_form(data, request):
        """Authenticate by email/password and start a session.

        :param data: mapping with 'email' and 'password'
        :param request: aiohttp request (used to obtain the session)
        :return: JSON response carrying the auth token, or an error response
        """
        session = await get_session(request)
        if 'email' in data and 'password' in data:
            email = data['email']
            password = data['password']
            if email and password:
                # Plain-text password comparison — see class docstring.
                user = User.select().where(User.email == email,
                                           User.password == password)
                if len(user) == 1:  # idiomatic form of user.__len__() == 1
                    user = user.get()
                    auth_token = user.encode_auth_token().decode("utf-8")
                    session['auth_token'] = auth_token
                    return web.json_response({'auth_token': auth_token})
                else:
                    return _error.error_response(_error, _error.INVALID_LOGIN)
        return _error.error_response(_error, _error.EMPTY_FIELD)
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate Assignees to ACL
Create Date: 2017-10-05 14:55:16.487208
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from collections import namedtuple
from sqlalchemy import text
from ggrc.migrations.utils.resolve_duplicates import create_new_table
revision = '33d043d8ba29'
down_revision = '356f329cda52'

# Permission flags attached to an access-control role:
# (read, update, delete, mandatory), each 0 or 1.
AC_PERMISSIONS = namedtuple("AC_PERMISSIONS", "read update delete mandatory")

# Roles granted directly on an Assessment (replacing the old assignees).
ASSIGNEE_ROLES = {
    "Creators": AC_PERMISSIONS(1, 1, 0, 1),
    "Assignees": AC_PERMISSIONS(1, 1, 0, 1),
    "Verifiers": AC_PERMISSIONS(1, 1, 0, 0),
}

# Roles propagated to objects mapped to an Assessment.
MAPPED_ROLES = {
    "Creators Mapped": AC_PERMISSIONS(1, 0, 0, 0),
    "Assignees Mapped": AC_PERMISSIONS(1, 0, 0, 0),
    "Verifiers Mapped": AC_PERMISSIONS(1, 0, 0, 0),
    # Mapped Document has another set of permissions
    "Creators Document Mapped": AC_PERMISSIONS(1, 1, 1, 0),
    "Assignees Document Mapped": AC_PERMISSIONS(1, 1, 1, 0),
    "Verifiers Document Mapped": AC_PERMISSIONS(1, 1, 1, 0),
}

# Union of direct and mapped roles (mapped entries win on key collision).
ASSIGNEE_MAPPED_ROLES = dict(ASSIGNEE_ROLES, **MAPPED_ROLES)

# (old name, new name) pairs used to rewrite role names stored as text.
ROLES_TRANSLATION = [
    ("Creator", "Creators"),
    ("Assessor", "Assignees"),
    ("Verifier", "Verifiers"),
]
def create_translation_table(table_name, translation_data):
    """Create and populate a lookup table mapping old role names to new.

    :param table_name: name of the helper table to create
    :param translation_data: iterable of (old_name, new_name) pairs
    """
    rows = []
    for old_name, new_name in translation_data:
        rows.append({"old_name": old_name, "new_name": new_name})
    translation_table = create_new_table(
        table_name,
        sa.Column("new_name", sa.String(length=100)),
        sa.Column("old_name", sa.String(length=100)),
    )
    op.bulk_insert(translation_table, rows)
def update_comments():
    """Update recipients for assessment and assignee_type for old comments.

    Uses the ac_roles_translation helper table (old_name -> new_name) to
    rebuild the comma-separated role-name lists stored as plain text on
    two tables: ``assessments.recipients`` and ``comments.assignee_type``.
    LOCATE() matches any occurrence of the old name in the list, and
    group_concat reassembles the translated names per row.
    """
    # Update recipients in assessments
    op.execute("""
        UPDATE assessments a
        JOIN (
            SELECT a.id, group_concat(art.new_name SEPARATOR ',') recipients
            FROM assessments a
            JOIN ac_roles_translation art
            ON LOCATE(art.old_name, a.recipients) > 0
            GROUP BY a.id
        ) temp ON temp.id = a.id
        SET a.recipients = temp.recipients;
    """)
    # Update assignee_type in comments
    op.execute("""
        UPDATE comments a
        JOIN (
            SELECT a.id, group_concat(art.new_name SEPARATOR ',') assignee_type
            FROM comments a
            JOIN ac_roles_translation art
            ON LOCATE(art.old_name, a.assignee_type) > 0
            GROUP BY a.id
        ) temp ON temp.id = a.id
        SET a.assignee_type = temp.assignee_type;
    """)
def upgrade():
    """Upgrade database schema and/or data, creating a new revision.

    Steps:
      1. Guard: abort if custom AC roles already use the new assignee names.
      2. Add a self-referencing ``parent_id`` to access_control_list and
         widen its unique constraint to include it.
      3. Add an ``internal`` flag to access_control_roles and insert the
         new assignee / mapped roles.
      4. Copy existing assignee relationships into access_control_list via
         a temp table, then propagate them to mapped objects (both
         relationship directions).
      5. Rewrite role names stored as text and rename the reminder
         notification type.
    """
    connection = op.get_bind()
    # We suppose that there are no custom roles with names from
    # ASSIGNEE_MAPPED_ROLES. If such roles exist we need to raise an error
    # and remove them manually.
    res = connection.execute(
        text("""
            SELECT name
            FROM access_control_roles
            WHERE object_type = 'Assessment' and name IN :assignee_roles
        """),
        assignee_roles=ASSIGNEE_MAPPED_ROLES.keys()
    ).fetchone()
    if res:
        raise Exception(
            "Custom Role with name '{}' already exists in db. "
            "Migration will be stopped".format(res[0])
        )
    # Self-referencing parent_id lets a mapped ACL entry point at the
    # direct entry it was propagated from; CASCADE removes children with
    # their parent.
    op.add_column(
        "access_control_list",
        sa.Column("parent_id", sa.Integer(), nullable=True)
    )
    op.create_foreign_key(
        "fk_access_control_list_parent_id",
        "access_control_list", "access_control_list",
        ["parent_id"], ["id"],
        ondelete="CASCADE"
    )
    # The unique constraint must be rebuilt to include parent_id; the
    # person FK is dropped and re-created around it because it shares the
    # indexed columns.
    op.drop_constraint(
        "access_control_list_ibfk_3",
        "access_control_list",
        "foreignkey"
    )
    op.drop_constraint("person_id", "access_control_list", "unique")
    op.create_unique_constraint(
        "person_id",
        "access_control_list",
        ["person_id", "ac_role_id", "object_id", "object_type", "parent_id"]
    )
    op.create_foreign_key(
        "access_control_list_ibfk_3",
        "access_control_list", "people",
        ["person_id"], ["id"],
    )
    op.add_column(
        "access_control_roles",
        sa.Column("internal", sa.Boolean(), nullable=False, server_default="0")
    )
    create_translation_table("ac_roles_translation", ROLES_TRANSLATION)
    # Create new AC roles for assignees
    for assignee_role, permissions in ASSIGNEE_MAPPED_ROLES.items():
        connection.execute(
            text("""
                INSERT INTO access_control_roles(
                    name, object_type, created_at, updated_at, `read`, `update`,
                    `delete`, mandatory, non_editable, internal
                )
                VALUES(
                    :role, :object_type, NOW(), NOW(), :read, :update,
                    :delete, :mandatory, :non_editable, :internal
                );
            """),
            role=assignee_role,
            object_type="Assessment",
            read=permissions.read,
            update=permissions.update,
            delete=permissions.delete,
            mandatory=permissions.mandatory,
            non_editable="1",
            # Mapped roles are internal (not user-editable in the UI).
            internal="1" if assignee_role in MAPPED_ROLES else "0",
        )
    # Enable creation of temp tables
    op.execute("SET AUTOCOMMIT = 1;")
    op.execute("""
        CREATE TEMPORARY TABLE temp_assigned_objects (
            assignable_id int(11),
            assignable_type varchar(250),
            person_id int(11),
            role_id int(11),
            role_name varchar(250),
            context_id int(11),
            created_at datetime,
            updated_at datetime
        );
    """)
    # Add index to temp table to speed up migration process
    op.create_index(
        'fk_temp_assigned_objects',
        'temp_assigned_objects',
        ['assignable_id', 'assignable_type'],
        unique=False)
    # Migrate existing assignees to access_control_list.  The UNION covers
    # both relationship directions (Person as source and as destination);
    # max()/GROUP BY dedupes multiple relationships for the same
    # (object, person, role) tuple.
    connection.execute(
        text("""
            INSERT INTO temp_assigned_objects(
                assignable_id, assignable_type, person_id,
                role_id, role_name, context_id, created_at, updated_at
            )
            SELECT assignable_id, assignable_type, person_id,
                   role_id, max(role_name), max(context_id),
                   max(created_at), max(updated_at)
            FROM(
                SELECT r.destination_id assignable_id,
                       r.destination_type assignable_type,
                       r.source_id person_id,
                       acr.id role_id,
                       acr.name role_name,
                       r.context_id,
                       r.created_at,
                       r.updated_at
                FROM relationships r
                JOIN relationship_attrs ra ON r.id = ra.relationship_id
                JOIN access_control_roles acr ON acr.name IN :assignee_types AND
                                                 acr.object_type = r.destination_type
                JOIN ac_roles_translation art ON art.new_name = acr.name
                WHERE source_type = 'Person' AND
                      LOCATE(art.old_name, ra.attr_value) > 0
                UNION ALL
                SELECT r.source_id, r.source_type, r.destination_id,
                       acr.id, acr.name, r.context_id, r.created_at, r.updated_at
                FROM relationships r
                JOIN relationship_attrs ra ON r.id = ra.relationship_id
                JOIN access_control_roles acr ON acr.name IN :assignee_types AND
                                                 acr.object_type = r.source_type
                JOIN ac_roles_translation art ON art.new_name = acr.name
                WHERE destination_type = 'Person' AND
                      LOCATE(art.old_name, ra.attr_value) > 0
            ) tmp
            GROUP BY assignable_id, assignable_type, person_id, role_id;
        """),
        assignee_types=ASSIGNEE_ROLES.keys()
    )
    op.execute("""
        INSERT INTO access_control_list(
            person_id, ac_role_id, object_id, object_type,
            created_at, updated_at, context_id
        )
        SELECT person_id, role_id, assignable_id, assignable_type,
               created_at, updated_at, context_id
        FROM temp_assigned_objects;
    """)
    # Propagate assignee roles to objects mapped to assessed objects; the
    # two INSERTs below handle the two relationship directions.
    op.execute("""
        CREATE TEMPORARY TABLE temp_mapped_objects (
            mapped_id int(11),
            mapped_type varchar(250),
            person_id int(11),
            role varchar(250),
            context_id int(11),
            parent_id int(11),
            created_at datetime,
            updated_at datetime
        );
    """)
    op.execute("""
        INSERT INTO temp_mapped_objects(
            mapped_id, mapped_type, person_id,
            role, context_id, parent_id, created_at, updated_at
        )
        SELECT r.source_id,
               r.source_type,
               tao.person_id,
               CONCAT(tao.role_name,
                      CASE
                          WHEN r.source_type = 'Document' THEN ' Document'
                          ELSE ''
                      END,
                      ' Mapped'),
               r.context_id,
               acl.id,
               r.created_at,
               r.updated_at
        FROM relationships r
        JOIN temp_assigned_objects tao ON
             tao.assignable_id = r.destination_id AND
             tao.assignable_type = r.destination_type
        JOIN access_control_list acl ON
             acl.object_type = tao.assignable_type AND
             acl.object_id = tao.assignable_id AND
             acl.person_id = tao.person_id
        WHERE r.source_type != 'Person';
    """)
    op.execute("""
        INSERT INTO temp_mapped_objects(
            mapped_id, mapped_type, person_id,
            role, context_id, parent_id, created_at, updated_at
        )
        SELECT r.destination_id,
               r.destination_type,
               tao.person_id,
               CONCAT(tao.role_name,
                      CASE
                          WHEN r.destination_type = 'Document' THEN ' Document'
                          ELSE ''
                      END,
                      ' Mapped'),
               r.context_id,
               acl.id,
               r.created_at,
               r.updated_at
        FROM relationships r
        JOIN temp_assigned_objects tao ON
             tao.assignable_id = r.source_id AND
             tao.assignable_type = r.source_type
        JOIN access_control_list acl ON
             acl.object_type = tao.assignable_type AND
             acl.object_id = tao.assignable_id AND
             acl.person_id = tao.person_id
        WHERE r.destination_type != 'Person';
    """)
    op.execute("""
        INSERT INTO access_control_list(
            person_id, ac_role_id, object_id, object_type,
            created_at, updated_at, context_id, parent_id
        )
        SELECT tmo.person_id, acr.id, tmo.mapped_id, tmo.mapped_type,
               max(tmo.created_at), max(tmo.updated_at), max(tmo.context_id),
               tmo.parent_id
        FROM temp_mapped_objects tmo
        JOIN access_control_roles acr ON
             acr.object_type = "Assessment" AND
             acr.name = tmo.role
        GROUP BY tmo.person_id, acr.id, tmo.mapped_id, tmo.mapped_type,
                 tmo.parent_id;
    """)
    # Clean up helper tables and restore transactional behavior.
    op.execute("""
        DROP TABLE IF EXISTS temp_assigned_objects;
    """)
    op.execute("""
        DROP TABLE IF EXISTS temp_mapped_objects;
    """)
    update_comments()
    op.execute("""
        DROP TABLE IF EXISTS ac_roles_translation;
    """)
    op.execute("SET AUTOCOMMIT = 0;")
    # Rename the reminder notification type to the new role terminology.
    op.execute("""
        UPDATE notification_types
        SET name = 'assessment_assignees_reminder'
        WHERE name = 'assessment_assessor_reminder';
    """)
def downgrade():
    """Downgrade database schema and/or data back to the previous revision.

    Reverses upgrade(): deletes the ACL rows and roles it created, drops
    the parent_id / internal columns, translates role names back, renames
    the notification type, and restores the original unique constraint.
    """
    connection = op.get_bind()
    # Remove ACL entries for the assignee/mapped roles, then the roles
    # themselves.
    connection.execute(
        text("""
            DELETE acl
            FROM access_control_list acl
            JOIN access_control_roles acr ON acr.id = acl.ac_role_id
            WHERE acr.name IN :assignee_types
        """),
        assignee_types=ASSIGNEE_MAPPED_ROLES.keys()
    )
    connection.execute(
        text("""
            DELETE FROM access_control_roles
            WHERE name IN :assignee_types
        """),
        assignee_types=ASSIGNEE_MAPPED_ROLES.keys()
    )
    op.drop_constraint(
        "fk_access_control_list_parent_id",
        "access_control_list",
        "foreignkey",
    )
    op.drop_column("access_control_list", "parent_id")
    op.drop_column("access_control_roles", "internal")
    # Build the translation table with the pairs reversed (new -> old).
    create_translation_table(
        "ac_roles_translation",
        [(new, old) for old, new in ROLES_TRANSLATION]
    )
    update_comments()
    op.execute("""
        DROP TABLE IF EXISTS ac_roles_translation;
    """)
    op.execute("""
        UPDATE notification_types
        SET name = 'assessment_assessor_reminder'
        WHERE name = 'assessment_assignees_reminder';
    """)
    # Restore the original unique constraint (without parent_id); the
    # person FK is dropped and re-created around the rebuild.
    op.drop_constraint(
        "access_control_list_ibfk_3",
        "access_control_list",
        "foreignkey"
    )
    op.drop_constraint("person_id", "access_control_list", "unique")
    op.create_unique_constraint(
        "person_id",
        "access_control_list",
        ["person_id", "ac_role_id", "object_id", "object_type"]
    )
    op.create_foreign_key(
        "access_control_list_ibfk_3",
        "access_control_list", "people",
        ["person_id"], ["id"],
    )
|
import random #to shuffle the training set
import time #to time learning and and classification
import nltk
from textblob import TextBlob #to tokenize our sentences into words
from nltk.corpus import stopwords #to remove unwanted stop words
from textblob.classifiers import NaiveBayesClassifier
def get_list_tuples(read_file):
    """Read a tab-separated (label, message) file and return (word, label)
    tuples for at most the first 500 parseable lines.

    Words are lower-cased; stop words and pure digits are skipped.

    :param read_file: path to the tab-separated corpus file
    :return: list of (word, label) tuples (possibly empty)
    """
    list_tuples = []
    # Hoist the stop-word list into a set once: stopwords.words() builds a
    # fresh list on every call and `in list` is an O(n) scan, which made
    # the original per-word membership test extremely slow.
    stop_words = set(stopwords.words())
    with open(read_file, "r") as r:
        c = 0
        for line in r:
            tabsep = line.strip().split('\t')
            msg = TextBlob(tabsep[1])
            try:
                words = msg.words
            except Exception:
                continue
            for word in words:
                if word not in stop_words and not word.isdigit():
                    list_tuples.append((word.lower(), tabsep[0]))
            c += 1  # limiting factor begins
            if c == 500:  # limiting factor ends
                break
    # Bug fix: the original only returned inside the `c == 500` branch, so
    # a file with fewer than 500 lines made the function return None.
    return list_tuples
a = time.time()
entire_data = get_list_tuples("SMSSpamCollection.txt")
print "It took "+str(time.time()-a)+" seconds to import data"
print 'data imported'
random.seed(1)
random.shuffle(entire_data)
train = entire_data[:250]
test = entire_data[251:500]
print 'training data'
a = time.time()
cl = NaiveBayesClassifier(train)
print "It took "+str(time.time()-a)+" seconds to train data"
print 'data trained, now checking accuracy:'
accuracy = cl.accuracy(test)
print "accuracy: "+str(accuracy)
print cl.classify("Hey bud, what's up") #ham
print cl.classify("Get a brand new mobile phone by being an agent of The Mob! Plus loads more goodies! For more info just text MAT to 87021") #spam
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.