index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,700 | 080ebe5bc2786436664018612e3064856dd65b4e | #
# Imports
#
from wolfulus import *
from ..util.player import *
from ..util.chat import *
from ..util.timer import *
import random
#
# Command
#
class MataMataCommand(Command):
    """Admin-driven "Mata-Mata" (knockout) PvP tournament event.

    Registers the event chat commands and keeps all tournament state
    (enrolled players, current fighters, active waiting side, join timer)
    on the instance. Player-facing strings are intentionally Portuguese.
    """

    # Side constants (original note: "do not change").
    UP = 1
    DOWN = 2

    # Constructor
    def __init__(self):
        """Register the event chat commands and reset all tournament state."""
        self.register('/chamar', self.command_chamar)
        self.register('/regrasmt', self.command_regras)
        self.register('/fase', self.command_fase_generico)
        self.register('/semi', self.command_semi)
        self.register('/disputa3', self.command_disputa)
        self.register('/final', self.command_final)
        self.register('/abrirnovaarena', self.command_open)
        self.register('/novaarena', self.command_go)
        self.register('/wins', self.command_finalizar)
        self.time = 0            # seconds left before the /novaarena move closes
        self.timer = False       # repeating countdown handle (False = not running)
        self.open = False        # True while players may join via /novaarena
        self.players = dict()    # enrolled players: name -> server index
        self.lado = self.UP      # which waiting area is currently active
        self.fighter1 = None     # fighters of the match currently in the ring
        self.fighter2 = None
        return

    # Finish the current fight: /wins <winner name>
    def command_finalizar(self, player, arguments):
        """Admin-only. Announce the winner, move them to the opposite waiting
        side for the next round, and teleport the loser out (map 0)."""
        if not player.is_admin():
            return True
        if len(arguments) != 1:
            player.message('Uso: /wins <nome do vencedor>')
            return True
        if (self.fighter1 is None or self.fighter2 is None):
            player.message('[Sistema] Nenhuma luta foi realizada.')
            return True
        index = Server.find_by_name(arguments[0])
        if (index >= 0):
            target = Player(index)
            self.switch_sides(target)
            Server.send_announcement_all('%s wins' % target.get_name())
            # Whichever current fighter is NOT the winner gets removed.
            if (target.get_name() == self.fighter1.get_name()):
                self.fighter2.teleport(0, 125, 125)
            elif (target.get_name() == self.fighter2.get_name()):
                self.fighter1.teleport(0, 125, 125)
        return True

    # Open the event: /abrirnovaarena <seconds>
    def command_open(self, player, arguments):
        """Admin-only. Reset state, open joining, and start the 1 s countdown
        that will close joining after <tempo> seconds."""
        if not player.is_admin():
            return True
        if len(arguments) != 1 or not arguments[0].isdigit():
            player.message('Uso: /abrirnovaarena <tempo>')
            return True
        self.time = int(arguments[0])
        self.open = True
        self.players = dict()
        self.lado = self.UP
        self.fighter1 = None
        self.fighter2 = None
        if self.timer != False:
            # A previous countdown is still alive; cancel it before rearming.
            timer.clear(self.timer)
        self.timer = timer.repeat(self.command_timer, 1000, self.time + 1)
        player.message('[Sistema] Nova Arena foi aberta!')
        Server.send_message_all('[Sistema] %s abriu Nova Arena!' % player.get_name())
        Server.send_announcement_all('[Sistema] Move ativado!')
        Server.send_announcement_all('Digite /novaarena para ir ao evento!')
        return True

    # Join the event: /novaarena
    def command_go(self, player, arguments):
        """Enroll the caller while the event is open; joining twice is a no-op
        apart from the reminder message."""
        if self.open == False:
            player.message('[Sistema] Nova Arena nao esta aberta no momento.')
        else:
            if not player.get_name() in self.players.keys():
                self.players[player.get_name()] = player.get_index()
                player.message('[Sistema] Voce sera movido em alguns segundos..')
                player.message('Nao relogue, nao mova ou sera eliminado!')
            else:
                player.message('[Sistema] Voce sera movido em alguns segundos..')
        return True

    # Countdown tick (fires once per second via timer.repeat)
    def command_timer(self):
        """Announce the remaining join time; on expiry close joining and
        teleport every enrolled player into the arena waiting area (map 6)."""
        if (self.time == 0):
            self.open = False
            self.timer = False
            for name in self.players.keys():
                player = Player(self.players[name])
                # NOTE(review): the stored index may have been reused after a
                # relog, so only move the player when the name still matches.
                if (player.get_name() == name):
                    if (player.is_playing()):
                        player.teleport(6, 60, 210)
            Server.send_announcement_all('[Sistema] Move /novaarena foi desativado, aguarde o proximo evento!')
        else:
            Server.send_announcement_all('[Sistema] Move /novaarena fecha em %d segundos.' % self.time)
        self.time = self.time - 1
        return

    # Upper waiting area bounds: x in [50, 75], y in [180, 230].
    # NOTE(review): the original Portuguese labels on these two helpers were
    # swapped ("de baixo"/"de cima"); coordinates below are authoritative.
    def waiting_area_up(self, x, y):
        if (x >= 50 and y >= 180):
            if (x <= 75 and y <= 230):
                return True
        return False

    # Lower waiting area bounds: x in [50, 75], y in [122, 160].
    def waiting_area_down(self, x, y):
        if (x >= 50 and y >= 122):
            if (x <= 75 and y <= 160):
                return True
        return False

    # Sends a player to the other side
    def switch_sides(self, player):
        """Teleport `player` to the waiting area opposite the active side."""
        if self.lado == self.DOWN:
            player.teleport(6, 60, 210)
        elif self.lado == self.UP:
            player.teleport(6, 60, 140)
        return

    # Is on waiting area
    def is_on_waiting_area(self, player):
        """True when `player` is on map 6 inside the currently active area."""
        if player.get_map() == 6:
            if self.lado == self.UP:
                if self.waiting_area_up(player.get_x(), player.get_y()):
                    return True
            elif self.lado == self.DOWN:
                if self.waiting_area_down(player.get_x(), player.get_y()):
                    return True
        return False

    # Call the next fight: /chamar
    def command_chamar(self, player, arguments):
        """Admin-only. Pick two random non-admin players from the active
        waiting area and teleport them into the ring; a lone leftover player
        advances to the next phase unopposed."""
        if not player.is_admin():
            return True
        players = []
        for i in range(Server.player_start, Server.object_max):
            p = Player(i)
            if p.is_admin():
                continue
            if not p.is_playing():
                continue
            if self.is_on_waiting_area(p):
                players.append(i)
        if len(players) > 1:
            random.shuffle(players)
            p1 = players[0]
            players.pop(0)
            p2 = players[0]
            players.pop(0)
            player1 = Player(p1)
            player1.teleport(6, 63, 172)
            player1.message('[Sistema] Sua vez, prepare-se para a luta!')
            self.fighter1 = player1
            player2 = Player(p2)
            player2.teleport(6, 63, 173)
            player2.message('[Sistema] Sua vez, prepare-se para a luta!')
            self.fighter2 = player2
            Server.send_message_all('[Sistema] %s chamou a proxima luta!' % player.get_name())
            player.message('[Sistema] Restam (%d) jogadores para lutar!' % len(players))
            Server.send_announcement_all('<< [%s] >>' % player.get_name())
            Server.send_announcement_all('%s vs %s' % (player1.get_name(), player2.get_name()))
        elif len(players) == 1:
            # Odd player out: advance them automatically.
            p = Player(players[0])
            self.switch_sides(p)
            Server.send_announcement_all('%s passa para a proxima fase por falta de adversario.' % p.get_name())
            player.message('[Sistema] Todos os jogadores ja lutaram!')
            player.message('Avance de fase para prosseguir com o evento!!')
        else:
            player.message('[Sistema] Todos os jogadores ja lutaram!')
            player.message('Avance de fase para prosseguir com o evento!!')
        return True

    # Announce the rules: /regrasmt
    def command_regras(self, player, arguments):
        """Admin-only. Broadcast the event rules to everyone."""
        if not player.is_admin():
            return True
        player.message('[Sistema] As regras foram ditas!')
        Server.send_message_all('[Sistema] %s passou as Regras! Leia o global.' % player.get_name())
        Server.send_announcement_all('[Sistema] Regras do evento:')
        Server.send_announcement_all('- Lutas de 1 round, final com 3 rounds.')
        Server.send_announcement_all('- Andou, TS, Antes=infracao / 2=eliminado!')
        Server.send_announcement_all('- Entrou na area de PVP = movido!')
        Server.send_announcement_all('- Nao fique away, nao vamos esperar voltar!')
        Server.send_announcement_all('----> Use /re off , Boa sorte! <----')
        return True

    # Phase announcement helper
    def command_fase(self, player, fase):
        """Broadcast the start of numbered phase `fase`."""
        player.message('[Sistema] Fase %d iniciada!' % (fase))
        Server.send_announcement_all('==========================')
        Server.send_announcement_all('~ Fase (%d) do Mata-Mata iniciada! ~' % (fase))
        Server.send_announcement_all('==========================')
        return True

    # Start a numbered phase: /fase <n>
    def command_fase_generico(self, player, arguments):
        """Admin-only. Select the waiting side by phase parity, then announce."""
        if not player.is_admin():
            return True
        if len(arguments) != 1 or not arguments[0].isdigit():
            player.message('Uso: /fase <numero_da_fase>')
            return True
        fase = int(arguments[0])
        # Odd phases fight from the upper side, even phases from the lower.
        if fase % 2 == 1:
            self.lado = self.UP
        else:
            self.lado = self.DOWN
        self.command_fase(player, int(arguments[0]))
        return True

    # Semi-final: /semi
    def command_semi(self, player, arguments):
        """Admin-only. Announce the semi-final and flip the active side."""
        if not player.is_admin():
            return True
        player.message('[Sistema] Fase semi-final iniciada!')
        Server.send_announcement_all('==========================')
        Server.send_announcement_all('~ Semi-Final do Mata-Mata iniciada! ~')
        Server.send_announcement_all(' Regras: 2 Rounds, matou = 2x Stabs ')
        Server.send_announcement_all('==========================')
        if self.lado == self.UP:
            self.lado = self.DOWN
        else:
            self.lado = self.UP
        return True

    # Third-place match: /disputa3
    def command_disputa(self, player, arguments):
        """Admin-only. Announce the third-place match and flip the active side."""
        if not player.is_admin():
            return True
        player.message('[Sistema] Disputa do terceiro lugar iniciada!')
        Server.send_announcement_all('==========================')
        Server.send_announcement_all('~ Disputa do terceiro lugar iniciada! ~')
        Server.send_announcement_all(' Regras: 2 Rounds, matou = 2x Stabs ')
        Server.send_announcement_all('==========================')
        if self.lado == self.UP:
            self.lado = self.DOWN
        else:
            self.lado = self.UP
        return True

    # Final: /final
    def command_final(self, player, arguments):
        """Admin-only. Announce the final and flip the active side."""
        if not player.is_admin():
            return True
        player.message('[Sistema] Fase final iniciada!')
        Server.send_announcement_all('==========================')
        Server.send_announcement_all('~ Final do Mata-Mata iniciada! ~')
        Server.send_announcement_all(' Regras: 3 Rounds, matou = 2x Stabs ')
        Server.send_announcement_all('==========================')
        if self.lado == self.UP:
            self.lado = self.DOWN
        else:
            self.lado = self.UP
        return True
#
# Initialization
#
commands.register(MataMataCommand()) |
# setup.py for fig-py: packages the project and bundles its default template.
from setuptools import setup, find_packages
# NOTE(review): parse_requirements is never used below, and pip.req was
# removed in pip 10 -- this import will break installs under modern pip.
from pip.req import parse_requirements
import os

# hack for working with pandocs
import codecs
try:
    codecs.lookup('mbcs')
except LookupError:
    # Register a stand-in for the Windows-only 'mbcs' codec: return the
    # utf-8 codec when 'mbcs' is requested, None for any other name.
    utf8 = codecs.lookup('utf-8')
    func = lambda name, enc=utf8: {True: enc}.get(name=='mbcs')
    codecs.register(func)

# install readme: convert README.md to reST for PyPI when pypandoc is present.
readme = os.path.join(os.path.dirname(__file__), 'README.md')
try:
    import pypandoc
    long_description = pypandoc.convert(readme, 'rst')
except (IOError, ImportError):
    long_description = ""

# include template
data_files = []
eager_files = []

# Figure out the necessary stuff for the template: walk the default template
# tree and collect every non-.pyc file for data_files / eager_resources.
rel_path = 'fig_py/default_template'
for dir_name, dir_list, filename_list in os.walk(rel_path):
    file_list = filter(lambda f: not f.endswith('.pyc'), filename_list)
    file_list = [os.path.join(dir_name, filename) for filename in file_list]
    data_files.append((dir_name, file_list))
    eager_files.extend(file_list)

# setup
setup(
    name='fig-py',
    version='0.0.5',
    description='An utility for configuring python projects from jinja templates.',
    long_description = long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
    ],
    keywords='',
    author='Brian Abelson',
    author_email='brian@newslynx.org',
    url='http://github.com/newslynx/fig-py',
    license='MIT',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    namespace_packages=[],
    include_package_data=False,
    zip_safe=False,
    # Pinned dependencies (NOTE(review): exact pins are very old).
    install_requires=[
        "Jinja2==2.7.2",
        "MarkupSafe==0.23",
        "PyYAML==3.11",
        "Pygments==1.6",
        "Sphinx==1.2.2",
        "docutils==0.11",
        "nose==1.3.3",
        "pypandoc==0.8.2",
        "wsgiref==0.1.2"
    ],
    tests_require=[],
    data_files = data_files,
    eager_resources = eager_files,
    entry_points = {
        'console_scripts': [
            'fig = fig_py:main',
        ]
    }
)
|
# Make the `globaltags.get_menu` template tags available in every template
# without an explicit {% load %}. NOTE(review): add_to_builtins was removed
# in Django 1.9 -- verify this project's Django version.
from django.template import add_to_builtins
add_to_builtins('globaltags.get_menu')
|
# Tic-tac-toe referee: reads a move list like "[[0,0],[1,1],...]" from stdin
# (players A and B alternate, A first) and prints the result:
# 'A' / 'B' for a completed line, 'Draw' for 9 moves, else 'Pending'.
inp = input()
# Strip the outer "[[" and "]]", then split into "r,c" fragments.
tmp = inp[2:len(inp) - 2]
nums = tmp.split('],[')
moves = []
for num in nums:
    # BUG FIX: build a fresh [row, col] pair per move. The original appended
    # one shared list that kept growing, so every entry of `moves` aliased
    # the same object and the tallies below were wrong.
    moves.append([int(num[0]), int(num[2])])
# Per-player tallies: indices 0-2 count rows, 3-5 columns,
# 6 the main diagonal, 7 the anti-diagonal. A line is won at count 3.
A = [0] * 8
B = [0] * 8
l = len(moves)
for n in range(l):
    if n % 2:
        # Odd move numbers belong to player B.
        B[moves[n][0]] += 1
        B[moves[n][1] + 3] += 1
        if moves[n][0] == moves[n][1]:
            B[6] += 1
        if moves[n][0] + moves[n][1] == 2:
            B[7] += 1
    else:
        A[moves[n][0]] += 1
        A[moves[n][1] + 3] += 1
        if moves[n][0] == moves[n][1]:
            A[6] += 1
        if moves[n][0] + moves[n][1] == 2:
            A[7] += 1
if max(A) == 3:
    print('A')
elif max(B) == 3:
    print('B')
elif l == 9:
    print('Draw')
else:
    print('Pending')
|
984,704 | 615463f98aca6b9cbe0b821dc1b3e2322fb30c87 |
# Callback functions (extension demo)

# Function definition: print n, square it, and hand the result to `fn`.
def f1(n, fn):  # fn = callback
    print("n =", n)
    a = n*n
    fn(a)  # invoke the callback

# The callback itself: just echoes the value it receives.
def callback(a):
    print("callback, a =", a)

# Forward call: f1 computes 2*2 and callback prints it.
f1(2, callback)

# Process: a running program
# Thread: one of several execution branches inside a process
# Synchronous: runs on the same thread
# Asynchronous: runs on a different thread
|
# Zero-shot attribute-rank classifier: fits one Gaussian per seen class,
# synthesizes Gaussians for unseen classes from pairs of seen classes,
# then classifies held-out samples by maximum density.
# import csv
import numpy as np
from sklearn import mixture
import scipy
from scipy.stats import multivariate_normal
from sklearn import metrics
from copy import deepcopy

# rank.txt holds a Python literal (list of per-sample attribute vectors).
# NOTE(review): eval() on file contents is unsafe for untrusted input.
f = open('rank.txt', 'r')
b=f.read()
a=eval(b)
# b=b.split('\n')
# a=[]
# for x in b:
# a.append(eval(x))
# print (a)
# b=[]
# for x in a:
# y=x
# del x[2]
# del x[6]
# b.app
trueRank = np.asarray(a)
# trueRank= scipy.delete(trueRank, 2, 1)
# trueRank= scipy.delete(trueRank, 6, 1)
print (len(trueRank))
# One integer class label (1..n_class) per line.
f = open('../../nlabel.txt', 'r')
labels = [eval(line.strip()) for line in f]
n_att = len(trueRank[0])
n_class= 8
n = len(labels)
### Rank to be loaded from rank.txt
Rank = np.zeros((n,n_att))
# Group the first train_len samples by class label (labels are 1-based).
data= [[] for i in range(n_class)]
train_len=680
for i in range(train_len):
    data[labels[i]-1].append(trueRank[i])
for i in range(n_class):
    print(len(data[i-1]))
gaudist=[]
mean=[]
covar=[]
# unseen = [[[4,2],[1,8],[4,6],[8,1],[1,8],[1,4], [6,7], [4,3],[2,1],[3,2]]
# ,[[4,7], [1,2], [3,7], [4,8], [2,7], [3,8], [1,7], [5,6], [2,1], [4,8]]
# ,[[6,8], [3,4], [4,6], [8,6], [4,5], [5,7],[4,1], [4,3], [7,2],[2,1]]];
# unseen = [[[4,1],[1,2],[8,5],[1,2],[1,2], [6,1],[2,8],[3,4]]
# ,[[4,1], [1,2], [4,3], [2,3], [3,1], [1,3], [2,8], [4,7]]
# ,[[6,5], [3,8], [8,5], [4,8], [5,6],[4,6], [7,6],[2,5]]];
# Each unseen class is described, per attribute, by a pair of seen class ids;
# its mean is the midpoint of those two classes' means for that attribute.
unseen=[[[2,3],[1,3],[3,7],[5,7],[2,3],[4,8],[4,3],[2,1],[1,6],[8,5]]
,[[2,3],[1,3],[7,1],[1,3],[2,3],[4,8],[2,8],[5,2],[5,1],[8,5]]
,[[5,6],[7,6],[8,4],[6,8],[4,5],[6,7],[4,3],[7,3],[1,6],[2,6]]];
# unseen=[]
# Fit a single-component full-covariance Gaussian per seen class.
# NOTE(review): mixture.GMM was removed in scikit-learn >= 0.20; the modern
# replacement is mixture.GaussianMixture (means_ / covariances_).
for i in range(n_class):
    g=mixture.GMM(n_components=1,covariance_type='full')
    g.fit(data[i])
    mean.append(g.means_[0])
    covar.append(g.covars_[0])
    gaudist.append(g)
# Synthesize mean/covariance for every unseen class from its support set of
# seen classes; covariances are summed (weighted by membership s[j]) then
# divided by `support`.
for i in range(len(unseen)):
    me=[]
    s=[0 for i in range(n_class)]
    support=0
    for j in range(len(unseen[i])):
        me.append(np.array((mean[unseen[i][j][0]-1][j] + mean[unseen[i][j][1]-1][j])/2))
        s[unseen[i][j][0]-1]=1
        s[unseen[i][j][1]-1]=1
    fl=0
    # NOTE(review): once fl is set, `support` increments for EVERY later j
    # even when s[j] == 0 -- looks like an overcount; verify the intended
    # normalization is the size of the support set.
    for j in range(n_class):
        if fl:
            support+=1
            co+=s[j]*covar[j]
        elif s[j]:
            support+=1
            co=deepcopy(covar[j])
            fl=1
    mean.append(me)
    covar.append(np.array(co)/support)
# Classify the held-out tail by maximum Gaussian density over all
# (seen + synthesized) class models; class ids are 1-based.
predclass=[]
for i in range(len(labels[train_len:])):
    p=0
    clas=1
    for j in range(len(mean)):
        # print (j)
        temp=multivariate_normal(mean[j],covar[j]).pdf(trueRank[train_len+i])
        # print(temp)
        if(temp> p):
            p=temp
            clas=j+1
    predclass.append(clas)
    print (labels[train_len+i],clas)
print(metrics.classification_report(predclass,labels[train_len:]))
|
#!/usr/bin/env python
# Python 2 directory brute-forcer: tries every word in nova.txt as a path on
# the target host, prints responsive paths and appends them to nova_result.txt.
import requests
import datetime
from BeautifulSoup import BeautifulSoup
import urlparse
from termcolor import colored

def request(url):
    """GET http://<url>; returns the response, or None (implicit) when the
    connection fails entirely."""
    try:
        return requests.get("http://"+url)
    except requests.exceptions.ConnectionError:
        pass

target_url = "adityatekkali.edu.in"
with open("nova.txt","r") as wordlist_file:
    for line in wordlist_file:
        word = line.strip()
        test_url = target_url + "/" + word
        response = request(test_url)
        if response:
            # response1 = request(test_url)
            # Parse the hit page, mainly to collect any <form> elements.
            parsed_html = BeautifulSoup(response.content)
            forms_list = parsed_html.findAll("form")
            # print(test_url)
            print colored(test_url, 'green')
            # Record the hit (appended, one URL per entry).
            with open("nova_result.txt", 'a') as f:
                print >> f, test_url + "\n"
|
# Brainfuck-style esolang interpreter whose commands are Roman numerals:
# I=increment cell, V=decrement cell, X=move right, L=move left,
# D=output then read input, C/M=matching loop brackets ("[" / "]").
# Bootstrap: make sure the `roman` package is importable, installing it via
# pip when both pip and roman are available.
try:
    import pip
except:
    import roman
else:
    try:
        import roman
    except:
        # NOTE(review): after installing, `roman` is never imported here --
        # a fresh install likely still NameErrors below; verify.
        pip.main(["install","roman"])
import sys
# Tape of 30000 cells, as in classic brainfuck.
size = 30000
cells = [0]*size
pointer = 0
# Program source: first CLI argument as a file, else prompted interactively.
try:
    with open(sys.argv[1]) as file:
        nScript = [line.strip(" ") for line in file]
    except:
        get = input("Code: ")
        nScript = get.split()
#print(nScript)
# Decimal tokens are converted to Roman numerals; everything else is kept.
romanScript = ""
for i in nScript:
    try:
        romanScript += " ".join(roman.toRoman(int(i)))
    except:
        romanScript += i
#print(romanScript)
# All program input is read up front, one character per D command.
inputString = input("Input: ")
inIndex = 0
i = 0
while True:
    #print(romanScript[i],end="")
    if romanScript[i].upper() == "I":
        cells[pointer]+=1
    elif romanScript[i].upper() == "V":
        cells[pointer]-=1
    elif romanScript[i].upper() == "X":
        pointer+=1
    elif romanScript[i].upper() == "L":
        pointer-=1
    elif romanScript[i].upper() == "D":
        # D both prints the current cell and overwrites it with the next
        # input character (0 when input is exhausted).
        # NOTE(review): combining output+input in one opcode is unusual --
        # confirm this matches the language spec.
        print(chr(cells[pointer]),end="")
        try:
            cells[pointer] = ord(inputString[inIndex])
        except:
            cells[pointer] = 0
        inIndex+=1
    elif romanScript[i].upper() == "C":
        # "[": when the cell is 0, skip forward to the matching "M".
        if cells[pointer] == 0:
            count = 1
            while count > 0:
                i+=1
                if romanScript[i].upper() == "C":
                    count+=1
                elif romanScript[i].upper() == "M":
                    count-=1
    elif romanScript[i].upper() == "M":
        # "]": when the cell is non-zero, jump back to the matching "C".
        if cells[pointer] != 0:
            count = -1
            while count < 0:
                i-=1
                if romanScript[i].upper() == "C":
                    count+=1
                elif romanScript[i].upper() == "M":
                    count-=1
    i+=1
    # Halt when the instruction pointer runs off the end of the program.
    if i == len(romanScript):
        sys.exit(0)
|
# Connect to a running Minecraft: Pi Edition server (default host/port);
# `mc` is the handle used for all subsequent world API calls.
from mcpi.minecraft import Minecraft as mcs
mc = mcs.create()
|
984,709 | 124e654889603a1b3b2dc88340c1b55477e4d8b0 | # Assignment 3
# 010123102 Computer Programming Fundamental
#
# Assignment 3.3
# Given a number n, return True if n is in the range 1..10, inclusive.
# Unless "outsideMode" is True, in which case return True if the number is less or equal to 1, or greater or equal to 10.
#
# Phattharanat Khunakornophat
# ID 5901012610091
# SEP 1 2016
# Due Date SEP. 6 2016
num = int(input('Enter the integer: '))
outsideMode = str(input('Enable Outside Mode? Y/N: '))
def checkRange(num, outsideMode):
    """Print True/False for the assignment's range rule.

    Normal mode ('N'): True when num is in 1..10 inclusive.
    Outside mode ('Y'): True when num <= 1 or num >= 10.
    Any other mode string prints False. Returns None (prints only).
    """
    inside = 1 <= num <= 10
    outside = num <= 1 or num >= 10
    if outsideMode == 'Y':
        print(outside)
    elif outsideMode == 'N':
        print(inside)
    else:
        print(False)
checkRange(num, outsideMode)
|
984,710 | 36b2281d07c4dbddaa6e204b8c003d29c01ebfe4 | import hashlib
import time
import uuid
import requests
url = 'https://openapi.youdao.com/api'
APP_ID = '45a132825a61cef4'
APP_KEY = 'coTBoQjo3vi6tHwpDs1JDlMpslml98z2'
def get(form, to, word):
    """Send a signed (signType v3) translate request to the Youdao API and
    return the decoded JSON response.

    `form`/`to` are Youdao language codes; `word` is expected to be bytes
    (callers encode before calling -- see truncate()).
    NOTE(review): APP_ID/APP_KEY are credentials committed to source --
    rotate them and load from the environment instead.
    """
    data = {}
    data['from'] = form
    data['to'] = to
    data['signType'] = 'v3'
    curtime = str(int(time.time()))
    salt = str(uuid.uuid1())
    data['curtime'] = curtime
    # v3 signature: sha256(appId + truncated(query) + salt + curtime + appKey)
    src = APP_ID + truncate(word) + salt + curtime + APP_KEY
    sign = encrypt(src)
    data['appKey'] = APP_ID
    data['q'] = word
    data['salt'] = salt
    data['sign'] = sign
    response = do_request(data).json()
    return response
def encrypt(signStr):
    """Return the hexadecimal SHA-256 digest of the UTF-8 encoded string."""
    digest = hashlib.sha256(signStr.encode('utf-8'))
    return digest.hexdigest()
def truncate(q):
    """Shorten the query for signing per the Youdao v3 rule.

    `q` is UTF-8 bytes (or None). Strings of 20 chars or fewer are returned
    whole; longer ones become first-10 + length + last-10 characters.
    """
    if q is None:
        return None
    text = q.decode("utf-8")
    n = len(text)
    if n <= 20:
        return text
    return "{}{}{}".format(text[:10], n, text[-10:])
def do_request(data):
    """POST the signed form payload to the module-level Youdao endpoint `url`
    and return the raw requests.Response."""
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    return requests.post(url, data=data, headers=headers)
def get_translate_Chinese(word):
    """Translate Chinese text to English via the Youdao API.

    Returns a string headed by '翻译:\n' that concatenates the main
    translation, web-phrase hits and dictionary explanations; falls back to
    a fixed "no translation" message on any failure.
    NOTE(review): the bare excepts deliberately make this best-effort, but
    they also hide network/schema errors -- consider narrowing.
    """
    word=word.encode('UTF-8')
    response = get('zh-CHS', 'en', word)
    try:
        translate=''
        translates = response.get('translation')
        for i in translates:
            translate = translate+i
        try:
            # Append web-phrase values when the 'web' section is present.
            web=response.get('web')
            for i in web.get('value'):
                translate=translate+';'+i
        except :
            pass
        try:
            # Append dictionary explanations from the 'basic' section.
            explains = response.get('basic')
            for i in explains.get('explains'):
                translate = translate+';' + i
        except:
            pass
        src = '翻译:\n' + translate
        return src
    except:
        return '暂无此词翻译'
def get_translate_English(word):
    """Translate English text to Chinese via the Youdao API.

    Mirrors get_translate_Chinese: concatenates the main translation and the
    'basic' explanations (newline separated) under a '翻译:\n' header, or
    returns a fixed "no translation" message on failure.
    """
    word=word.encode('UTF-8')
    response = get('en', 'zh-CHS', word)
    try:
        translate=''
        try:
            translates = response.get('translation')
            for i in translates:
                translate = translate+i+'\n'
        except:
            pass
        try:
            # Dictionary explanations from the 'basic' section, one per line.
            explains = response.get('basic')
            for i in explains.get('explains'):
                translate = translate+i+'\n'
        except:
            pass
        src = '翻译:\n' + translate
        return src
    except :
        return '暂无此词翻译'
|
984,711 | e518bca0e64666aa16bb1abb1c11e3f96f983dcf | from .models import Faturamento
from rest_framework import serializers
class FaturamentoSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Faturamento model."""
    class Meta:
        model = Faturamento
        fields = '__all__'
# Python 2 smoke test for the external bookings endpoint: obtains a fresh
# collection number and enquiry number, then posts an availability check.
import urllib2
from xml.etree.ElementTree import XML, SubElement, tostring
url = 'https://polokelo-bookings.appspot.com/externalbookings'

# get a new collection number
xml = """
<testgenerator>
<action>generate collection number</action>
</testgenerator>
"""
req = urllib2.Request(url, xml, headers={'Content-Type':'text/plain'})
response = urllib2.urlopen(req)
xmlroot = XML(response.read())
collection_number = xmlroot.findtext('collectionnumber')

# get a new enquiry number
xml = """
<testgenerator>
<action>generate enquiry number</action>
</testgenerator>
"""
req = urllib2.Request(url, xml, headers={'Content-Type':'text/plain'})
response = urllib2.urlopen(req)
xmlroot = XML(response.read())
enquiry_number = xmlroot.findtext('enquirynumber')

# post the check availability request (numbers from above are interpolated)
xml = """
<enquiry>
<enquirybatchnumber>%s</enquirybatchnumber>
<email>jurgen.blignaut@gmail.com</email>
<guestagentcode>GA000</guestagentcode>
<action>check availability</action>
<enquirynumber>%s</enquirynumber>
<city>PCS</city>
<accommodation>
<type>HOM</type>
<rooms><single>1</single>
<twin>0</twin>
<double>0</double>
<family>0</family>
</rooms>
</accommodation>
<startdate>2010-6-18</startdate>
<duration>3</duration>
<adults>1</adults>
<children>0</children>
<disability>
<wheelchairaccess>no</wheelchairaccess>
<otherspecialneeds>no</otherspecialneeds>
</disability>
</enquiry>
""" % (collection_number, enquiry_number)
req = urllib2.Request(url, xml, headers={'Content-Type':'text/plain'})
response = urllib2.urlopen(req)
print response.read()
|
984,713 | 8a777e5145c63ca5f1f314cd60e4ca482ee5c474 | from. createAnimals import WalkingAnimal, Snake, SwimmingAnimal, Llama |
984,714 | 39d23e82d798b368977ff2300b6c580b8590e0d4 | '''
给定一个三角形,找出自顶向下的最小路径和。每一步只能移动到下一行中相邻的结点上。
例如,给定三角形:
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
自顶向下的最小路径和为 11(即,2 + 3 + 5 + 1 = 11)。
说明:
如果你可以只使用 O(n) 的额外空间(n 为三角形的总行数)来解决这个问题,那么你的算法会很加分。
'''
# class Solution(object):
# def minimumTotal(self, triangle):
# """
# :type triangle: List[List[int]]
# :rtype: int
# """
# '''
# dp问题:利用列表进行存储,每一行每个步骤结束后的最小值,那么在最后一行,其最小值为min(4+dp[0],4+dp[1],1+dp[0],1+dp[1]...)
# 所以状态转移方程为: 如果i==0 or i==len(triangle[row]) 那么其转移方程为dp[i]=dp[0]triangle[row][i] dp[i]=dp[i-1]+triangle[row][i]
# dp[i]=min(dp[i-1],dp[i])+triangle[row][i]
# 初始值为 dp[0]=triangle[0][0]
# '''
# if len(triangle)==1:
# return triangle[0][0]
# dp=[[triangle[0][0]]]
# for i in range(1,len(triangle)):
# for j in range(len(triangle[i])):
# dp.append([])
# # 边界只有一个邻边
# if j==0:
# dp[i].append(dp[i-1][j]+triangle[i][j])
# elif j==len(triangle[i])-1:
# dp[i].append(dp[i-1][j-1]+triangle[i][j])
# else:
# # 当前取值,在上一层的邻边最小值相加
# dp[i].append(min(dp[i-1][j-1],dp[i-1][j])+triangle[i][j])
# return min(dp[len(triangle)-1])
class Solution(object):
    def minimumTotal(self, triangle):
        """Return the minimal top-to-bottom path sum of `triangle`.

        Bottom-up DP, in place: starting from the second-to-last row, each
        cell absorbs the cheaper of its two children, so the answer bubbles
        up to triangle[0][0]. O(1) extra space; mutates the input
        (same as the original implementation). [[]] yields 0.
        """
        if triangle == [[]]:
            return 0
        for row in range(len(triangle) - 2, -1, -1):
            current, below = triangle[row], triangle[row + 1]
            for col, value in enumerate(current):
                current[col] = value + min(below[col], below[col + 1])
        return triangle[0][0]
# Quick manual check of Solution.minimumTotal (expected output: 11).
t = [
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
s = Solution()
res = s.minimumTotal(t)
print(res)
984,715 | aeb966e0ac9dfca2024bfd305754ffceaf4bf21b | import numpy as np
import os
import cv2
import pandas as pd
from torch.utils.data import Dataset
from tqdm import tqdm
import SimpleITK
import scipy.ndimage as ndimage
import SimpleITK as sitk
UPPER_BOUND = 400
LOWER_BOUND = -1000
def load_ct_images(path):
    """Read a CT volume with SimpleITK.

    Returns (array, spacing): the volume as float32 (slice-first order) and
    the last spacing component (slice thickness).
    """
    image = SimpleITK.ReadImage(path)
    spacing = image.GetSpacing()[-1]
    image = SimpleITK.GetArrayFromImage(image).astype(np.float32)
    return image, spacing
def load_itkfilewithtrucation(filename, upper=200, lower=-200):
    """
    load mhd files,set truncted value range and normalization 0-255
    :param filename: path to the image readable by SimpleITK
    :param upper: intensity ceiling before rescaling
    :param lower: intensity floor before rescaling
    :return: numpy array of the clamped volume rescaled to 0-255
    """
    # 1,tructed outside of liver value
    srcitkimage = sitk.Cast(sitk.ReadImage(filename), sitk.sitkFloat32)
    srcitkimagearray = sitk.GetArrayFromImage(srcitkimage)
    srcitkimagearray[srcitkimagearray > upper] = upper
    srcitkimagearray[srcitkimagearray < lower] = lower
    # 2,get tructed outside of liver value image
    sitktructedimage = sitk.GetImageFromArray(srcitkimagearray)
    # Preserve the original geometry on the rebuilt image.
    origin = np.array(srcitkimage.GetOrigin())
    spacing = np.array(srcitkimage.GetSpacing())
    sitktructedimage.SetSpacing(spacing)
    sitktructedimage.SetOrigin(origin)
    # 3 normalization value to 0-255
    rescalFilt = sitk.RescaleIntensityImageFilter()
    rescalFilt.SetOutputMaximum(255)
    rescalFilt.SetOutputMinimum(0)
    itkimage = rescalFilt.Execute(sitk.Cast(sitktructedimage, sitk.sitkFloat32))
    return sitk.GetArrayFromImage(itkimage)
def resize(image, mask, spacing, slice_thickness, scale_ratio):
    """Window intensities from [LOWER_BOUND, UPPER_BOUND] to [0, 1], then,
    when both `slice_thickness` and `scale_ratio` are truthy, zoom to the
    target thickness / in-plane scale (cubic for image, nearest for mask)."""
    image = (image - LOWER_BOUND) / (UPPER_BOUND - LOWER_BOUND)
    image[image > 1] = 1.
    image[image < 0] = 0.
    image = image.astype(np.float32)
    if slice_thickness and scale_ratio:
        image = ndimage.zoom(image, (spacing / slice_thickness, scale_ratio, scale_ratio), order=3)
        mask = ndimage.zoom(mask, (spacing / slice_thickness, scale_ratio, scale_ratio), order=0)
    return image, mask
def load_patient(imgpath, mskpath, slice_thickness=None, scale_ratio=None):
    """Load an image/mask pair and run them through resize().

    With the default None thickness/ratio only intensity windowing happens
    (resize skips the zoom). Returns (image, mask) arrays.
    """
    image, spacing = load_ct_images(imgpath)
    mask, _ = load_ct_images(mskpath)
    image, mask = resize(image, mask, spacing, slice_thickness, scale_ratio)
    return image, mask
def pad_if_need(image, mask, patch):
    """Edge-pad the volume along the slice axis up to `patch` slices.

    Volumes already at least `patch` slices deep are returned unchanged.
    FIX: the original passed a bare (before, after) tuple to np.pad, which
    NumPy broadcasts to ALL axes -- the in-plane dimensions were being
    padded too. Only the slice axis is padded now.
    """
    assert image.shape == mask.shape
    n_slices, x, y = image.shape
    if n_slices < patch:
        padding = patch - n_slices
        offset = padding // 2
        # Pad only axis 0; split the deficit as evenly as possible.
        pad_width = ((offset, padding - offset), (0, 0), (0, 0))
        image = np.pad(image, pad_width, 'edge')
        mask = np.pad(mask, pad_width, 'edge')
    return image, mask
def slice_window(image, mask, slice, patch):
    """Cut a (padded) volume into `patch`-slice windows with stride `slice`.

    Returns two parallel lists of image and mask patches; the volume is
    first padded via pad_if_need so at least one window always fits.
    """
    image, mask = pad_if_need(image, mask, patch)
    total = image.shape[0]
    image_patches, mask_patches = [], []
    start = 0
    while start + patch <= total:
        image_patches.append(image[start:start + patch])
        mask_patches.append(mask[start:start + patch])
        start += slice
    return image_patches, mask_patches
def slice_builder(imgpath, mskpath, slice_thichness, scale_ratio, slice, patch, save_dir):
    """Resample one patient, cut it into overlapping 3-D patches and save
    each image/mask patch pair as .npy under save_dir/<patient_id>/.

    The patient id is taken from the parent directory name of `imgpath`.
    Returns a DataFrame with columns 'image', 'mask' (paths) and 'patient_id'.
    """
    image, mask = load_patient(imgpath, mskpath, slice_thichness, scale_ratio)
    image_patches, mask_patches = slice_window(image, mask, slice, patch)
    patient_id = imgpath.split("/")[-2]
    save_dir = os.path.join(save_dir, patient_id)
    os.makedirs(save_dir, exist_ok=True)
    image_paths = []
    mask_paths = []
    for i, (image_patch, mask_patch) in enumerate(zip(image_patches, mask_patches)):
        image_path = os.path.join(save_dir, f'image.{i}.npy')
        mask_path = os.path.join(save_dir, f'mask.{i}.npy')
        image_paths.append(image_path)
        mask_paths.append(mask_path)
        np.save(image_path, image_patch)
        np.save(mask_path, mask_patch)
    df = pd.DataFrame({
        'image': image_paths,
        'mask': mask_paths
    })
    df['patient_id'] = patient_id
    return df
def slice_builder_2d(imgpath, mskpath, save_dir):
    """Save every 2-D slice of one patient (no resampling) as .npy pairs
    under save_dir/<patient_id>/ and return the path DataFrame.

    Unlike slice_builder, this keeps individual slices rather than 3-D
    windows; empty-mask slices are currently kept (filter commented out).
    """
    image, mask = load_patient(imgpath, mskpath)
    patient_id = imgpath.split("/")[-2]
    save_dir = os.path.join(save_dir, patient_id)
    os.makedirs(save_dir, exist_ok=True)
    image_paths = []
    mask_paths = []
    for i, (image_slice, mask_slice) in enumerate(zip(image, mask)):
        # if np.any(mask_slice):
        image_path = os.path.join(save_dir, f'image.{i}.npy')
        mask_path = os.path.join(save_dir, f'mask.{i}.npy')
        image_paths.append(image_path)
        mask_paths.append(mask_path)
        np.save(image_path, image_slice)
        np.save(mask_path, mask_slice)
    df = pd.DataFrame({
        'image': image_paths,
        'mask': mask_paths
    })
    df['patient_id'] = patient_id
    return df
def random_crop(image, mask, patch):
    """Crop `patch` consecutive slices at a uniformly random offset.

    FIX: when the volume has exactly `patch` slices the original called
    np.random.randint(0, 0), which raises ValueError. Volumes with `patch`
    slices or fewer are now returned unchanged (callers are expected to pad
    short volumes first, see pad_if_need).
    """
    n_slices = image.shape[0]
    end = int(n_slices - patch)
    if end <= 0:
        # Nothing to crop.
        return image, mask
    rnd_idx = np.random.randint(0, end)
    return image[rnd_idx:rnd_idx + patch, :, :], mask[rnd_idx:rnd_idx + patch, :, :]
def center_crop(image, mask, patch):
    """Crop `patch` slices (patch // 2 either side of the middle slice) from
    both volumes; for odd `patch` this yields patch - 1 slices, matching the
    original behaviour."""
    half = patch // 2
    centre = image.shape[0] // 2
    lo, hi = int(centre - half), int(centre + half)
    return image[lo:hi, :, :], mask[lo:hi, :, :]
class StructSegTrain2D(Dataset):
    """2-D slice dataset driven by a CSV with 'image'/'mask' columns of
    .npy paths (as produced by slice_builder_2d).

    __getitem__ returns {'images': (3, H, W) float32, 'targets': (H, W) int}.
    """

    def __init__(self, csv_file, transform):
        """csv_file: path to the index CSV; transform: albumentations-style
        callable taking image=/mask= keywords, or None."""
        df = pd.read_csv(csv_file)
        self.transform = transform
        self.images = df['image'].values
        self.masks = df['mask'].values

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = np.load(self.images[idx])
        mask = np.load(self.masks[idx])
        # Replicate the single CT channel three times (HWC) for RGB encoders.
        image = np.stack((image, image, image), axis=-1).astype(np.float32)
        if self.transform:
            transformed = self.transform(image=image, mask=mask)
            image = transformed['image']
            mask = transformed['mask']
        # HWC -> CHW for the model.
        image = np.transpose(image, (2, 0, 1))
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int gives the same platform-default integer dtype.
        mask = mask.astype(int)
        return {
            'images': image,
            'targets': mask
        }
|
984,716 | 726503040deb67c2b3e5652d26441b3c01dd26d2 | import json
import subprocess
from flask import Flask
def _exec(cmd):
process = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE)
return process.communicate()[0]
app = Flask(__name__)
@app.route("/")
def hello():
    """List all containers (docker ps -aq) and return a JSON array of
    {ip, npm} records, where ip is the address on the compose network and
    npm the short (12-char) container id.

    NOTE(review): the network name 'sisdisewallet_ewallet' is hard-coded;
    containers not attached to it will raise KeyError here.
    """
    running_containers = _exec('docker ps -aq').split('\n')
    # The trailing '' from split becomes an extra empty token passed to
    # `docker inspect` -- appears tolerated; verify against docker CLI.
    inspect = json.loads(_exec('docker inspect ' + ' '.join(running_containers)))
    nodes = []
    for node in inspect:
        nodes.append({
            'ip': node['NetworkSettings']['Networks']['sisdisewallet_ewallet']['IPAddress'],
            'npm': node['Id'][:12]
        })
    return json.dumps(nodes)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000) |
984,717 | 4c4e26d24401ee371810cdd7f666d144a0f9704b | # -*- coding: utf-8 -*-
import logging
import os
import gettext
from ask_sdk_core.dispatch_components import AbstractRequestInterceptor, \
AbstractResponseInterceptor
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_model.ui import SimpleCard
# necessary for local tests
os.environ["AWS_DEFAULT_REGION"] = "eu-west-1"
os.environ["ASK_DEFAULT_DEVICE_LOCALE"] = "it-IT"
os.environ["DEBUG"] = 'True' # Debug variable, set to False once in production to avoid excessive logging
from alexa.utils import convert_speech_to_text
from intent_handlers import \
LaunchRequestHandler, HelpIntentHandler, ExitIntentHandler, \
BaseRequestInterceptor, BaseRequestHandler, CatchAllExceptionHandler, FallbackIntentHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
DEBUG = os.environ.get("DEBUG", False) == 'True'
sb = SkillBuilder()
class AddCardInterceptor(AbstractResponseInterceptor):
    """ Add a card to every response by translating ssml text to card content """

    def process(self, handler_input, response):
        # type: (HandlerInput, Response) -> None
        """Attach a SimpleCard built from the response SSML unless the
        handler already set one; in that case only clean the existing card
        text. Responses without speech/card text are left untouched."""
        _ = handler_input.attributes_manager.request_attributes["_"]  # Translator
        # the attribute is always present but set to None without a card
        if getattr(handler_input.response_builder.response, 'card', None) is None:
            # Card was not set hard-coded in response
            try:
                response.card = SimpleCard(title=convert_speech_to_text(_("SKILL_NAME")),
                                           content=convert_speech_to_text(response.output_speech.ssml))
            except AttributeError:
                # No output_speech on this response (e.g. pure directive).
                pass
        else:
            # Card was set hard-coded in response, converting ssml to clean text anyway
            try:
                response.card = SimpleCard(title=convert_speech_to_text(response.card.title),
                                           content=convert_speech_to_text(response.card.content))
            except AttributeError:
                pass
# Request and Response loggers
class RequestLogger(AbstractRequestInterceptor):
    """ Log the alexa requests """

    def process(self, handler_input):
        # type: (HandlerInput) -> None
        # Newlines folded into '\r' so each request stays one log record.
        logger.info("ALEXA REQUEST: {}".format(handler_input.request_envelope).replace('\n', '\r'))
class ResponseLogger(AbstractResponseInterceptor):
    """ Log the alexa responses """

    def process(self, handler_input, response):
        # type: (HandlerInput, Response) -> None
        # Newlines folded into '\r' so each response stays one log record.
        logger.info("ALEXA RESPONSE: {}".format(response).replace('\n', '\r'))
# localizations support: https://github.com/alexa/skill-sample-python-city-guide/blob/master/instructions
# /localization.md
class LocalizationInterceptor(AbstractRequestInterceptor):
    """ Add function to request attributes, that can load locale specific data."""

    def process(self, handler_input):
        # type: (HandlerInput) -> None
        """Install a gettext translator for the request locale under the
        request attribute "_"; unknown locales fall back to the msgid."""
        locale = handler_input.request_envelope.request.locale
        if DEBUG:
            logger.info("LOCALE = {}".format(locale))
        # Catalogs are expected at locales/<locale>/LC_MESSAGES/data.mo.
        i18n = gettext.translation('data', localedir='locales', languages=[locale], fallback=True)
        handler_input.attributes_manager.request_attributes["_"] = i18n.gettext
# Add locale interceptor to the skill
sb.add_global_request_interceptor(LocalizationInterceptor())
# Register built-in handlers
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(ExitIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
# Register intent handlers
# TODO
# Register exception handlers
sb.add_exception_handler(CatchAllExceptionHandler())
# Add card interceptor to the skill
sb.add_global_response_interceptor(AddCardInterceptor())
# Add log interceptors to the skill
sb.add_global_request_interceptor(RequestLogger())
sb.add_global_response_interceptor(ResponseLogger())
# Handler name that is used on AWS lambda
lambda_handler = sb.lambda_handler()
|
984,718 | f09150e569941bd8bebe45b3941f6badb3d1fc6e | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
import pickle
import warnings
warnings.filterwarnings("ignore")
def create_new_pipeline(params):
    """Build the preprocessing + scaling + XGBoost classification pipeline.

    Relies on the module-level ``numerical`` and ``categorical`` column
    lists being defined before the pipeline is fitted.

    params -- keyword arguments forwarded to XGBClassifier.
    """
    impute_numeric = SimpleImputer(strategy='mean')
    transform_categorical = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('encoding', OneHotEncoder(drop='first')),
    ])
    column_prep = ColumnTransformer(transformers=[
        ('numerical', impute_numeric, numerical),
        ('categorical', transform_categorical, categorical),
    ])
    classifier = XGBClassifier(
        n_jobs=-1,
        random_state=42,
        **params
    )
    return Pipeline(steps=[
        ('preprocessing', column_prep),
        ('scaling', StandardScaler()),
        ('model', classifier),
    ])
if __name__ == '__main__':
    print('Importing data')
    df = pd.read_csv('Placement_Data_Full_Class.csv',
                     index_col='sl_no').reset_index(drop=True)
    print('Spliting data')
    # Hold out 20% as a test set; only the training portion is fitted below.
    df_full_train, df_test = train_test_split(
        df, test_size=0.2, random_state=42)
    # Module-level column lists — create_new_pipeline() reads these globals.
    numerical = ['hsc_p', 'degree_p', 'ssc_p']
    categorical = ['gender', 'ssc_b', 'hsc_b', 'hsc_s',
                   'degree_t', 'workex', 'specialisation']
    classification_target = ['status']
    regression_target = ['salary']
    X = df_full_train[numerical+categorical]
    # Binary target: 1 when status == 'Placed'.
    y = pd.get_dummies(df_full_train[classification_target])['status_Placed']
    # Hyperparameters found by an earlier tuning run.
    params = {'learning_rate': 0.5272631578947369,
              'max_depth': 6,
              'n_estimators': 10,
              'reg_alpha': 0.1,
              'reg_lambda': 1.0}
    print('Creating pipeline')
    pipeline = create_new_pipeline(params)
    print('Training model')
    pipeline.fit(X, y)
    print('Saving model')
    with open('status_model.pickle', 'wb') as f:
        pickle.dump((pipeline), f)
|
984,719 | c8c2a66700f78d63d579d6176c1d4e9a09c14dae | n=int(input())
# Feasibility check: can a point moving one unit per time step in the plane
# visit point (x, y) at time t for every listed (t, x, y), starting at (0, 0)?
# Reachable iff Manhattan distance <= elapsed time and has matching parity.
points = [[0, 0, 0]]
for _ in range(n):
    points.append(list(map(int, input().split())))
reachable = True
for prev, cur in zip(points, points[1:]):
    steps = cur[0] - prev[0]
    dist = abs(cur[1] - prev[1]) + abs(cur[2] - prev[2])
    if dist > steps or (dist - steps) % 2 == 1:
        reachable = False
print("Yes" if reachable else "No")
|
984,720 | 3363cde533b04af460ad80303f104163a18fc974 | import requests
import uuid
import getpass
import hashlib
import base64
from globals import *
# Logs in using a username and password
# The password is appended with a salt retrieved from the server and hashed
def login():
    """Prompt for credentials and log in; sets userToken/loggedIn on success.

    The password is hashed client-side as sha256(password + server salt).
    """
    global userToken
    global loggedIn
    global baseURL
    username = raw_input("Enter your username > ")
    password = getpass.getpass("Enter your password > ")
    if username and password:
        salt = getSalt(username)
        if not salt:
            print "Failed to retieve salt."
            return
        password = hashlib.sha256(password+salt).hexdigest()
        r = requests.post(baseURL+"/api/user/login/", json={'username': username, 'password': password})
        if 'result' not in r.json():
            print "Error: {}".format(r.json()['error'])
        else:
            if r.json()['result'] == "true":
                userToken = r.json()['token']
                loggedIn = True
                loc = getLocation()
                if loc != None:
                    print "You awake and find yourself in {}".format(worldmap[loc['mapindex']]['title'])
                else:
                    print "Login succeded."
                return
            else:
                print "Login failed. Probably a wrong password"
                loggedIn = False
def move(parameters):
    """Move the character n/e/s/w via the server and describe the new tile."""
    global loggedIn
    global worldmap
    if not loggedIn:
        print "Please use the >login command first."
        return
    if len(parameters) < 1:
        print "You need to specify a direction."
        return
    if parameters[0] in ['n', 'e', 's', 'w']:
        r = requests.post(baseURL+"/api/user/move/", json={'token': userToken, 'direction': parameters[0]})
        if 'result' not in r.json():
            print r.json()['error']
        else:
            if r.json()['result'] == "true":
                loc = r.json()['location']
                print "You travel {}".format(parameters[0])
                print " === New Location === "
                print worldmap[loc['mapindex']]['title']
                print worldmap[loc['mapindex']]['description']
                print "--------------------"
            else:
                print "You can't go this way."
                print r.json()['error']
                # The server mentions "item" in the error when a tool is needed.
                if "item" in r.json()['error']:
                    print "You need a tool of the type: {}".format(worldmap[r.json()['location']['mapindex']]['requireditems'][0])
    else:
        print "Direction needs to be <n/e/s/w>."
def inventory(parameters):
    """Print the logged-in player's inventory, grouped and counted by item id."""
    global loggedIn
    global items
    if not loggedIn:
        print "Please use the >login command first."
        return
    r = requests.post(baseURL+"/api/user/inventory/", json={'token': userToken})
    if 'items' not in r.json():
        print "Error: {}".format(r.json()['error'])
    else:
        if len(r.json()['items']) < 1:
            print "You don't seem to have anything on you."
            return
        # Create an array the same as the items (index == item id).
        myitems = [0] * len(items)
        # For each item found, add to the count
        for i in r.json()['items']:
            myitems[i['id']] += i['count']
        # Loop through the myitems
        print " ==== Inventory ==== "
        for i in range(len(myitems)):
            # if there's one, just print it
            if myitems[i] == 1:
                print items[i]['name']
                print items[i]['description']
                print "Damage: {}".format(items[i]['damage'])
                print "Type: {}".format(items[i]['type'])
                print "-----------------"
            elif myitems[i] > 1:
                # if there's multiple print the plural version
                print "{} {}".format(myitems[i], items[i]['plural'])
                print items[i]['description']
                print "Damage: {}".format(items[i]['damage'])
                print "-----------------"
def printHelp():
    """Print the command reference for the client."""
    print " ==== Help ==== "
    print " Welcome to Muddy Pyddle "
    print " <> denotes optional values"
    print " [] denotes mandatory values"
    print " === Commands === "
    print "quit/exit/q - exit the program"
    print "help/? - this helpful information"
    print "login - login to your account"
    print "register - register a new account"
    print " === Requires Login === "
    print "inv/i - look at your inventory"
    print "stats - look at your stats"
    print "quests - list the quests available where you are"
    print "location/loc - where is your character"
    print "look/l <n/e/s/w> - take a look around"
    print "move/go [n/e/s/w] - move in a direction"
# Get the quests, checks for login
def quests(parameter):
    """List available quests; requires a logged-in session."""
    global userToken
    global loggedIn
    if not loggedIn or not userToken:
        print "Please use the >login command first."
        return
    if not parameter:
        getQuests()
    # take quests
# Retireves and prints the quests from the server
def getQuests():
    """Retrieve and print the quests available at the current location."""
    global baseURL
    r = requests.post(baseURL+"/api/quests/", json= {'token' : userToken})
    if 'quests' not in r.json():
        print "Error: {}".format(r.json()['error'])
    else:
        print " === Quests Available === "
        for quest in r.json()['quests']:
            print "Title > {}".format(quest['title'])
            print "Description > {}".format(quest['description'])
            print "QuestID > {}".format(quest['questID'])
            print "--------------------"
# Get the users stats
def stats():
    """Print the logged-in character's ability scores."""
    global loggedIn
    if not loggedIn:
        print "Please use the >login command first."
        return
    else:
        r = requests.post(baseURL+"/api/user/stats/", json={'token': userToken})
        if 'error' in r.json():
            print "Error: {}".format(r.json()['error'])
        else:
            print " ==== Stats ==== "
            print "Strength - {}".format(r.json()['strength'])
            print "Fortitude - {}".format(r.json()['fortitude'])
            print "Charisma - {}".format(r.json()['charisma'])
            print "Wisdom - {}".format(r.json()['wisdom'])
            print "Dexterity - {}".format(r.json()['dexterity'])
            print "-----------------"
# Get the location of the user from the server
def location():
    """Print the title and description of the character's current map tile."""
    global userToken
    global loggedIn
    global worldmap
    if not loggedIn:
        print "Please use the >login command first."
        return
    else:
        loc = getLocation()
        if loc != None:
            # // FIX
            print " === Current Location === "
            print worldmap[loc['mapindex']]['title']
            print worldmap[loc['mapindex']]['description']
            print "--------------------"
        else:
            print "Couldn't locate your character."
# Get the location, returns an object with ['x'] and ['y'] fields
def getLocation():
    """Return the server's location dict (with 'mapindex'), or None on error."""
    global userToken
    try:
        r = requests.post(baseURL+"/api/user/location/", json={'token': userToken})
        if 'location' not in r.json():
            print "Error: {}".format(r.json()['error'])
            return None
        else:
            return r.json()['location']
    except requests.ConnectionError:
        print "Couldn't connect to the server."
        return None
def look(parameters):
    """Describe the current tile, or peek at an adjacent tile (n/e/s/w)."""
    global loggedIn
    # If they're not logged in
    if not loggedIn:
        print "Please use the >login command first."
        return
    # get the location of the character
    loc = getLocation()
    # check the location
    if loc == None:
        print "Location couldn't be retrieved."
        return
    # If there's no parameters given
    if len(parameters) < 1:
        print worldmap[loc['mapindex']]['here']
        return
    # If the parameters are correct
    if parameters[0] in ['n', 'e', 's', 'w']:
        print worldmap[loc['mapindex']][parameters[0]]
        return
    else:
        print "Direction must be <n/e/s/w>."
# Download the map and a list of items
def getStartData():
    """Download the world map and item list into module globals.

    Returns True on success, False on any failure.
    """
    global worldmap
    global worldheight
    global worldwidth
    global items
    try:
        r = requests.get(baseURL+"/api/world/map/")
        if 'worldmap' not in r.json():
            return False
        else:
            worldmap = r.json()['worldmap']
            worldheight = r.json()['height']
            worldwidth = r.json()['width']
        r = requests.get(baseURL+"/api/world/items/")
        if 'items' not in r.json():
            return False
        else:
            items = r.json()['items']
            return True
    except requests.ConnectionError:
        print "Couldn't connect to the server."
        return False
# Retrieves and returns the salt of a given username
def getSalt(username):
    """Return the stored password salt for *username*, or None on error."""
    try:
        r = requests.post(baseURL+"/api/user/salt/", json={'username': username})
        if 'salt' not in r.json():
            print "Error: {}".format(r.json()['error'])
            return None
        else:
            return r.json()['salt']
    except requests.ConnectionError:
        print "Couldn't connect to the server."
        return None
# Registers a new user, will generate a salt and send a hashed password+salt to the server
# Does not login.
def register():
    """Register a new account; generates a salt client-side and sends
    sha256(password + salt). Does not log the user in.
    """
    username = raw_input("Enter a username > ")
    password = getpass.getpass("Enter a password > ")
    password2 = getpass.getpass("Enter the password again > ")
    if password != password2:
        print "Passwords don't match, try again."
        return
    if len(username) < 4 or len(password) < 6:
        print "Username must be greater than 4 characters and password must be greater than 6 characters."
        return
    salt = uuid.uuid4().hex
    password = hashlib.sha256(password+salt).hexdigest()
    try:
        r = requests.post(baseURL+"/api/user/register/", json={'username' : username, 'password': password, 'salt': salt})
        if 'result' not in r.json():
            print "Error: {}".format(r.json()['error'])
        else:
            if r.json()['result'] == "true":
                print "Account registered! You can now >login"
    except requests.ConnectionError:
        print "Couldn't connect to the server."
|
984,721 | be1af26f932a6251da984de2f31f7ca9c1196af9 | import nose
from nose.plugins.attrib import attr
# nose decors and attr
def copy_attrs(source, to):
    """Copy every public attribute of *source* onto *to*.

    Names starting with '_' or 'func_' (Python 2 function internals) are
    skipped.
    """
    for name in dir(source):
        if name.startswith('_') or name.startswith('func_'):
            continue
        setattr(to, name, getattr(source, name))
def one(func):
    """Decorator experiment: at call time, copies the wrapper's public
    attributes (e.g. those set by nose's @attr on the wrapper) back onto the
    wrapped function, then calls it. Note the unusual wrapper->wrapped
    copy direction.
    """
    def created_in_one():
        print("\nin one {} {} {}".format(func.__name__, getattr(func, 'hello', None), getattr(created_in_one, 'hello', None)))
        #print(dir(func))
        print(dir(created_in_one))
        # for attr in dir(created_in_one):
        #     if attr.startswith('_'):
        #         continue
        #     if attr.startswith('func_'):
        #         continue
        #     func.__setattr__(attr, getattr(created_in_one, attr))
        copy_attrs(created_in_one, func)
        func()
        print("out one {} {} {}".format(func.__name__, getattr(func, 'hello', None), getattr(created_in_one, 'hello', None)))
    created_in_one.__name__ = func.__name__
    return created_in_one
def two(func):
    """Same experiment as one(): propagates wrapper attributes onto *func*
    before delegating the call."""
    def created_in_two():
        #print(dir(func))
        print(dir(created_in_two))
        print("in two {} {} {}".format(func.__name__, getattr(func, 'hello', None), getattr(created_in_two, 'hello', None)))
        copy_attrs(created_in_two, func)
        func()
        print("out two {} {} {}".format(func.__name__, getattr(func, 'hello', None), getattr(created_in_two, 'hello', None)))
    created_in_two.__name__ = func.__name__
    return created_in_two
def three(func):
    """Control case: wraps and delegates like two() but does NOT copy
    attributes (the log messages still say "two")."""
    def created_in_three():
        #print(dir(func))
        print(dir(created_in_three))
        print("in two {} {} {}".format(func.__name__, getattr(func, 'hello', None), getattr(created_in_three, 'hello', None)))
        func()
        print("out two {} {} {}".format(func.__name__, getattr(func, 'hello', None), getattr(created_in_three, 'hello', None)))
    created_in_three.__name__ = func.__name__
    return created_in_three
@attr("world")
@attr("hello")
@one
@two
@three
def test_a():
print("a - start")
print(dir(test_a))
# @one
# @two
# @attr("hello")
# def test_b():
# print("b - start")
|
984,722 | 64cc5ce01544d6e1df80eb56287498cd348a542a | '''
implements packing problem approximation how for a set of rectangles choose the rect which will best cover them.
'''
from utils import transpose, findfirst
from rect import Rect
class AlgorithmError(RuntimeError): pass  # raised when rects cannot be arranged under the given constraints
class PackingAlgorithm:
    '''
    Base class for algorithms that compute an arrangement of rects on a sheet.
    '''
    def __init__(self, rects):
        # rects: Rect objects to arrange; size: (width, height) of the sheet.
        self.rects = rects
        self.size = 0, 0
    def compute(self):
        '''Arrange self.rects in place; subclasses override.'''
        pass
    def minWidth(self):
        '''Width of the widest rect — a lower bound on the sheet width.'''
        return max(rect.width for rect in self.rects)
    def minHeight(self):
        '''Height of the tallest rect — a lower bound on the sheet height.'''
        return max(rect.height for rect in self.rects)
    def minAreaBound(self):
        '''Total area of all rects; minArea >= minWidth * minHeight.'''
        return sum(rect.area for rect in self.rects)
    @property
    def fillingCoef(self):
        'covered area / sheet area ratio (1.0 would mean zero waste)'
        sheetArea = self.size[0] * self.size[1]
        return sum(rect.width * rect.height for rect in self.rects) / float(sheetArea)
    def transpose(self):
        'transposing the problem can be useful for some positioning strategies'
        for rect in self.rects:
            rect.transpose()
        self.size = transpose(self.size)
    def shrinkSize(self):
        '''Tighten self.size to the bounding box of the placed rects.'''
        width = max(rect.right for rect in self.rects)
        height = max(rect.bottom for rect in self.rects)
        self.size = width, height
class SmallestWidthAlgorithm(PackingAlgorithm):
    '''
    Greedy shelf approximation: fills rects row by row (tallest first) into a
    sheet of a predefined width, growing the sheet height as needed.
    '''
    def __init__(self, rects):
        PackingAlgorithm.__init__(self, rects)
        self.highest = 0   # lowest edge reached so far (max rect.bottom)
        self.actualX = 0   # placement cursor within the current row
        self.actualY = 0   # top edge of the current row
    def _startNewRow(self):
        # Move the cursor below everything placed so far.
        self.actualX = 0
        self.actualY = self.highest
    def _placeRect(self, rect):
        rect.topleft = self.actualX, self.actualY
        self.actualX = rect.right
        self.highest = max(self.highest, rect.bottom)
    def compute(self, width=0):
        '''Arrange self.rects within *width* (defaults to the widest rect).

        Raises AlgorithmError when a rect is wider than *width*.
        '''
        if width == 0:
            width = self.minWidth()
        self._sortRects()
        rects = self.rects[:]
        while rects:
            # First remaining rect that still fits on the current row.
            actualRect = findfirst(lambda rect: rect.width + self.actualX <= width, rects)
            if not actualRect:
                if self.actualX == 0:
                    # Even an empty row cannot hold any remaining rect.
                    raise AlgorithmError('algorithm cannot place any remaining rect to ensure predefined width')
                else:
                    self._startNewRow()
                    continue
            rects.remove(actualRect)
            self._placeRect(actualRect)
        self.size = width, self.highest
        return self.rects
    def _sortRects(self):
        # Tallest first keeps the rows roughly level.
        self.rects.sort(key=lambda item: item.height, reverse=True)
|
984,723 | 0395d5bb03f8fb431375ca7eb282eb33b042acb0 | '''
8) Um valor inteiro positivo n é chamado de quadrado perfeito se existir
uma sequência de ímpares consecutivos a partir do valor 1 cuja soma seja
exatamente igual a n.
Exemplo: para o valor 16 temos 16 = 1 + 3 + 5 + 7.
Assim sendo, 16 é um quadrado perfeito.
Logo, um quadrado perfeito tem a seguinte propriedade: o número de termos
ímpares consecutivos m a partir do valor 1 cuja soma é igual ao quadrado
perfeito corresponde à raiz quadrada do quadrado perfeito.
No exemplo acima, para n=16, o valor de m é 4, o que corresponde à
raiz quadrada de 16.
Faça um programa que solicite ao usuário a digitação de um número.
Este programa deve:
a) Verificar se valor digitado pelo usuário é um quadrado perfeito.
Se o valor digitado pelo usuário não for um quadrado perfeito,
dê uma mensagem ao usuário.
b) Se o valor digitado pelo usuário for um quadrado perfeito,
determine o valor de sua raiz quadrada (m) de acordo com o procedimento
descrito acima e imprima na tela.
'''
# Soma impares consecutivos 1 + 3 + 5 + ... ate alcancar ou ultrapassar n.
# Se a soma atingir exatamente n, n e um quadrado perfeito e m (quantidade
# de termos usados) e a sua raiz quadrada.
n = int(input('Digite um numero: '))
soma = 0
proximo_impar = 1
m = 0
while soma < n:
    soma += proximo_impar
    proximo_impar += 2
    m += 1
if soma == n:
    print('Quadrado Perfeito')
    print('Raiz quadrada = ', m)
else:
    print('NÃO é um quadrado perfeito')
|
984,724 | 1ec1b75609817b7a3d52c68ae5cc0b029c76204e | from collections import defaultdict
from collections import deque
from heapq import *
def ImportGraph(graph):
    """Read a weighted adjacency-list file and return it as a dict.

    Each line looks like ``v  n1,w1  n2,w2 ...`` (whitespace-separated
    neighbour,weight pairs).  Returns ``{v: [(w1, n1), (w2, n2), ...]}``
    with integer weights and string vertex names, as a defaultdict(list).
    """
    Graph = defaultdict(list)
    # 'with' guarantees the file is closed even if parsing raises.
    with open(graph, 'r') as source:
        for line in source:
            tokens = line.split()
            if not tokens:
                continue  # tolerate blank lines (the original crashed here)
            vertex = tokens[0]
            for pair in tokens[1:]:
                neighbour, weight = pair.split(",")
                # Store (weight, neighbour) so entries sort by weight.
                Graph[vertex].append((int(weight), neighbour))
    return Graph
def dijkstra(g, first, destination):
    """Return ``(cost, path_string)`` for the cheapest path first -> destination.

    *g* maps a vertex name to a list of ``(weight, neighbour)`` pairs.
    Returns ``(float('inf'), '')`` when the destination is unreachable —
    the original code crashed with IndexError on the empty heap instead.
    """
    q = [(0, first, "The Shortest Path:")]
    visited = set()  # set gives O(1) membership vs the original list's O(n)
    while q:
        cost, vertex, path = heappop(q)
        if vertex in visited:
            continue
        visited.add(vertex)
        path = path + "->" + vertex
        if vertex == destination:
            return (cost, path)
        for weight, neighbour in g.get(vertex, ()):
            if neighbour not in visited:
                heappush(q, (cost + weight, neighbour, path))
    return (float("inf"), "")
if __name__ == "__main__":
graph = ImportGraph("DijkstraData.txt")
print(dijkstra(graph, "1", "7"))
print(dijkstra(graph, "1", "37"))
print(dijkstra(graph, "1", "59"))
print(dijkstra(graph, "1", "82"))
print(dijkstra(graph, "1", "99"))
print(dijkstra(graph, "1", "115"))
print(dijkstra(graph, "1", "133"))
print(dijkstra(graph, "1", "165"))
print(dijkstra(graph, "1", "188"))
print(dijkstra(graph, "1", "197"))
|
984,725 | 868d022c03ac8df07b29d0a4eec215218844c7ed | # MIT License
# Copyright (c) 2018-2020 Dr. Jan-Philip Gehrcke
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import logging
import sys
import shutil
import textwrap
import os
from datetime import datetime
from types import SimpleNamespace
NOW = datetime.utcnow()
TODAY = NOW.strftime("%Y-%m-%d")  # used as the default report-directory prefix
OUTDIR = None  # populated once the output directory is known
FIGURE_FILE_PATHS = {}  # figure name -> written file path
log = logging.getLogger(__name__)
# Module-wide mutable configuration, populated by parse_args().
_CFG = SimpleNamespace()
_EPILOG = """
Performs analysis on CI build information
"""
def CFG():
    """Return the module-wide configuration namespace."""
    return _CFG
def parse_args():
    """Parse CLI arguments, (re)create the output directory, and store the
    result in the module-wide config namespace.

    Exits the process on an invalid --ignore-builds-before date, or when
    the output directory path exists but is not a directory.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Performs Buildkite CI data analysis",
        epilog=textwrap.dedent(_EPILOG).strip(),
    )
    parser.add_argument("--output-directory", default=TODAY + "_report")
    # parser.add_argument("--resources-directory", default="resources")
    # parser.add_argument("--pandoc-command", default="pandoc")
    subparsers = parser.add_subparsers(
        help="service-specific entry points", dest="command", metavar="service"
    )
    parser_bk = subparsers.add_parser("bk", help="Buildkite")
    parser_bk.add_argument("org", help="The org's slug (simplified lowercase name)")
    parser_bk.add_argument(
        "pipeline", help="The pipeline's slug (simplified lowercase name)"
    )
    parser_bk.add_argument(
        "--ignore-builds-shorter-than", type=int, help="Number in seconds"
    )
    parser_bk.add_argument(
        "--ignore-builds-longer-than", type=int, help="Number in seconds"
    )
    parser_bk.add_argument(
        "--ignore-builds-before",
        type=str,
        help="Ignore builds that ended before this date",
        metavar="YYYY-MM-DD",
    )
    parser_bk.add_argument(
        "--multi-plot-only",
        action="store_true",
        help="Do not write individual figure files, but only the multi plot figure",
    )
    # action="extend" + nargs="+" accumulates across repeated flags:
    # >>> parser.parse_args(["--foo", "f1", "--foo", "f2", "f3", "f4"])
    # Namespace(foo=['f1', 'f2', 'f3', 'f4'])
    parser_bk.add_argument(
        "--multi-plot-add-step-duration",
        type=str,
        help="Add a duration plot for these step keys",
        action="extend",
        nargs="+",
    )
    args = parser.parse_args()
    if args.ignore_builds_before:
        try:
            # Bug fix: the original used "%Y-%M-%d" — %M is *minute*, so
            # dates like 2020-13-01 passed validation. %m (month) matches
            # the advertised YYYY-MM-DD metavar.
            datetime.strptime(args.ignore_builds_before, "%Y-%m-%d")
        except ValueError as exc:
            sys.exit("bad --ignore-builds-before: " + str(exc))
    log.info("command line args: %s", json.dumps(vars(args), indent=2))
    if os.path.exists(args.output_directory):
        if not os.path.isdir(args.output_directory):
            log.error(
                "The specified output directory path does not point to a directory: %s",
                args.output_directory,
            )
            sys.exit(1)
        # Start from a clean slate: wipe any previous report of the same name.
        log.info("Remove output directory: %s", args.output_directory)
        shutil.rmtree(args.output_directory)
    log.info("Create output directory: %s", args.output_directory)
    os.makedirs(args.output_directory)
    _CFG.args = args
    return args
984,726 | 9f1529aa0a3c40b22a575a70552e1c616256a6cc | from flask import Flask
from flask import render_template
from flask import url_for
from flask import request
from flask_bootstrap import Bootstrap
import sqlite3 as sql
app = Flask(__name__)
Bootstrap(app)
@app.route('/')
def hello_world():
    """Root route: plain-text greeting."""
    return "Hello World"
@app.route("/index/")
def index_page():
return render_template("index.html")
@app.route("/boot")
def boot_page():
return render_template("boot.html")
@app.route("/page/<string:message>")
def page_message(message):
return "You entered {0}".format(message)
@app.route("/number/<int:num>")
def number_num(num):
return "You entered {0}".format(num)
@app.route("/save/<string:name>/<string:addr>/<string:city>")
def save_data(name, addr, city):
with sql.connect("database.db") as con:
cur=con.cursor()
cur.execute("INSERT INTO students (name, address, city) VALUES (?, ?, ?)", [name, addr, city])
con.commit()
return "Record Successfully added {0} {1} {2}".format(name, addr, city)
@app.route("/list")
def list_data():
con = sql.connect("database.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("SELECT * FROM students")
rows = cur.fetchall()
return render_template("list.html", rows = rows)
@app.route("/student")
def new_student():
render_template("student.html")
return render_template("student.html")
@app.route("/addrec", methods=["POST"])
def addrec():
if request.method =="POST":
name = request.form["nm"]
addr=request.form["add"]
city=request.form["cty"]
with sql.connect("database.db") as con:
cur=con.cursor()
cur.execute("INSERT INTO students (name, address, city) VALUES (?, ?, ?)", [name, addr, city])
con.commit()
return render_template("list.html")
# One-time schema bootstrap, kept for reference:
#def create_database():
#    conn = sql.connect("database.db")
#    conn.execute("CREATE TABLE students (name TEXT, address TEXT, city TEXT)")
#    conn.close()
#create_database()
if __name__ =='__main__':
    # debug=True enables the interactive debugger — development only.
    app.run(debug=True)
|
984,727 | e6874a5d09ffc8f714108a3bf7a33faf0ca95155 | /home/cliffordten/anaconda3/lib/python3.7/bisect.py |
984,728 | 629c76a7195ea83fd17d84831777226732f3ea54 |
# -*- coding: utf-8 -*-
# @Date : 2018-10-13 10:45:50
# @Author : raj lath (oorja.halt@gmail.com)
# @Link : link
# @Version : 1.0.0
from sys import stdin
max_val=int(10e12)  # NOTE(review): 10e12 == 1e13, not 1e12 as the name suggests
min_val=int(-10e12)
# One-line stdin helpers, typical competitive-programming boilerplate.
def read_int() : return int(stdin.readline())
def read_ints() : return [int(x) for x in stdin.readline().split()]
def read_str() : return input()
def read_strs() : return [x for x in stdin.readline().split()]
MOD = 998244353  # common NTT-friendly prime modulus
def add(a, b):
    """Return (a + b) normalised into the range [0, MOD)."""
    total = a + b
    if total < 0:
        total += MOD
    if total >= MOD:
        total -= MOD
    return total
len1, len2 = read_ints()
a = read_str()
b = read_str()
pw, res, ans = 1, 0, 0
# Scan both binary strings from their least-significant end.
for i in range(len2):
    if i < len1 and a[len1 - i - 1] == "1":
        # NOTE(review): ``res += add(res, pw)`` doubles res AND adds pw, and
        # leaves res un-reduced modulo MOD; ``res = add(res, pw)`` was likely
        # intended.  ``pw`` is also never updated (stays 1) — confirm against
        # the intended algorithm before relying on this output.
        res += add(res, pw)
    if (b[len2 - i - 1] == "1"):
        ans = add(ans, res)
print(ans)
|
984,729 | 7e21701461435459326e47199bf7f9bbc19bd406 | # 325. 和等于 k 的最长子数组长度
# https://leetcode-cn.com/problems/maximum-size-subarray-sum-equals-k/
class Solution(object):
def maxSubArrayLen(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
if not nums:
return 0
tmpSum = {0:0}
preSum = 0
result = 0
for i, num in enumerate(nums):
preSum += num
if preSum not in tmpSum:
tmpSum[preSum] = i + 1
if preSum - k in tmpSum:
result = max(result, i + 1 - tmpSum[preSum - k])
return result
# Smoke tests — executed on import.
s = Solution()
assert s.maxSubArrayLen( [1, -1, 5, -2, 3], 3) == 4
assert s.maxSubArrayLen([-2, -1, 2, 1], 1) == 2
984,730 | 4d62685b10422a4835e01b32ede9e069b22dcb95 | /home/Ritik-Gupta/anaconda3/lib/python3.7/os.py |
984,731 | e339418ede6f09a031b5ccb0b2ba96d6381c8400 | from pwn import *
import re
from base64 import b64decode
context.log_level = 'error' # Disable non error related messages
host, port = 'tasks.aeroctf.com', '44323'
def oracle(salt=''):
    """Query the remote encryption oracle with *salt*; return the raw ciphertext.

    Retries forever on any failure (connection drop, malformed reply).
    """
    while True:
        try:
            r = remote(host, port)
            r.recv(4096)
            r.sendline('3')
            r.recv(4096)
            r.sendline(salt)
            data = r.recv(4096).decode()
            # The base64 ciphertext is single-quoted inside the server reply.
            b64 = data.split("'")[1]
            return b64decode(b64.encode())
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # prefer ``except Exception``.
            continue
        break  # NOTE(review): unreachable — the try either returns or continues
def offset():
    """Probe how many pad bytes grow the ciphertext by one block.

    NOTE(review): returns None implicitly if no length change is observed in
    1..15, and the name is immediately shadowed by the module-level
    ``offset = 16 - 10`` assignment below — this function is effectively dead.
    """
    compare = len(oracle())
    for x in range(1, 16):
        if len(oracle(salt='a' * x)) != compare:
            return x
offset = 16 - 10 # As expected because AERO{32} => (6 + 32) % 16
# Candidate characters for the byte-at-a-time flag recovery below.
word_bank = ['A', 'a', 'b', 'c', 'd', 'e', 'f', 'r', 'o', '{', '}',
             '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '\x00']
def same_block(data):
    """Return True if any 16-byte block occurs twice in *data* (ECB tell-tale)."""
    seen = set()
    for start in range(0, len(data), 16):
        chunk = data[start:start + 16]
        if chunk in seen:
            return True
        seen.add(chunk)
    return False
plain = ''
# Byte-at-a-time ECB decryption: recover three 16-byte blocks of the secret.
for b in range(3):
    block = ''
    for x in range(15, -1, -1):
        # Choose padding so the next unknown secret byte is aligned at the
        # end of a block we control.
        if b == 0:
            pad = 'a' * x
        elif x == 0:
            pad = ''
        else:
            pad = plain[-x:]
        for word in word_bank:
            exploit = pad + block + word + 'a' * x
            data = oracle(salt=exploit)
            # A duplicated ciphertext block confirms the guessed byte.
            if same_block(data):
                block += word
                break
    plain += block
print(plain)
|
984,732 | bb7e88bb311d6aac13f27c7709311bf237a8f024 | '''
376. Wiggle Subsequence
A sequence of numbers is called a wiggle sequence if the differences
between successive numbers strictly alternate between positive and
negative. The first difference (if one exists) may be either positive
or negative. A sequence with fewer than two elements is trivially a
wiggle sequence.
For example, [1,7,4,9,2,5] is a wiggle sequence because the differences
(6,-3,5,-7,3) are alternately positive and negative. In contrast,
[1,4,7,2,5] and [1,7,4,5,5] are not wiggle sequences, the first because
its first two differences are positive and the second because its last
difference is zero.
Given a sequence of integers, return the length of the longest subsequence
that is a wiggle sequence. A subsequence is obtained by deleting some
number of elements (eventually, also zero) from the original sequence,
leaving the remaining elements in their original order.
Examples:
Input: [1,7,4,9,2,5]
Output: 6
The entire sequence is a wiggle sequence.
Input: [1,17,5,10,13,15,10,5,16,8]
Output: 7
There are several subsequences that achieve this length. One is
[1,17,10,13,10,16,8].
Input: [1,2,3,4,5,6,7,8,9]
Output: 2
Follow up:
Can you do it in O(n) time?
'''
class Solution(object):
    def wiggleMaxLength(self, nums):
        """Return the length of the longest wiggle subsequence of nums.

        Greedy single pass: after skipping any leading run of equal values,
        count one element for every change of direction.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        if n < 2:
            return n
        # Find the first pair of unequal consecutive values.
        j = 1
        while not nums[j] - nums[j - 1] and j < n - 1:
            j += 1
        if nums[j] == nums[j - 1]:
            # All elements are equal: any single element is a trivial wiggle.
            return 1
        rising = nums[j] > nums[j - 1]
        length = 2
        # Invariant: `length` is the best wiggle length over nums[:k], and
        # `rising` records the direction of its last step.
        for k in range(j + 1, n):
            turned_down = rising and nums[k] < nums[k - 1]
            turned_up = (not rising) and nums[k] > nums[k - 1]
            if turned_down or turned_up:
                length += 1
                rising = not rising
        return length
def test():
    """Ad-hoc smoke test: print results for a few sample inputs."""
    numsSet = [[3,3,3,2,5], [1,1,7,4,9,2,5], []]
    sol = Solution()
    for nums in numsSet:
        print(nums)
        print(sol.wiggleMaxLength(nums))
        print("##########################")
if __name__ == "__main__":
    test()
|
984,733 | 3fa94e83881b5b6f7898815fd8490abaf76a5cc8 | # a program to divide excel sheets columns in seperate excel sheets.
import openpyxl
sal_workbook = openpyxl.Workbook()
salary_head_list = ['PR_AIDA','PR_IBASIC','PR_IBONUS','PR_ITS','PR_IDA','DAY_AMT','DAY_OFF','PR_IDIRTY','EL_AMT','PR_EL','PR_DELEC','PR_DEPF','PR_IHRA','HRS_AMT','PR_DIT','PR_DLWPVAL','PR_LWB','PR_DMESSAL','N_CARE','HRS','PR_DOTH_1','PR_IOTALW','P_CARE','PR_DPF','PR_ISP','PR_DOTHER','PR_LICAMT','PR_IUNIFOR','PR_DVOLPF','PR_IWASHIN']
# read given columns from the main workbook and create excel sheet
def create_excel_sheets(workbook, sheet_name, columns, head_name ):
    """Copy an (EmpCode, head_name) column pair from the source sheet into a
    new sheet of the module-level salary workbook, then save it.

    workbook   -- path of the source .xlsx file (rebound to the Workbook below)
    columns    -- [empcode_column_index, value_column_index], 0-based
    NOTE(review): saves to a hard-coded path on every call and re-loads the
    source workbook each time; get_sheet_by_name() is deprecated in newer
    openpyxl (use ``workbook[sheet_name]``) — confirm the installed version.
    """
    workbook = openpyxl.load_workbook(workbook)
    sheet = workbook.get_sheet_by_name(sheet_name)
    new_sheet = sal_workbook.create_sheet(title= head_name)
    new_sheet.cell(row=1,column=1).value="EmpCode"
    new_sheet.cell(row=1,column=2).value= head_name
    count = 0
    for rows in sheet:
        if count == 0:
            # Skip the source header row; the new sheet has its own header.
            count += 1
            continue
        count += 1
        new_sheet.cell(row=count,column=1).value = rows[columns[0]].value
        new_sheet.cell(row=count, column=2).value = rows[columns[1]].value
    sal_workbook.save('D:/july/salary_workbook.xlsx')
def columns_map(filename, sheet_name):
    """Scan the header row and export each salary-head column to its own sheet.

    Only the first row is examined — the trailing ``break`` exits after the
    header row.
    """
    workbook = openpyxl.load_workbook(filename)
    sheet = workbook.get_sheet_by_name(sheet_name)
    for row in sheet.rows:
        count = 0
        for column in row:
            col_name = column.value
            if col_name == 'PR_NEWCODE':
                count += 1
                continue
            if col_name in salary_head_list:
                # Column index 9 holds the employee code; ``count`` is the
                # current salary-head column index.
                create_excel_sheets(filename, sheet_name,[9, count],col_name)
            count += 1
        break
if __name__ == '__main__':
    # ``file`` shadows the Python 2 builtin of the same name — harmless here.
    file = 'D:\\Software\\Software\\HR Module\\Salary\\Salary July 2017.xlsx'
    sheet_name = 'JUL17'
    columns_map(file, sheet_name)
|
984,734 | 9b10015004590e367767ca7f6ed8b1a4ca78fbb8 | import pandas as pd
import os
import sys
import random
from mq.celery import app
from .models import Employee, AsyncResults
from django.http import HttpResponse
from django.conf import settings
@app.task(bind=True)
def createCSV(self, amount):
    """Celery task: export the first *amount* employees to a CSV file.

    Writes the file under MEDIA_ROOT with a random numeric name and records
    the outcome (success code or exception type) in AsyncResults.
    """
    columns = ['id', 'gender', 'education_level', 'relationship_status',
               'growth_rate', 'unit', 'attrition_rate']
    employees = Employee.objects.all()[:int(amount)].values(*columns)
    employees_df = pd.DataFrame.from_records(employees, columns=columns)
    filename = str(random.randint(1000000, 100000000000)) + '.csv'
    file_path = os.path.join(settings.MEDIA_ROOT, filename)
    employees_df.to_csv(file_path, index=False)
    try:
        result = 200
        async_result = AsyncResults.objects.create(
            task_id=self.request.id, result=result,
            location=file_path, filename=filename)
        async_result.save()
    except Exception:  # narrowed from a bare except (was swallowing SystemExit too)
        result = str(sys.exc_info()[0])
        # Bug fix: the original referenced an undefined name ``task_id`` here,
        # raising NameError instead of recording the failure.
        async_result = AsyncResults.objects.create(
            task_id=self.request.id, result=result)
        async_result.save()
984,735 | 7bf3170866a846c96ec45efa575e4a7ee53b4503 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from .settings import DEBUG, MEDIA_ROOT, STATIC_ROOT
admin.autodiscover()
# Legacy Django (<1.8) URLconf: uses the deprecated patterns() helper and
# string-path view references.
urlpatterns = patterns('',
    # drive app
    url(r'^', include('drive.urls')),
    # auth-related URLs
    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/loggedout/'}, name='logout'),
    url(r'^switchuser/$', 'django.contrib.auth.views.logout_then_login', name='switchuser'),
    url(r'^loggedout/$', 'gilgidrive.views.loggedout'),
    url(r'^changepassword/$', 'django.contrib.auth.views.password_change', name='changepassword'),
    url(r'^passwordchanged/$', 'django.contrib.auth.views.password_change_done'),
    # password reset urls (not working)
    #url(r'^resetpassword/$', 'django.contrib.auth.views.password_reset'),
    #url(r'^resetsent/$', 'django.contrib.auth.views.password_reset_done'),
    #url(r'^setnewpassword/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm'),
    #url(r'^setnewpassword/[0-9A-Za-z]+-.+/$', 'django.contrib.auth.views.password_reset_confirm'),
    #url(r'^resetcomplete/$', 'django.contrib.auth.views.password_reset_complete'),
    # admin docs
    #url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # admin
    url(r'^admin/', include(admin.site.urls)),
)
# DEV ONLY!!!!!!!!!!
#
# this magic code snippet allows the dev server to serve anything in media/ and static/
if DEBUG:
    urlpatterns += patterns('',
        #url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT,}),
        #url(r'^static/admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': STATIC_ROOT + '/admin/',}),
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': STATIC_ROOT,})
    )
|
984,736 | c4ea0e506051c8635eb107c5bdbb58f22be91588 | from django import forms
from customer.models import Contact, Exchange
class ContactForm(forms.ModelForm):
    """ModelForm exposing every field of the Contact model."""
    class Meta:
        model = Contact
        fields = '__all__'
class ExchangeForm(forms.ModelForm):
    """ModelForm exposing every field of the Exchange model."""
    class Meta:
        model = Exchange
        fields = '__all__'
|
984,737 | 8bdb856694537ca832a11c552975de6cc6883e25 | from django.views.generic import ListView
from .models import Course
class ListCourse(ListView):
    """List all Course objects on the index page as `courses`."""
    model = Course
    template_name = 'index.html'
    context_object_name = 'courses'
|
984,738 | fce885fefec190a1a2fe44aeb4879845cf6f80da | from django import forms
from django.contrib.auth.models import User
from django.core.validators import validate_email, RegexValidator
from models import *
from django.forms import FileInput, TextInput, Textarea
MAX_UPLOAD_SIZE = 2500000
class EditForm(forms.ModelForm):
    """Profile-edit form over UserProfile.

    Exposes only the editable subset of fields; assumes UserProfile
    declares all five — TODO confirm against the model definition.
    """
    class Meta:
        model = UserProfile
        fields = ['first_name', 'last_name','email','age','bio']
class EditPhoto(forms.Form):
    """Standalone form for uploading a profile photo (optional file)."""
    photo = forms.ImageField(required=False)
class PostForm(forms.Form):
    """New-post form: short text plus an optional image attachment."""
    text = forms.CharField(max_length=160)
    image= forms.ImageField(required=False)
class CommentForm(forms.Form):
    """Single-field comment form, capped at 50 characters."""

    commenttext = forms.CharField(
        required=True,
        max_length=50,
        widget=Textarea(attrs={
            'class': "form-control",
            'placeholder': "Comments Here",
            'maxlength': 50,
            'rows': 2,
        }),
    )

    def clean(self):
        """Raise a custom error when the comment exceeds 50 characters.

        NOTE(review): max_length=50 already enforces this server-side; this
        hook only customises the message.
        """
        data = super(CommentForm, self).clean()
        text = data.get('commenttext')
        if text and len(text) > 50:
            raise forms.ValidationError("The length of comment exceed the maximum 50 requirement")
        return self.cleaned_data
class RegistrationForm(forms.Form):
    """Sign-up form: unique username, valid email, two matching passwords."""

    username = forms.CharField(max_length=30)
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
    email = forms.CharField(max_length=50, validators=[validate_email])
    password1 = forms.CharField(max_length=200,
                                label='Password',
                                widget=forms.PasswordInput())
    password2 = forms.CharField(max_length=200,
                                label='Confirm password',
                                widget=forms.PasswordInput())

    def clean(self):
        """Cross-field check: both password entries must be identical."""
        data = super(RegistrationForm, self).clean()
        pw1 = data.get('password1')
        pw2 = data.get('password2')
        if pw1 and pw2 and pw1 != pw2:
            raise forms.ValidationError("Password Fail to match.")
        return data

    def clean_username(self):
        """Reject usernames that already exist (exact match)."""
        name = self.cleaned_data.get('username')
        if User.objects.filter(username__exact=name):
            raise forms.ValidationError("Username is already taken.")
        return name
|
984,739 | 7a5780c6b994f53baa31113ada4f86a7f85dc4a3 | class Solution:
def sol(self, mat, first):
mat, move = [i.copy() for i in mat], 0
for j in range(len(mat)):
for i in range(len(mat[0])):
if (j and mat[j - 1][i]) or (not j and 1 << i & first):
move += 1
if j > 0:
mat[j - 1][i] ^= 1
if j + 1 < len(mat):
mat[j + 1][i] ^= 1
if i > 0:
mat[j][i - 1] ^= 1
if i + 1 < len(mat[0]):
mat[j][i + 1] ^= 1
mat[j][i] ^= 1
return 1e9 if sum(sum(i) for i in mat) else move
def minFlips(self, mat: List[List[int]]) -> int:
ans = min(self.sol(mat, i) for i in range(2 ** len(mat[0])))
return -1 if ans == 1e9 else ans
|
984,740 | 3fe1c87f3134db2ee0d5bd2f073e6bec34d4e8ae | from application.extensions.admin.views.base import BaseView
from application.extensions.admin.views.index import AdminIndexView
from application.extensions.admin.views.user import UserView
|
984,741 | f7f67df80189c4ca13025541501baaef9233959b | #!/usr/bin/env python
#coding=utf-8
"""
pipline input
"""
#from __future__ import absolute_import
#from __future__ import division
#from __future__ import print_function
#import argparse
import shutil
#import sys
import os
import json
import glob
from datetime import date, timedelta
from time import time
import random
import pandas as pd
import numpy as np
import tensorflow as tf
import sys  # FIX: the top-of-file `import sys` is commented out, but sys is used below

# Make ./common and the working directory importable before loading data code.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/common/')
sys.path.append(os.getcwd())
from data.data_reader import input_fn

# Command-line hyper-parameters and pipeline flags; read via FLAGS.<name>.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("num_threads", 16, "Number of threads")
tf.app.flags.DEFINE_integer("feature_size", 0, "Number of features")
tf.app.flags.DEFINE_integer("field_size", 0, "Number of fields")
tf.app.flags.DEFINE_integer("embedding_size", 32, "Embedding size")
tf.app.flags.DEFINE_integer("num_epochs", 10, "Number of epochs")
tf.app.flags.DEFINE_integer("batch_size", 64, "Number of batch size")
tf.app.flags.DEFINE_integer("log_steps", 1000, "save summary every steps")
tf.app.flags.DEFINE_float("learning_rate", 0.0005, "learning rate")
tf.app.flags.DEFINE_float("l2_reg", 0.0001, "L2 regularization")
tf.app.flags.DEFINE_string("loss_type", 'log_loss', "loss type {square_loss, log_loss}")
tf.app.flags.DEFINE_string("optimizer", 'Adam', "optimizer type {Adam, Adagrad, GD, Momentum}")
tf.app.flags.DEFINE_string("deep_layers", '256,128,64', "deep layers")
tf.app.flags.DEFINE_string("dropout", '0.5,0.5,0.5', "dropout rate")
tf.app.flags.DEFINE_boolean("batch_norm", False, "perform batch normaization (True or False)")
tf.app.flags.DEFINE_float("batch_norm_decay", 0.9, "decay for the moving average(recommend trying decay=0.9)")
tf.app.flags.DEFINE_string("data_dir", '', "data dir")
tf.app.flags.DEFINE_string("dt_dir", '', "data dt partition")
tf.app.flags.DEFINE_string("model_dir", '', "model check point dir")
tf.app.flags.DEFINE_string("servable_model_dir", '', "export servable model for TensorFlow Serving")
tf.app.flags.DEFINE_string("task_type", 'train', "task type {train, infer, eval, export}")
tf.app.flags.DEFINE_boolean("clear_existing_model", False, "clear existing model or not")
tf.app.flags.DEFINE_string("model_type", 'deepfm', "choose which model for train")
def main(_):
    """Train/evaluate/infer/export the estimator selected by FLAGS.

    Reads libsvm-formatted train/validation/test shards from FLAGS.data_dir,
    loads model_fn from the module named `model.<FLAGS.model_type>`, then
    dispatches on FLAGS.task_type.
    """
    tr_files = glob.glob("%s/tr*libsvm" % FLAGS.data_dir)
    random.shuffle(tr_files)  # randomize shard order across runs
    print("tr_files:", tr_files)
    va_files = glob.glob("%s/va*libsvm" % FLAGS.data_dir)
    print("va_files:", va_files)
    te_files = glob.glob("%s/te*libsvm" % FLAGS.data_dir)
    print("te_files:", te_files)
    if FLAGS.clear_existing_model:
        try:
            shutil.rmtree(FLAGS.model_dir)
        except Exception as e:
            print(e, "at clear_existing_model")
        else:
            print("existing model cleaned at %s" % FLAGS.model_dir)
    # Hyper-parameters forwarded to the model_fn via Estimator params.
    model_params = {
        "field_size": FLAGS.field_size,
        "feature_size": FLAGS.feature_size,
        "embedding_size": FLAGS.embedding_size,
        "learning_rate": FLAGS.learning_rate,
        "batch_norm_decay": FLAGS.batch_norm_decay,
        "l2_reg": FLAGS.l2_reg,
        "deep_layers": FLAGS.deep_layers,
        "dropout": FLAGS.dropout
    }
    # CPU-only session; GPU count pinned to 0.
    config = tf.estimator.RunConfig().replace(session_config = tf.ConfigProto(device_count={'GPU':0, 'CPU':FLAGS.num_threads}),
            log_step_count_steps=FLAGS.log_steps, save_summary_steps=FLAGS.log_steps)
    # BUG FIX: `model_type` was an undefined name (NameError) — the value
    # lives on FLAGS. `importlib` was also never imported at module level,
    # so import it locally here.
    import importlib
    model_lib = 'model.' + FLAGS.model_type
    print('train use model', model_lib)
    model_fn = importlib.import_module(model_lib).model_fn
    Model = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, params=model_params, config=config)
    if FLAGS.task_type == 'train':
        train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(tr_files, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size))
        eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size), steps=None, start_delay_secs=1000, throttle_secs=1200)
        tf.estimator.train_and_evaluate(Model, train_spec, eval_spec)
    elif FLAGS.task_type == 'eval':
        Model.evaluate(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size))
    elif FLAGS.task_type == 'infer':
        preds = Model.predict(input_fn=lambda: input_fn(te_files, num_epochs=1, batch_size=FLAGS.batch_size), predict_keys="prob")
        with open(FLAGS.data_dir+"/pred.txt", "w") as fo:
            for prob in preds:
                fo.write("%f\n" % (prob['prob']))
    elif FLAGS.task_type == 'export':
        # Raw serving signature: dense id/value tensors sized by field_size.
        feature_spec = {
            'feat_ids': tf.placeholder(dtype=tf.int64, shape=[None, FLAGS.field_size], name='feat_ids'),
            'feat_vals': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.field_size], name='feat_vals')
        }
        serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
        Model.export_savedmodel(FLAGS.servable_model_dir, serving_input_receiver_fn)
|
984,742 | b44310f43ccae1be9896e7509d88aba3508b9e6e | from . import auth
from webapp import db, bcrypt
from flask_login import login_user, logout_user, current_user, login_required
from ..models import User, Post
from flask import render_template, url_for, flash, redirect
from .forms import blog_form, registrationForm, loginForm
@auth.route('/signup', methods=['POST', 'GET'])
def signUp():
    """Register a new user, then send them to the login page.

    BUG FIX: an already-authenticated user was redirected back to
    'auth.signUp' itself, producing an infinite redirect loop; redirect
    to 'main.home' instead (mirrors signIn below).
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = registrationForm()
    if form.validate_on_submit():
        # Store only the bcrypt hash, never the plaintext password.
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash('Your account has been created! You are now able to login ', 'success')
        return redirect(url_for('auth.signIn'))
    return render_template('signUp.html', form= form, title='signUp')
@auth.route("/login", methods=['POST', 'GET'])
def signIn():
if current_user.is_authenticated:
return redirect(url_for('main.home'))
form = loginForm()
if form.validate_on_submit():
user = User.query.filter_by(email= form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('main.home'))
else:
flash('login unsuccessful. please check your email or password.', 'danger')
return render_template('signIn.html', form= form, title="signIn")
@auth.route("/logout")
def signOut():
logout_user()
return redirect(url_for('main.home'))
@auth.route('/blogs')
def blogs():
    """Render every Post.

    NOTE(review): template name is 'pitches.html', not a blogs template —
    confirm this reuse is intentional.
    """
    posts= Post.query.all()
    return render_template('pitches.html', posts=posts)
@auth.route('/post/new', methods=['POST', 'GET'])
@login_required
def post():
    """Create a new blog post authored by the logged-in user."""
    form = blog_form()
    if not form.validate_on_submit():
        return render_template('post.html', form=form)
    entry = Post(title=form.title.data, content=form.content.data, author=current_user)
    db.session.add(entry)
    db.session.commit()
    flash( " post has been created", "success" )
    return redirect(url_for('auth.blogs'))
|
984,743 | 1b844f6e02166119f3c5cfb490ef5d26299e1373 | import cv2
import matplotlib.pyplot as plt
# Post-process ambient-occlusion render #22 with edge-preserving filters.
order = 22
# FIX (latent bug): the original paths mixed escaped and unescaped
# backslashes ('C:\\Users\\39796\Desktop\...'), relying on \D, \A, \E not
# being escape sequences. Raw strings produce the identical path and stay
# correct if a segment ever starts with a real escape char (\t, \n, ...).
img = cv2.imread(r'C:\Users\39796\Desktop\Ambient Occlosion Paper\Experiment\%s-nnao.png' % order)
# 3x3 median blur: removes salt-and-pepper noise.
img_median = cv2.medianBlur(img, 3)
# cv2.imwrite(r'C:\Users\39796\Desktop\Ambient Occlosion Paper\Experiment\%s-nnao_blur.png' % order, img_median)
# img_mean = cv2.blur(img, (5,5))
# Bilateral filter: smooths while preserving edges (d=18, sigmaColor=sigmaSpace=85).
img_bilater = cv2.bilateralFilter(img,18,85,85)
cv2.imwrite(r'C:\Users\39796\Desktop\Ambient Occlosion Paper\Experiment\%s-nnao_blur_bilater.png' % order, img_bilater)
# img_Guassian = cv2.GaussianBlur(img,(5,5),0)
# plt.subplot(121)
# plt.imshow(img)
#
# plt.subplot(122)
# plt.imshow(img_bilater)
# plt.show()
984,744 | a29be1112ca807540277046c1a9dda6aad3aed11 | import engine
import history
import ds18b20
import config
import json
import utils
from flask import Flask, request
from flask.ext.restful import Resource, Api
from flask import render_template
__author__ = 'Tom'
app = Flask(__name__)
api = Api(app)
@app.route('/')
def index():
    """Render the thermostat dashboard with the current config and timetable."""
    CONFIG = config.load()
    return render_template('index.html',
                           config=CONFIG,
                           timetable=CONFIG["timetable"])
class ThermostatConfig(Resource):
    """REST access to thermostat configuration entries."""

    def get(self, key):
        """Return the whole config (key is None) or a single entry; 404 if unknown."""
        settings = config.load()
        if key is None:
            return settings, 200
        if key not in settings:
            return {"result": "key " + key + " not found"}, 404
        return settings[key]

    def put(self, key):
        """Replace an existing entry with JSON parsed from the form body."""
        settings = config.load()
        if key not in settings:
            return {"result": "key " + key + " not found"}, 404
        print(request.form['data'])  # debug trace of the raw payload
        settings[key] = json.loads(request.form['data'])
        config.save(settings)
        return {"result": "ok"}, 201
class Temperature(Resource):
    def get(self):
        """Return the current DS18B20 sensor reading, rounded to one decimal."""
        return round(ds18b20.readtemperature(), 1)
class Engine(Resource):
    """Read-only access to the heating engine."""

    def get(self, key):
        """Return the engine value for `key`; 404 tuple for unknown keys.

        BUG FIX: the original built a dict literal whose values called BOTH
        engine.gettargettemperature() and engine.run() before .get() picked
        one, so every request triggered both side effects. Dispatch lazily.
        """
        if key == "currenttarget":
            return engine.gettargettemperature()
        if key == 'run':
            return engine.run()
        return {"result": "key " + key + " not found"}, 404
class History(Resource):
    def get(self, limit=10):
        """Return the most recent `limit` history records."""
        return history.read(limit)
# Route registrations: config CRUD, live temperature, engine state, history.
api.add_resource(ThermostatConfig, '/api/<string:key>')
api.add_resource(Temperature, '/api/temperature')
api.add_resource(Engine, '/api/engine/<string:key>')
api.add_resource(History, '/api/history/<int:limit>')
if __name__ == '__main__':
    # Dev server on all interfaces, port 80, with debug mode on.
    app.debug = True
    app.run(host="0.0.0.0", port=80, debug=True)
|
984,745 | d5ea315775614bf6872b0ceaa27b3fa6a6695733 | import random
import numpy as np
from args import *
class Q_Agent():
    """Tabular Q-learning agent (4 states x 2 actions).

    Hyper-parameters `eps`, `alpha` and `gamma` come from the star-import
    of the args module at the top of this file.
    """

    def reset(self):
        # All action-value estimates start at zero.
        self.Q = np.zeros((4, 2))

    def get_action(self, s):
        """Epsilon-greedy: random action with probability eps, else greedy."""
        if random.random() < eps:
            return random.randint(0, 1)
        return np.argmax(self.Q[s])

    def update_Q(self, s, a, r, s2):
        """One TD(0) backup toward r + gamma * max_a' Q(s2, a')."""
        target = r + gamma * np.max(self.Q[s2])
        self.Q[s, a] += alpha * (target - self.Q[s, a])
class Double_Q_Agent():
    """Double Q-learning: two tables, each updated via the other's estimate
    to reduce maximization bias. Uses module-level eps/alpha/gamma."""

    def reset(self):
        self.Q1 = np.zeros((4, 2))
        self.Q2 = np.zeros((4, 2))

    def get_action(self, s):
        """Epsilon-greedy over the sum of both value tables."""
        if random.random() < eps:
            return random.randint(0, 1)
        return np.argmax(self.Q1[s] + self.Q2[s])

    def update_Q(self, s, a, r, s2):
        """Pick a table at random; its argmax chooses a', the other evaluates it."""
        if random.random() < 0.5:
            best = np.argmax(self.Q1[s2])
            self.Q1[s, a] += alpha * (r + gamma * self.Q2[s2, best] - self.Q1[s, a])
        else:
            best = np.argmax(self.Q2[s2])
            self.Q2[s, a] += alpha * (r + gamma * self.Q1[s2, best] - self.Q2[s, a])
|
984,746 | d34096397277153406164c54d52f290260cb1bf0 | print("code for Assignment 1 :")
# Collect three USN -> name pairs from the user and print the mapping.
disp= dict()
for i in range(0,3):
    name=input("Enter Name :")
    usn= input("Enter Usn :")
    disp[usn]=name
print(disp)
# Alternative pretty-printer, kept disabled:
#for key,value in disp.items():
#    print(key,':',value)
|
984,747 | f5cc6d43057bf4be41fa82b040ea6f70a359b05a | first_x14=[[5.733508782899592e-07, -9.792499896753948e-13, -9.978517529264487e-14, 2.110890707154146e-12, -7.144083544313336e-11, -8.836524221634837e-16, 4.414569062938014e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.6124679052430716e-11, 1325127513898.5303, 3285474337242.722], [6.759769751753592e-07, -4.4585624345198636e-13, -1.047095901223355e-14, 5.99369605577349e-14, -2.968874709307344e-11, -7.319160779641687e-16, 5.1470022686346514e-17, 7.380109316273403e-06, -5.260741533604885e-07, -7.970119723160097e-10, 7.466142406941075e-11, 2867082925419.5986, 3445318039266.0684], [2.8229502867899e-07, -3.0527780090883247e-13, -8.778240430738341e-14, 8.306139696543698e-12, -4.8484326318024165e-11, -9.046948991151852e-16, 5.5404168398351446e-17, 5.340108311994839e-06, -2.420986057307696e-07, -6.045130273724476e-09, 8.412855283853855e-11, 3671220552531.4546, 3517003118089.3027], [6.565091013964766e-07, -2.647293146626374e-13, -9.786335258220626e-14, 4.7533522905539054e-12, -4.819418734702914e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.754051206704872e-06, -2.259982414812056e-08, -5.529044647532495e-09, 5.372920794923787e-11, 2520852108894.8896, 5216318614365.096], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 9.287612328040984e-18, 5.7664959761093685e-06, -3.0217391933218974e-07, -2.355335355612979e-09, 5.87758358357815e-12, 3889707410776.0127, 4969076733505.051], [3.006931299564841e-08, -5.353214400182556e-13, -7.323674089198226e-14, 1.4544438683021653e-12, -1.4769735680065943e-11, -2.658501861186624e-17, 1.6619931378442954e-17, 6.610416236687641e-06, -1.7567203171820354e-07, -5.556311931208314e-10, 5.292301707973015e-11, 2816788850563.9653, 6642234982453.244], [5.2854453952853376e-08, -9.82129351843824e-13, -5.467017300603423e-14, 7.480059640590764e-12, -3.7409001486466135e-11, 
-9.260313514419191e-16, 9.326865995542488e-17, 8.303020037670824e-06, -1.5012048591541416e-07, -7.909615152023537e-09, 3.8467186493341434e-11, 5354472251214.254, 5886818863014.152], [1.560656784593446e-07, -6.417160706539648e-14, -5.5649813962962523e-14, 2.1772190901814123e-12, -8.687061388361772e-12, -2.351908953649683e-16, 8.660680519142658e-17, 7.784657471602897e-06, -2.1623400033306314e-08, -5.198812125881622e-09, 8.112428041820533e-11, 3741911722984.543, 7598060804018.263], [9.988816722078529e-07, -6.501436405310303e-13, -3.24248355793262e-14, 2.4030684482937e-12, -9.905457583614745e-11, -6.0079135836738185e-16, 3.068835356892944e-17, 4.571110887765432e-06, -1.5715203071295402e-07, -3.815179276869973e-10, 9.624518402577202e-11, 4719755629064.356, 11051751077943.895], [5.420389319404943e-07, -8.883316115186763e-13, -3.185602321995129e-14, 5.01581076054135e-12, -2.7729295995975057e-11, -3.8152145399630157e-16, 4.736504046698289e-17, 1.3934626098796321e-06, -5.21347249238398e-07, -2.540756658468136e-09, 9.880636698772421e-11, 7672090766771.888, 11560525255584.617], [2.1266320349135235e-07, -4.3302354654598405e-13, -2.151653077446246e-14, 9.172849160629877e-12, -8.869444528044474e-11, -5.445695475111853e-16, 6.58221518081471e-18, 9.526037449819795e-06, -1.7723140181473784e-07, -9.122319825061993e-09, 1.8871148895474168e-11, 8732992139667.833, 11899752860132.979], [7.77670086688964e-09, -7.262036142461742e-13, -6.592073553469757e-14, 5.4135148026096486e-12, -3.6596651599181066e-11, -1.909565411149502e-16, 2.7145076512919608e-17, 8.967500935930275e-06, -6.822024651927961e-07, -4.512337257775556e-10, 4.636610283374809e-11, 8294423437141.268, 13065652631464.145], [7.789796495644357e-07, -8.414802101717935e-13, -1.4900093109348255e-14, 7.371820299533076e-12, -8.818580330534732e-13, -4.3717393394620656e-16, 4.071231042114076e-17, 2.1024273976709743e-06, -1.5003076454265462e-07, -8.339725308019884e-09, 4.8461315560000974e-11, 9700513518264.773, 12944666107127.785], 
[1.3987154831914293e-07, -3.5795177476345686e-13, -7.815563112718038e-14, 7.678086888549384e-13, -1.7045151477204002e-11, -6.392110950351097e-16, 8.388806445312012e-17, 1.2519638176142278e-06, -4.1474142504451513e-07, -7.224769736159364e-10, 6.554723175652988e-12, 9879549797994.39, 14859762245977.035], [7.15490592872916e-08, -6.220634370967929e-13, -3.6054489563917383e-14, 5.693928818478827e-12, -7.195883601674982e-11, -9.462881935318785e-16, 8.928573248035271e-17, 9.956788096434975e-06, -7.240947638113685e-07, -2.524623766534897e-09, 9.932488754139714e-11, 10758528366096.889, 16166452689605.535], [5.664718167167099e-07, -1.3278287177190518e-13, -2.001516781082948e-14, 6.30963753443993e-12, -8.784720226636233e-11, -3.946617350830404e-16, 4.7949365997825985e-17, 2.019003440811227e-06, -3.0207599318296974e-07, -6.8168803324317184e-09, 4.5153256347570805e-11, 14076479257026.697, 21655898164342.477], [9.553187794171832e-07, -9.198697122928365e-13, -3.6623387938905975e-14, 2.53848689553254e-12, -6.0260720198811236e-12, -4.897480552220685e-16, 4.5435589533613936e-17, 4.4613969491995745e-06, -7.245936686107763e-07, -4.799514753127343e-10, 6.841232506196834e-11, 14332879303842.453, 23484395367389.332], [3.4027742858013696e-07, -2.7567691473002953e-13, -3.1542433647023185e-14, 5.6449724312082665e-12, -9.920139002774583e-11, -7.576903348257841e-16, 2.1121983472076654e-17, 9.699041167336082e-06, -7.507445855690184e-07, -1.8285500206771666e-09, 3.7011770420519673e-11, 19980447826956.168, 32497573129495.195], [3.214489750920658e-07, -8.930269729584898e-13, -6.116078610306497e-17, 2.8351129880177914e-12, -1.787262306466895e-11, -1.1623220070038122e-16, 7.473162695018654e-17, 1.585512923513518e-06, -3.125089309127821e-07, -9.04991222516019e-09, 5.68506728899925e-11, 20195988346966.797, 32967374880464.855], [8.288950702532548e-07, -4.709682496048529e-15, -4.256063805407111e-14, 9.105457843079445e-13, -5.591177843007147e-11, -3.698365826273389e-16, 6.5968393424225e-18, 
9.114508602279829e-06, -8.57540833984797e-07, -2.8166078216175385e-09, 5.777912495354073e-11, 28057245323999.82, 48435901382352.6], [3.372696400858852e-07, -5.27817020946249e-13, -6.91947192011283e-15, 4.5598130586184646e-12, -9.623426572466953e-13, -6.200852652181951e-16, 5.427865069735762e-17, 3.5661025789876956e-07, -6.930164406908556e-07, -1.2001485312944993e-09, 7.852433744200693e-12, 35101902109633.75, 60795411030021.516], [7.013111376720136e-07, -7.463916754531805e-13, -6.510294826335273e-14, 9.82200064548761e-12, -8.607051263732655e-11, -4.830149950389245e-16, 3.590734044522462e-17, 1.6019299511837672e-06, -5.567777186115107e-07, -5.425241254336125e-09, 1.592126509867795e-11, 36978893032053.7, 64141241407945.65], [3.1897172560857545e-07, -5.767108873244325e-13, -2.623412648510314e-14, 3.351243813212279e-14, -6.478417001425142e-11, -5.860895455023451e-16, 7.814433424552305e-17, 3.818815565480326e-06, -9.204025820705918e-07, -2.880658203612906e-09, 7.945045591428673e-11, 42472061001989.086, 74953862086755.92], [1.1324545704566235e-07, -1.8615374843289034e-13, -1.3620828061379032e-14, 6.290450565618076e-12, -3.206735634915657e-11, -4.720011066233777e-16, 5.542131369449319e-17, 6.151735860258644e-06, -6.472713877397806e-07, -8.93217820988329e-09, 2.2770881525690024e-11, 55078391556288.92, 97591107311291.72], [9.78454960024751e-07, -3.889061240694455e-13, -4.288877468150159e-14, 3.1947551495116965e-12, -2.089944362361814e-11, -6.878991796409284e-16, 1.400316352865455e-17, 4.163284967649611e-06, -6.770074859472697e-07, -7.330513436615759e-09, 9.578024269281738e-12, 60131206506355.46, 106233780208482.23], [6.730294872065228e-07, -7.263701852948662e-13, -2.1754038266955166e-14, 3.304414434580307e-12, -6.925577190534569e-11, -3.3415655617535e-16, 3.5718922287355333e-17, 1.7101810910523476e-06, -9.583413431913767e-07, -1.928963851990949e-09, 2.5416765256297103e-11, 61470492168374.94, 111392264146488.34], [6.625048712548385e-07, -3.102037943671703e-13, 
-2.5056464053665518e-14, 8.50642088993742e-12, -5.073276928466353e-11, -4.058632909513459e-16, 7.835783453721274e-17, 4.879057196999071e-06, -7.463484404888983e-07, -7.566230089863562e-09, 1.9832177515753546e-11, 63496343152288.69, 114039145405162.72], [4.3558183340712995e-07, -1.8333773002484576e-13, -3.099809066651078e-14, 4.437932195590246e-12, -5.894445641688982e-11, -8.915330511488532e-16, 4.562811484253275e-17, 4.562097970209531e-06, -8.933635498376032e-07, -4.240418461202612e-09, 2.3346573024924168e-11, 65477097244990.875, 115790211476386.75], [1.7692762595442167e-08, -2.0121260570984455e-13, -5.1516866309586096e-14, 9.898170806956085e-13, -8.997532503936981e-11, -8.061999901512827e-16, 2.946030798431142e-17, 2.666939914647112e-06, -6.49349883050122e-07, -9.555843736312365e-09, 1.663203370647437e-12, 80852878466466.81, 143945423658556.25], [7.778379983003765e-07, -9.51163351541514e-13, -9.364428761780998e-14, 6.181750430972449e-12, -2.7966489452183032e-11, -7.889952687816706e-16, 8.25757935020156e-17, 5.006760443132673e-06, -9.37850647625561e-07, -6.580286268016213e-09, 8.741801247314973e-12, 93706155241233.58, 168904804095970.34]]
second_x14 = [[6.088982408471882e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 4.414569062938014e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.87758358357815e-12, 772800450685.3794, 1074903034581.8103], [3.006931299564841e-08, -5.334993652225994e-13, -7.323674089198226e-14, 4.7533522905539054e-12, -1.4886492961013968e-11, -3.4235388393942845e-16, 3.880676183879715e-17, 9.754051206704872e-06, -1.7567203171820354e-07, -5.529044647532495e-09, 5.372920794923787e-11, 1008538480471.9463, 922345002727.5042], [6.063237694798799e-07, -1.1244597029986758e-13, -9.849223478538833e-14, 3.778275786724121e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 5.7664959761093685e-06, -2.259982414812056e-08, -5.529044647532495e-09, 5.9136183427000215e-12, 1782165307288.4954, 1150024464909.3079], [5.748338134215041e-07, -9.792499896753948e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -7.077150737567345e-11, -8.757391515009966e-16, 4.414569062938014e-17, 5.7421498238142795e-06, -3.0217391933218974e-07, -2.3342857546140607e-09, 5.6124679052430716e-11, 1776843660399.1191, 1558474520593.6626], [5.733508782899592e-07, -9.792499896753948e-13, -5.5252557691301074e-14, 2.116472308968923e-12, -6.755664331322433e-11, -8.822727940038677e-16, 4.414569062938014e-17, 5.7664959761093685e-06, -3.048034921333341e-07, -2.355335355612979e-09, 5.6124679052430716e-11, 1839824842059.0203, 1633495978787.121], [6.600214710589736e-07, -5.353214400182556e-13, -7.323674089198226e-14, 1.4442702298007812e-12, -4.8442008946813423e-11, -3.436378002606512e-16, 1.6470768420373595e-17, 6.610416236687641e-06, -2.259982414812056e-08, -5.4953302935200226e-09, 5.372920794923787e-11, 1722303029868.927, 2438098323844.667], [6.565091013964766e-07, -2.647293146626374e-13, -9.786335258220626e-14, 2.110890707154146e-12, -4.819418734702914e-11, -8.922361799200051e-16, 
3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.6124679052430716e-11, 1322575371074.6543, 3276641807977.197], [2.8229502867899e-07, -5.353214400182556e-13, -7.323674089198226e-14, 1.4544438683021653e-12, -4.8484326318024165e-11, -2.658501861186624e-17, 5.5404168398351446e-17, 5.340108311994839e-06, -1.7567203171820354e-07, -6.045130273724476e-09, 5.292301707973015e-11, 2385867171914.306, 2114426403822.9023], [5.733508782899592e-07, -2.647293146626374e-13, -9.978517529264487e-14, 4.742751075502114e-12, -4.836808977349959e-11, -3.436378002606512e-16, 4.414569062938014e-17, 9.754051206704872e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.372920794923787e-11, 1577380000755.9722, 3945957516361.4985], [2.8229502867899e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 5.5404168398351446e-17, 5.340108311994839e-06, -2.420986057307696e-07, -6.045130273724476e-09, 8.412855283853855e-11, 2742039574005.0845, 2500588307338.968], [6.587508779781232e-07, -3.0527780090883247e-13, -8.778240430738341e-14, 8.306139696543698e-12, -4.850298852039086e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.754051206704872e-06, -2.259982414812056e-08, -6.045130273724476e-09, 5.372920794923787e-11, 2115468422622.0574, 4035743087241.3164], [6.544043277213628e-07, -4.4585624345198636e-13, -9.786335258220626e-14, 5.99369605577349e-14, -4.819418734702914e-11, -7.319160779641687e-16, 3.880676183879715e-17, 9.754051206704872e-06, -5.260741533604885e-07, -7.970119723160097e-10, 5.372920794923787e-11, 2919531827336.114, 3526792409457.4624], [2.8229502867899e-07, -4.4585624345198636e-13, -8.778240430738341e-14, 5.99369605577349e-14, -2.979744954553815e-11, -7.319160779641687e-16, 5.1470022686346514e-17, 5.340108311994839e-06, -2.420986057307696e-07, -6.045130273724476e-09, 8.466767105119406e-11, 3294372483933.142, 3092978739421.8784], [6.005091729540702e-07, 
-3.0527780090883247e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -4.8484326318024165e-11, -9.046948991151852e-16, 9.287612328040984e-18, 5.79601560267464e-06, -2.420986057307696e-07, -6.053487215036327e-09, 8.412855283853855e-11, 3407093913390.0557, 3174994193950.052], [6.007468267187799e-07, -3.036446749128186e-13, -5.4802854525899444e-14, 3.776199085029705e-12, -4.8484326318024165e-11, -9.046948991151852e-16, 9.287612328040984e-18, 5.7664959761093685e-06, -2.420986057307696e-07, -6.045130273724476e-09, 8.412855283853855e-11, 3412983951338.4834, 3181924257109.9233], [2.8229502867899e-07, -3.0705285664280247e-13, -7.323674089198226e-14, 1.4544438683021653e-12, -4.8484326318024165e-11, -2.658501861186624e-17, 1.6619931378442954e-17, 6.610416236687641e-06, -2.4392972783406837e-07, -6.045130273724476e-09, 5.2864897792196626e-11, 3203304137963.988, 3687155179101.1123], [6.808230006111051e-07, -9.792499896753948e-13, -1.0000459230147685e-13, 2.110890707154146e-12, -2.968874709307344e-11, -8.836524221634837e-16, 4.4262584839709436e-17, 7.350902008001596e-06, -5.260741533604885e-07, -8.049724175750259e-10, 7.466142406941075e-11, 3207545593088.343, 3833308499356.0205], [5.733508782899592e-07, -9.752421775939296e-13, -9.872672712640182e-14, 4.7533522905539054e-12, -7.158749321774249e-11, -3.436378002606512e-16, 4.414569062938014e-17, 9.509915939077591e-06, -2.259982414812056e-08, -5.529044647532495e-09, 5.3775073840901154e-11, 2434723577471.4644, 4952441976627.66], [2.8229502867899e-07, -3.0527780090883247e-13, -8.778240430738341e-14, 8.306139696543698e-12, -4.8484326318024165e-11, -9.046948991151852e-16, 5.5404168398351446e-17, 5.379998404395464e-06, -2.420986057307696e-07, -6.045130273724476e-09, 8.412855283853855e-11, 3646510559267.529, 3484578424272.3564], [2.8229502867899e-07, -3.0527780090883247e-13, -8.778240430738341e-14, 8.306139696543698e-12, -4.8484326318024165e-11, -9.046948991151852e-16, 5.5454606586819697e-17, 5.340108311994839e-06, 
-2.420986057307696e-07, -6.045130273724476e-09, 8.412855283853855e-11, 3671220552531.4546, 3517003118089.3027], [5.733508782899592e-07, -3.0527780090883247e-13, -9.978517529264487e-14, 2.110890707154146e-12, -7.144083544313336e-11, -8.836524221634837e-16, 5.5404168398351446e-17, 5.340108311994839e-06, -2.420986057307696e-07, -6.070188403006356e-09, 8.399726442378924e-11, 3672876870679.671, 3532364420483.4766], [2.8229502867899e-07, -3.0235408878116277e-13, -8.778240430738341e-14, 8.306139696543698e-12, -4.8484326318024165e-11, -9.046948991151852e-16, 5.5404168398351446e-17, 5.289117672310813e-06, -2.420986057307696e-07, -6.045130273724476e-09, 8.347187424898725e-11, 3746047061749.448, 3623401882634.325], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -6.692087520058559e-11, -4.126122939209153e-16, 9.287612328040984e-18, 5.784249831630346e-06, -3.0217391933218974e-07, -2.355335355612979e-09, 5.87758358357815e-12, 3886319286086.491, 4960782343786.212], [2.8229502867899e-07, -5.353214400182556e-13, -7.323674089198226e-14, 8.306139696543698e-12, -1.4769735680065943e-11, -9.046948991151852e-16, 5.5404168398351446e-17, 5.340108311994839e-06, -1.7567203171820354e-07, -5.556311931208314e-10, 8.412855283853855e-11, 2990428740316.431, 7448601203660.05], [6.759769751753592e-07, -4.4585624345198636e-13, -1.047095901223355e-14, 1.4544438683021653e-12, -2.968874709307344e-11, -7.319160779641687e-16, 1.6619931378442954e-17, 6.610416236687641e-06, -1.7567203171820354e-07, -5.556311931208314e-10, 7.466142406941075e-11, 3274958467745.425, 8226172457054.158], [3.006931299564841e-08, -4.4585624345198636e-13, -1.052780044790212e-14, 1.441469117067904e-12, -2.968874709307344e-11, -7.378719067934883e-16, 1.6619931378442954e-17, 7.380109316273403e-06, -1.745803776864225e-07, -5.556311931208314e-10, 7.4455585151216e-11, 3733650169247.1187, 9426220412523.135], [6.565091013964766e-07, -2.647293146626374e-13, -8.737399471612157e-14, 
4.768663858866993e-12, -4.819418734702914e-11, -9.046948991151852e-16, 3.880676183879715e-17, 5.340108311994839e-06, -2.420986057307696e-07, -6.045130273724476e-09, 5.319920576513588e-11, 6112755209647.357, 7340605918309.047], [6.759769751753592e-07, -4.4585624345198636e-13, -9.978517529264487e-14, 2.110890707154146e-12, -2.9874844059223754e-11, -8.836524221634837e-16, 5.1470022686346514e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -7.970119723160097e-10, 7.466142406941075e-11, 4096609931652.035, 10681456719590.363], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 8.306139696543698e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 5.580979262225265e-17, 5.340108311994839e-06, -2.420986057307696e-07, -6.045130273724476e-09, 5.87758358357815e-12, 9686970712454.324, 14108451654325.338], [6.526716365952181e-07, -4.4846853790641076e-13, -9.786335258220626e-14, 5.99369605577349e-14, -2.968874709307344e-11, -3.409928965774377e-16, 3.880676183879715e-17, 7.319853550828825e-06, -5.260741533604885e-07, -5.529044647532495e-09, 5.372920794923787e-11, 15411313418633.568, 25201207759706.78]]
third_x14 = [[6.063237694798799e-07, -1.1244597029986758e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.87758358357815e-12, 739650207220.9751, 1061788733248.7014], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 739698379011.5448, 1062400903293.3635], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.791927746991619e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 772537031518.6694, 1075346711694.6995], [5.733508782899592e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.755664331322433e-11, -8.822727940038677e-16, 4.393045464764083e-17, 9.509915939077591e-06, -3.0256053169373756e-07, -2.5905015053998637e-09, 5.6022495785442904e-11, 927407918154.9799, 1037016288924.9664], [6.600214710589736e-07, -5.353214400182556e-13, -7.260936648628475e-14, 4.7533522905539054e-12, -1.4886492961013968e-11, -3.4701673454245684e-16, 1.651995247477222e-17, 9.714584975210482e-06, -1.7567203171820354e-07, -5.501866610764596e-09, 5.372920794923787e-11, 1008297856944.2361, 923571532970.2809], [3.006931299564841e-08, -1.1244597029986758e-13, -5.5252557691301074e-14, 4.716385409154961e-12, -6.692087520058559e-11, -4.11543164301811e-16, 4.414569062938014e-17, 9.754051206704872e-06, -1.7567203171820354e-07, -5.529044647532495e-09, 5.372920794923787e-11, 1051959731420.8206, 948241112191.0159], [5.733508782899592e-07, -5.334993652225994e-13, -7.323674089198226e-14, 4.747552289674061e-12, -6.755664331322433e-11, -3.400712026586783e-16, 
3.880676183879715e-17, 5.7664959761093685e-06, -3.048034921333341e-07, -2.355335355612979e-09, 5.6124679052430716e-11, 1155770399894.1113, 973450421367.5642], [6.092547277702558e-07, -1.1244597029986758e-13, -9.917852725282324e-14, 3.778275786724121e-12, -6.732880440929854e-11, -3.436378002606512e-16, 4.414569062938014e-17, 5.7664959761093685e-06, -3.048034921333341e-07, -2.3646058397533247e-09, 5.615073250070903e-11, 1165177214175.7458, 982711220487.6989], [6.088982408471882e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.131427845170008e-12, -6.744911855543395e-11, -4.0874842757446075e-16, 4.414569062938014e-17, 5.7664959761093685e-06, -3.048034921333341e-07, -2.5905015053998637e-09, 5.653239886390421e-11, 1395837339542.1511, 1222656178934.4692], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5225353996438374e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -2.259982414812056e-08, -5.529044647532495e-09, 5.87758358357815e-12, 1217327285994.517, 1480134413770.6023], [5.748338134215041e-07, -9.792499896753948e-13, -7.323674089198226e-14, 3.776199085029705e-12, -7.026233233828824e-11, -8.757391515009966e-16, 3.880676183879715e-17, 9.754051206704872e-06, -1.7567203171820354e-07, -5.529044647532495e-09, 5.342857290131783e-11, 1440871838292.7446, 1218329146957.9568], [6.566572292762514e-07, -1.1244597029986758e-13, -7.387203051296472e-14, 1.4567778986197162e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 4.414440192557486e-17, 9.600724315067452e-06, -2.265591457415277e-08, -5.4953302935200226e-09, 5.87758358357815e-12, 1233990793469.7686, 1528266959536.7373], [6.558546692010097e-07, -5.353214400182556e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -4.8442008946813423e-11, -8.757391515009966e-16, 1.6466501977494325e-17, 6.614083236682412e-06, -3.0217391933218974e-07, -2.3342857546140607e-09, 5.372920794923787e-11, 1553294703379.832, 1360303215875.266], 
[6.063409718283155e-07, -1.1244597029986758e-13, -9.849223478538833e-14, 3.778275786724121e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.910705961819695e-17, 5.7664959761093685e-06, -2.259982414812056e-08, -5.529044647532495e-09, 5.9136183427000215e-12, 1782165307288.4954, 1150024464909.3079], [6.600214710589736e-07, -9.792499896753948e-13, -7.282156056126614e-14, 2.1320931068035054e-12, -4.8442008946813423e-11, -8.822727940038677e-16, 4.414569062938014e-17, 5.7664959761093685e-06, -2.259982414812056e-08, -5.4953302935200226e-09, 5.372920794923787e-11, 1666497099336.2883, 1672424254333.7893], [5.733508782899592e-07, -5.353214400182556e-13, -5.5252557691301074e-14, 1.4442702298007812e-12, -6.755664331322433e-11, -8.822727940038677e-16, 1.6470768420373595e-17, 5.794688973465824e-06, -2.259982414812056e-08, -5.4953302935200226e-09, 5.372920794923787e-11, 1666106521993.4026, 1684465829843.0337], [5.733508782899592e-07, -9.704578933395783e-13, -5.5252557691301074e-14, 3.761113746594267e-12, -7.077150737567345e-11, -8.822727940038677e-16, 4.415630623063991e-17, 5.7664959761093685e-06, -3.048034921333341e-07, -2.3342857546140607e-09, 5.6124679052430716e-11, 1819999953793.5757, 1611373938131.2234], [6.600214710589736e-07, -5.353214400182556e-13, -7.323674089198226e-14, 1.4442702298007812e-12, -4.8442008946813423e-11, -8.757391515009966e-16, 4.414569062938014e-17, 5.7421498238142795e-06, -3.0217391933218974e-07, -2.3342857546140607e-09, 5.372920794923787e-11, 1878936914458.034, 1683925200271.064], [6.063237694798799e-07, -5.353214400182556e-13, -7.323674089198226e-14, 1.4442702298007812e-12, -6.692087520058559e-11, -3.436378002606512e-16, 1.6470768420373595e-17, 6.610416236687641e-06, -2.259982414812056e-08, -5.529044647532495e-09, 5.372920794923787e-11, 1709749921574.329, 2388058220220.504], [3.006931299564841e-08, -5.32810934609694e-13, -7.323674089198226e-14, 4.7533522905539054e-12, -1.4886492961013968e-11, -3.436378002606512e-16, 3.880676183879715e-17, 
6.610416236687641e-06, -2.274952461003577e-08, -5.4953302935200226e-09, 5.372920794923787e-11, 1720429413212.6597, 2433300622691.7944], [5.748338134215041e-07, -9.792499896753948e-13, -5.5252557691301074e-14, 3.776199085029705e-12, -7.077150737567345e-11, -3.436378002606512e-16, 1.6470768420373595e-17, 6.610416236687641e-06, -2.271131330673482e-08, -5.4953302935200226e-09, 5.372920794923787e-11, 1720907305151.3247, 2434524561858.4756], [6.600214710589736e-07, -5.358600651951768e-13, -7.323674089198226e-14, 1.4442702298007812e-12, -4.8442008946813423e-11, -3.436378002606512e-16, 1.6470768420373595e-17, 6.610416236687641e-06, -2.259982414812056e-08, -5.441355576792155e-09, 5.418750826323362e-11, 1755456030243.2717, 2553175288775.7803], [5.748338134215041e-07, -9.765636921356812e-13, -9.810322770824514e-14, 3.778275786724121e-12, -6.692087520058559e-11, -3.436378002606512e-16, 4.414569062938014e-17, 5.728240592157351e-06, -2.259982414812056e-08, -2.3119179112349236e-09, 5.9136183427000215e-12, 1671563748276.08, 3331226442886.6523], [6.088982408471882e-07, -5.353214400182556e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -4.8442008946813423e-11, -4.0874842757446075e-16, 1.6470768420373595e-17, 6.610416236687641e-06, -2.259982414812056e-08, -2.5905015053998637e-09, 5.87758358357815e-12, 1691924661730.3398, 3485253816404.5713], [6.043091946254862e-07, -1.1244597029986758e-13, -7.323674089198226e-14, 3.778275786724121e-12, -6.692087520058559e-11, -3.405384225543015e-16, 3.880676183879715e-17, 9.754051206704872e-06, -1.7567203171820354e-07, -5.529044647532495e-09, 5.9136183427000215e-12, 2521809508294.743, 2683953515910.948], [5.748338134215041e-07, -9.792499896753948e-13, -7.323674089198226e-14, 3.776199085029705e-12, -1.4886492961013968e-11, -3.4235388393942845e-16, 4.414569062938014e-17, 9.754051206704872e-06, -3.0217391933218974e-07, -5.529044647532495e-09, 5.6124679052430716e-11, 2540079826095.8623, 2824940738267.114], [2.995797469746595e-08, 
-5.4064269469584e-13, -7.323674089198226e-14, 4.7533522905539054e-12, -1.4886492961013968e-11, -3.442021602954018e-16, 1.6470768420373595e-17, 9.754051206704872e-06, -2.2674268702003365e-08, -5.448809754810759e-09, 5.372920794923787e-11, 2590865574215.4575, 5412556418660.499], [6.063237694798799e-07, -1.1244597029986758e-13, -9.849223478538833e-14, 2.110890707154146e-12, -6.625716321864197e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -2.259982414812056e-08, -2.5905015053998637e-09, 5.87758358357815e-12, 2781819756531.3594, 6718169257662.06], [3.006931299564841e-08, -5.334993652225994e-13, -7.323674089198226e-14, 4.7533522905539054e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 5.7664959761093685e-06, -1.7415816874817353e-07, -5.476363878440178e-09, 5.898125640770321e-12, 4937798990980.663, 6112619152288.509], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.778275786724121e-12, -6.692087520058559e-11, -3.436378002606512e-16, 4.414569062938014e-17, 5.7664959761093685e-06, -1.8710838555912812e-07, -5.529044647532495e-09, 5.9136183427000215e-12, 5485731386187.003, 7036256267134.406]]
fourth_x14 = [[6.014464680560663e-07, -1.1191041566144248e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.864585905734438e-12, 739725608599.0449, 1061602092687.5742], [6.063237694798799e-07, -1.1244597029986758e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 739441966334.2465, 1062307692549.2767], [6.063237694798799e-07, -1.1293886611272421e-13, -5.5252557691301074e-14, 3.791927746991619e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 739698379011.5232, 1062400903293.474], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.45658234459829e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8695159041644556e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 740239699936.4174, 1063930085510.972], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.578019753616566e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 735224738657.5258, 1075780203972.633], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 2.1273885631961156e-12, -6.704093840757301e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.910071994080754e-12, 772562903537.4742, 1075302948436.6924], [6.063237694798799e-07, -1.1244597029986758e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.638682575982141e-11, -4.11543164301811e-16, 
3.880676183879715e-17, 9.84752748513975e-06, -1.7561633190725933e-07, -2.5905015053998637e-09, 5.87758358357815e-12, 753348986749.1305, 1270835428498.1807], [5.733508782899592e-07, -1.1171011116599058e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.801004507434372e-11, -8.822727940038677e-16, 4.393045464764083e-17, 9.509915939077591e-06, -3.0256053169373756e-07, -2.5905015053998637e-09, 5.653959617689789e-11, 918489875570.2732, 1036120020632.2256], [6.063237694798799e-07, -1.1244597029986758e-13, -7.260936648628475e-14, 2.110890707154146e-12, -1.4886492961013968e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.799193365393156e-06, -1.7392046356858582e-07, -5.500677540186618e-09, 5.372920794923787e-11, 991801799172.9209, 939687693626.4626], [6.063237694798799e-07, -1.1244597029986758e-13, -5.559366350407699e-14, 2.110890707154146e-12, -6.692087520058559e-11, -8.822727940038677e-16, 4.367568437160303e-17, 9.509915939077591e-06, -3.0256053169373756e-07, -2.5905015053998637e-09, 5.6022495785442904e-11, 927407918154.858, 1037016288925.825], [6.600214710589736e-07, -5.353214400182556e-13, -7.260936648628475e-14, 4.7533522905539054e-12, -1.4886492961013968e-11, -3.443817491915234e-16, 1.651995247477222e-17, 9.714584975210482e-06, -1.7567203171820354e-07, -5.501866610764596e-09, 5.3476510781643526e-11, 1009240622209.2537, 921598623641.8162], [5.729082858738335e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.755664331322433e-11, -8.822727940038677e-16, 4.393045464764083e-17, 9.577392202821747e-06, -3.0256053169373756e-07, -2.5905015053998637e-09, 5.548253125269424e-11, 925900484987.028, 1041109885357.8557], [3.027694006225868e-08, -1.1308040815342369e-13, -5.5252557691301074e-14, 4.7533522905539054e-12, -6.753068970855316e-11, -3.4701673454245684e-16, 4.414569062938014e-17, 9.714584975210482e-06, -1.7567203171820354e-07, -5.501866610764596e-09, 5.372920794923787e-11, 1008297856911.4817, 923571532874.8936], 
[6.600214710589736e-07, -5.353214400182556e-13, -7.260936648628475e-14, 4.731971783781849e-12, -1.4886492961013968e-11, -3.4701673454245684e-16, 1.651995247477222e-17, 9.714584975210482e-06, -1.7629848372051628e-07, -5.506813302899659e-09, 5.372920794923787e-11, 1011530312633.89, 920760782209.3392], [5.733508782899592e-07, -1.1244597029986758e-13, -7.260936648628475e-14, 2.110890707154146e-12, -1.4886492961013968e-11, -3.4844731679023794e-16, 4.393045464764083e-17, 9.509915939077591e-06, -1.7567203171820354e-07, -5.539943034657847e-09, 5.5738866237681894e-11, 1026959851579.987, 908025674923.6334], [3.003896474064257e-08, -1.1244597029986758e-13, -5.5252557691301074e-14, 4.716385409154961e-12, -6.692087520058559e-11, -8.822727940038677e-16, 4.414569062938014e-17, 9.509915939077591e-06, -3.0256053169373756e-07, -2.568650599974601e-09, 5.372920794923787e-11, 960252365987.242, 1044848486667.0359], [3.006931299564841e-08, -1.1244597029986758e-13, -5.4788115916331794e-14, 2.110890707154146e-12, -6.755664331322433e-11, -4.11543164301811e-16, 4.4286593564094294e-17, 9.754051206704872e-06, -1.7457330461199142e-07, -5.529044647532495e-09, 5.6022495785442904e-11, 1024962904926.3763, 962110416568.8533], [6.600214710589736e-07, -1.1339287424343937e-13, -5.5252557691301074e-14, 4.716385409154961e-12, -6.692087520058559e-11, -4.11543164301811e-16, 1.651995247477222e-17, 9.714584975210482e-06, -1.7567203171820354e-07, -5.464194248059143e-09, 5.372920794923787e-11, 1036490879504.932, 951211620907.323], [3.015611193052464e-08, -1.1244597029986758e-13, -5.473976987477058e-14, 4.716385409154961e-12, -6.650788236284558e-11, -8.822727940038677e-16, 4.414569062938014e-17, 9.509915939077591e-06, -3.0256053169373756e-07, -2.5905015053998637e-09, 5.372920794923787e-11, 969883543946.2631, 1046524903537.8717], [3.0059085313503545e-08, -1.1244597029986758e-13, -5.5252557691301074e-14, 4.716385409154961e-12, -6.685555867966872e-11, -4.11543164301811e-16, 4.414569062938014e-17, 
9.754051206704872e-06, -1.7567203171820354e-07, -5.529044647532495e-09, 5.372920794923787e-11, 1051959731420.8529, 948241112191.1215], [5.733508782899592e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.791927746991619e-12, -6.755664331322433e-11, -8.822727940038677e-16, 3.880676183879715e-17, 9.537660725033725e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 1101939453212.7285, 1266603561494.5737], [5.733508782899592e-07, -1.1204746627913476e-13, -5.5252557691301074e-14, 2.1065855431351167e-12, -6.692087520058559e-11, -8.822727940038677e-16, 4.393045464764083e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.87758358357815e-12, 1107398151892.8062, 1265433453697.6475], [6.063237694798799e-07, -1.1258573287550426e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.686422350505466e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -3.0169709558316906e-07, -2.5905015053998637e-09, 5.913048697659596e-12, 2061702578871.105, 2352784200000.023], [6.600214710589736e-07, -5.353214400182556e-13, -5.563693336222758e-14, 4.7533522905539054e-12, -1.4886492961013968e-11, -3.472655652449316e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.377526120076952e-11, 1500943864487.1636, 3714992054259.6978], [2.990764603594353e-08, -1.1246692099044494e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 4.414569062938014e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.372920794923787e-11, 1501192153400.7183, 3714753463593.064], [5.733508782899592e-07, -1.13394107609416e-13, -9.887316997889571e-14, 2.1274167252256886e-12, -6.692087520058559e-11, -3.436378002606512e-16, 4.393045464764083e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.6022495785442904e-11, 1590233918588.147, 3941386860795.0195], [6.600214710589736e-07, 
-1.1244597029986758e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4701673454245684e-16, 3.880676183879715e-17, 9.714584975210482e-06, -1.7567203171820354e-07, -5.501866610764596e-09, 5.87758358357815e-12, 2522596890253.0264, 2681287748476.4336], [6.063984617943396e-07, -1.1169346215323233e-13, -5.5252557691301074e-14, 2.1074863533712645e-12, -6.647251302560937e-11, -3.413736604018024e-16, 4.414569062938014e-17, 9.509915939077591e-06, -1.7567203171820354e-07, -5.529044647532495e-09, 5.9136183427000215e-12, 2647001118496.6226, 2845436756366.2114], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.1056254434545198e-12, -6.692087520058559e-11, -8.822727940038677e-16, 4.3635886892127107e-17, 9.509915939077591e-06, -3.0256053169373756e-07, -2.599267190073158e-09, 5.868775966728897e-12, 2957350544297.7295, 3321665521243.0386], [3.006931299564841e-08, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.681456897637195e-11, -8.822727940038677e-16, 4.393045464764083e-17, 9.754051206704872e-06, -3.0256053169373756e-07, -5.529044647532495e-09, 5.372920794923787e-11, 3782554013937.9595, 4264809838817.5312]]
fifth_x14 = [[6.063237694798799e-07, -1.127453968282589e-13, -9.887316997889571e-14, 3.791927746991619e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.418089776028339e-06, -1.8710838555912812e-07, -2.6026475400648895e-09, 5.9136183427000215e-12, 748573276024.763, 1041662421841.7045], [6.046655275219584e-07, -1.1293886611272421e-13, -5.5305966770512544e-14, 2.1193113082991266e-12, -6.704093840757301e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8835615865244628e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 741277425241.2795, 1052586987139.5159], [6.063237694798799e-07, -1.1244597029986758e-13, -9.801236647230628e-14, 2.1297802778399837e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.881868058537983e-17, 9.430764467765806e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 745049666741.3059, 1047560637635.6809], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4028047819357316e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 737819748372.696, 1061721115287.7607], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.467625724828445e-06, -1.8695159041644556e-07, -2.5905015053998637e-09, 5.861867412318882e-12, 742755249215.5266, 1054926994420.7028], [6.063237694798799e-07, -1.1244597029986758e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.4093897110194413e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 738137272746.8845, 1061835501399.9567], [6.063237694798799e-07, -1.1293886611272421e-13, -5.493828355697966e-14, 3.791927746991619e-12, -6.692087520058559e-11, -3.4118122506504107e-16, 
3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 738254167274.3577, 1061877664490.2145], [6.063237694798799e-07, -1.1191041566144248e-13, -9.887316997889571e-14, 2.0985089720627187e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.8419075407910955e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.608262180123283e-09, 5.8273345311812325e-12, 743105751142.2904, 1055185540500.3018], [6.014464680560663e-07, -1.1127054905876621e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.864585905734438e-12, 739725608599.0449, 1061602092687.5742], [6.063237694798799e-07, -1.1293886611272421e-13, -5.5252557691301074e-14, 3.791927746991619e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8712485861116616e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 739461053511.3625, 1062174765246.2133], [6.056008272007564e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.751361999580538e-11, -3.436378002606512e-16, 3.8563983966144254e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 739441966333.8219, 1062307692547.6848], [6.063237694798799e-07, -1.1212322295312995e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 739441966334.2465, 1062307692549.2767], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 3.791927746991619e-12, -6.704093840757301e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.910071994080754e-12, 739718864807.1947, 1062349791371.2067], 
[6.063237694798799e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 2.1273885631961156e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9545425022691434e-12, 739462802943.5924, 1062992287330.2861], [6.014464680560663e-07, -1.1191041566144248e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.647339031938553e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.57950157029277e-09, 5.8681419090700685e-12, 737815128253.8563, 1065429828896.7644], [6.063237694798799e-07, -1.1232851595823465e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.628688666389271e-11, -3.45658234459829e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8695159041644556e-07, -2.5905015053998637e-09, 5.864585905734438e-12, 740522706487.3506, 1063221979154.6807], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 2.110890707154146e-12, -6.704093840757301e-11, -3.4455837686940916e-16, 3.880676183879715e-17, 9.578019753616566e-06, -1.8726402095077137e-07, -2.612211862498047e-09, 5.910071994080754e-12, 739218821943.8381, 1066834083402.0623], [6.014464680560663e-07, -1.1293886611272421e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.442871502513762e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8626306910490044e-07, -2.5905015053998637e-09, 5.864585905734438e-12, 739100466397.6221, 1068617330902.7922], [6.063237694798799e-07, -1.1293886611272421e-13, -5.474938341030652e-14, 3.791927746991619e-12, -6.692087520058559e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.578019753616566e-06, -1.873344558534096e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 735441564251.486, 1073862289719.3893], [6.063237694798799e-07, -1.1191041566144248e-13, -5.5252557691301074e-14, 2.1129336608108088e-12, -6.692087520058559e-11, -3.4114835252643343e-16, 3.880676183879715e-17, 
9.578019753616566e-06, -1.8710838555912812e-07, -2.5804143333409836e-09, 5.864585905734438e-12, 732449945883.5417, 1078241898415.758], [6.063237694798799e-07, -1.1244597029986758e-13, -9.887316997889571e-14, 2.110890707154146e-12, -6.692087520058559e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.578019753616566e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 734975401361.5648, 1075696885275.9512], [6.063237694798799e-07, -1.1293886611272421e-13, -5.5252557691301074e-14, 3.791927746991619e-12, -6.666066859922888e-11, -3.4416690848805237e-16, 3.880676183879715e-17, 9.578019753616566e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 735224738657.702, 1075780203973.4678], [6.063237694798799e-07, -1.1324261912105374e-13, -5.5137054713222466e-14, 2.1191073761402393e-12, -6.704093840757301e-11, -3.436378002606512e-16, 3.880676183879715e-17, 9.591713475488146e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.926873745210312e-12, 734049421967.2506, 1078675436036.3041], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 3.779941316809585e-12, -6.692087520058559e-11, -3.4506845955709053e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8557022933795127e-07, -2.587198202611048e-09, 5.876297361120167e-12, 738189616984.4592, 1075948039753.5764], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 2.110890707154146e-12, -6.704093840757301e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8780939547206662e-07, -2.5905015053998637e-09, 5.910071994080754e-12, 773857476582.7517, 1070337448354.0193], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.1273885631961156e-12, -6.704093840757301e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.877758292975681e-07, -2.5905015053998637e-09, 5.910071994080754e-12, 773793638998.2312, 1070571622540.5764], [6.063237694798799e-07, 
-1.1244597029986758e-13, -9.887316997889571e-14, 2.129026298119308e-12, -6.704093840757301e-11, -4.125441593979983e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8710838555912812e-07, -2.5905015053998637e-09, 5.9136183427000215e-12, 774562252522.0753, 1076204420562.6389], [6.004215798270089e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 2.1273885631961156e-12, -6.704093840757301e-11, -4.0874842757446075e-16, 3.886123661313709e-17, 9.578019753616566e-06, -1.8710838555912812e-07, -2.5742368530694446e-09, 5.910071994080754e-12, 764136235934.092, 1092771172486.2751], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5252557691301074e-14, 2.110890707154146e-12, -6.692087520058559e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8604979812886935e-07, -2.5839311228033397e-09, 5.910071994080754e-12, 769482161082.116, 1085180665089.7242], [6.063237694798799e-07, -1.1244597029986758e-13, -5.5137054713222466e-14, 2.1273885631961156e-12, -6.673271667319884e-11, -4.0874842757446075e-16, 3.880676183879715e-17, 9.509915939077591e-06, -1.8571361756069841e-07, -2.5905015053998637e-09, 5.910071994080754e-12, 770228538885.1401, 1085650579050.7341]]
|
def evaluate_happiness(happiness, factor):
    """Return the happiness report line for a team.

    Every employee's happiness is multiplied by *factor*; an employee counts
    as happy when the scaled value is at least the average of all scaled
    values.  The team is happy when at least half of its members are happy.

    :param happiness: non-empty list of per-employee happiness integers
    :param factor: integer multiplier applied to every happiness value
    :return: ``"Score: H/T. Employees are happy!"`` or the "not happy" form
    """
    scaled = [value * factor for value in happiness]
    average = sum(scaled) / len(scaled)
    happy = sum(1 for value in scaled if value >= average)
    total = len(scaled)
    verdict = 'happy' if happy >= total / 2 else 'not happy'
    return f'Score: {happy}/{total}. Employees are {verdict}!'


if __name__ == '__main__':
    # Input: one line of space-separated happiness values, one line of factor.
    employees_happiness = list(map(int, input().split()))
    factor = int(input())
    print(evaluate_happiness(employees_happiness, factor))
984,749 | e5140cc2bb8893a30329cec80550765bc78cbc9e | from Utilities.util import Util
from Utilities.filegenerator.CAMT053InputData import CAMT053InputData
from Utilities.filegenerator.CAMT053Tags import CAMT053Tags
from datetime import date
from datetime import datetime
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
from resources.config import ApplicationConfig
import os
from Utilities.FTPTransferImpl import FTPTransferImpl
import vkbeautify as vkb
import shutil
from pathlib import Path
import inspect
class CAMT053FileProcessing():
    """Builds a CAMT.053 bank-statement XML file plus its parameter file.

    State is kept in class-level attributes (shared across instances) and the
    per-message input values live in ``CAMT053InputData``.
    """
    # Base name (no extension) of the generated .att output file.
    outputFileName = ""
    # Directory where the generated parameter file is written.
    paramFilePath = ""
    # Full path of the generated CAMT.053 .att file.
    camtFilepath = ""
    # Customer id placeholder (written into the parameter file).
    custID = ""
    # Root working directory, taken from the 'myHome' environment variable.
    path = ""
    # True when multiple credit/debit entries should be generated.
    multiple = False
    # Balance currency code; populated during input processing.
    Bal_Ccy = ""
    # FTP helper used to transfer generated files.
    ftpUtils = FTPTransferImpl()
    # XPath templates used when validating/reading back generated files;
    # "%s" placeholders take a 1-based occurrence index.
    xpath_prtryCode = "(//Prtry/Cd)[%s]"
    xpath_RealAcctId = "//Stmt//Acct/Id//Othr/Id"
    xpath_DbtrAcct = "(//UltmtDbtr//Othr/Id)[%s]"
    xpath_CdtrAcct = "(//UltmtCdtr//Othr/Id)[%s]"
    xpath_SubFmlyCd = "(//Fmly/SubFmlyCd)[%s]"
    # When True, party ids are emitted as IBANs instead of "Othr" org ids.
    iBANFlag = False
    # Message-id prefix; NOTE: evaluated once at import time, so the date is
    # frozen for the lifetime of the process.
    random = "MSG-" + date.today().isoformat()
    def generateCAMT053(self, realAccount, transactionAccount, camtinput):
        """Generate a CAMT.053 statement file for the given accounts.

        Writes a parameter file, builds the <Document> XML tree (group header
        plus statement) from ``CAMT053InputData``, writes it to a temp file
        and pretty-prints it to ``camtFilepath`` via vkbeautify.

        :param realAccount: account number used as the statement account id
        :param transactionAccount: account number used as instruction id
        :param camtinput: dict of generation options (txs/ntry flags, amounts)
        """
        iBANFlag = ""  # NOTE(review): local, shadows the class attr and is never used — confirm
        CAMT053FileProcessing.outputFileName = "AutoCAMT053" + Util.get_unique_number(5)
        CAMT053InputData.Random = CAMT053FileProcessing.random + "-" + Util.get_unique_number(5)
        CAMT053InputData.date = datetime.today().isoformat()
        CAMT053InputData.Dt = date.today().isoformat()
        # CAMT053FileProcessing.path = str(Path.home())
        # CAMT053FileProcessing.path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        # Root directory comes from the 'myHome' env var; None if unset.
        CAMT053FileProcessing.path = os.environ.get('myHome') # str(Path.home())
        # NOTE(review): no separator between path and the folder name here,
        # while the writes below use "\\inputCAMT&PAIN\\" — confirm 'myHome'
        # is expected to end with a backslash.
        CAMT053FileProcessing.paramFilePath = CAMT053FileProcessing.path + "inputCAMT&PAIN\\"
        # NOTE(review): this existence check is relative to the CWD but the
        # makedirs target is the absolute paramFilePath — confirm intent.
        if not os.path.exists('inputCAMT&PAIN'):
            os.makedirs(CAMT053FileProcessing.paramFilePath)
        self.createParam(CAMT053FileProcessing.outputFileName)
        self.takeInputsForCAMT053FileProcessing(realAccount, transactionAccount, camtinput)
        # Root = self.initiateXML()
        # Build the CAMT.053 <Document> root with its namespaces.
        rootElement = Element("Document")
        tree = etree.ElementTree(rootElement)
        rootElement.set("xmlns", "urn:iso:std:iso:20022:tech:xsd:camt.053.001.02")
        rootElement.set("xmlns:xsd", "http://www.w3.org/2001/XMLSchema")
        rootElement.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
        BkToCstmrStmt = Element(CAMT053Tags.BkToCstmrStmtTag)
        rootElement.append(BkToCstmrStmt)
        self.createGrpHdr(BkToCstmrStmt)
        self.createStmt(BkToCstmrStmt)
        CAMT053FileProcessing.camtFilepath = CAMT053FileProcessing.path + "\\inputCAMT&PAIN\\" + \
            CAMT053FileProcessing.outputFileName + ".att"
        tempFileName = CAMT053FileProcessing.path + "\\inputCAMT&PAIN\\TempCAMTFile" + ".att"
        # NOTE(review): the file handle from open() is never closed explicitly.
        tree.write(open(tempFileName, 'wb'), xml_declaration=True, encoding='utf-8')
        # Pretty-print the temp file into the final output location.
        vkb.xml(tempFileName, CAMT053FileProcessing.camtFilepath)
# def takeInputsForCAMT053FileProcessing(self,realAccount,transactionAccount,camtinput):
def takeInputsForCAMT053FileProcessing(self, realAccount, transactionAccount, camtinput):
CAMT053InputData.Amount = "80000.00" # camtinput.getDefaultAmount();
CAMT053InputData.TtlCdtNtries_Sum = CAMT053InputData.NbOfNtries_Sum = CAMT053InputData.Amount3 = CAMT053InputData.Amount2 = CAMT053InputData.Amount1 = CAMT053InputData.Amount = 0
CAMT053InputData.Acct_ID = realAccount # realAccount.getAccountNumber().toUpperCase()
CAMT053InputData.Ccy = 'NOK' # realAccount.getCurrency()
CAMT053InputData.TxsSummry = camtinput.get('txsSummry')
CAMT053InputData.Txs_Credit = camtinput.get('txs_Credit')
CAMT053InputData.Txs_Debit = camtinput.get('txs_Debit')
# custID = camtinput.getRootCustomer().getCustId();
CAMT053InputData.InstrId = transactionAccount # transactionAccount.getAccountNumber().toUpperCase()
Bal_Ccy = "NOK" # realAccount.getCurrency()
if camtinput.get('multipleTxn') == "Yes":
CAMT053FileProcessing.multiple = True
CAMT053InputData.Ntry_Credit = camtinput.get('ntry_Credit')
CAMT053InputData.Ntry_Debit = camtinput.get('ntry_Debit')
CAMT053InputData.Ntry_Credit_Amt = camtinput.get('ntry_Credit_Amt')
CAMT053InputData.Ntry_Credit_Ccy = 'NOK' # transactionAccount.getCurrency()
CAMT053InputData.Ntry_Debit_Amt = camtinput.get('ntry_Debit_Amt')
CAMT053InputData.Ntry_Debit_Ccy = 'NOK ' #transactionAccount.getCurrency()
def createGrpHdr(self, BkToCstmrStmt):
# GrpHdr
grpHdr = Element(CAMT053Tags.GrpHdrTag)
BkToCstmrStmt.append(grpHdr)
msgID = Element(CAMT053Tags.MsgIdTag)
msgID.text = CAMT053InputData.Random
grpHdr.append(msgID)
CreDtTm = Element(CAMT053Tags.CreDtTmTag)
CreDtTm.text = CAMT053InputData.date
grpHdr.append(CreDtTm)
# MsgRcpt
MsgRcpt = Element(CAMT053Tags.MsgRcptTag)
grpHdr.append(MsgRcpt)
nmt = Element(CAMT053Tags.NmTag)
nmt.text = CAMT053InputData.nm
MsgRcpt.append(nmt)
# PstlAdr
PstlAdr = Element(CAMT053Tags.PstlAdrTag)
MsgRcpt.append(PstlAdr)
StrtNm = Element(CAMT053Tags.StrtNmTag)
StrtNm.text = CAMT053InputData.StrtNm
PstlAdr.append(StrtNm)
BldgNb = Element(CAMT053Tags.BldgNbTag)
BldgNb.text = CAMT053InputData.BldgNb
PstlAdr.append(BldgNb)
PstCd = Element(CAMT053Tags.PstCdTag)
PstCd.text = CAMT053InputData.PstCd
PstlAdr.append(PstCd)
TwnNm = Element(CAMT053Tags.TwnNmTag)
TwnNm.text = CAMT053InputData.TwnNm
PstlAdr.append(TwnNm)
Ctry = Element(CAMT053Tags.CtryTag)
Ctry.text = CAMT053InputData.Ctry
PstlAdr.append(Ctry)
AdrLine = Element(CAMT053Tags.AdrLineTag)
AdrLine.text = CAMT053InputData.AdrLine
PstlAdr.append(AdrLine)
# ID
Id1 = Element(CAMT053Tags.IdTag)
MsgRcpt.append(Id1)
OrgId = Element(CAMT053Tags.OrgIdTag)
Id1.append(OrgId)
BICOrBEI = Element(CAMT053Tags.BICOrBEITag)
BICOrBEI.text = CAMT053InputData.BICOrBEI
OrgId.append(BICOrBEI)
Othr = Element(CAMT053Tags.OthrTag)
OrgId.append(Othr)
Id2 = Element(CAMT053Tags.IdTag)
Id2.text = CAMT053InputData.GrpHdr_Other_ID
Othr.append(Id2)
# MsgPgntn
MsgPgntn = Element(CAMT053Tags.MsgPgntnTag)
grpHdr.append(MsgPgntn)
PgNb = Element(CAMT053Tags.PgNbTag)
PgNb.text = CAMT053InputData.PgNb
MsgPgntn.append(PgNb)
LastPgInd = Element(CAMT053Tags.LastPgIndTag)
LastPgInd.text = CAMT053InputData.LastPgInd
MsgPgntn.append(LastPgInd)
# return BkToCstmrStmt
def createStmt(self, BkToCstmrStmt):
Stmt = Element(CAMT053Tags.StmtTag)
BkToCstmrStmt.append(Stmt)
# Stmt
Id = Element(CAMT053Tags.IdTag)
Id.text = CAMT053InputData.Random
Stmt.append(Id)
ElctrncSeqNb = Element(CAMT053Tags.ElctrncSeqNbTag)
ElctrncSeqNb.text = CAMT053InputData.ElctrncSeqNb
Stmt.append(ElctrncSeqNb)
CreDtTm = Element(CAMT053Tags.CreDtTmTag)
CreDtTm.text = CAMT053InputData.date
Stmt.append(CreDtTm)
self.createAccount(Stmt)
self.createBalanceCredit(Stmt)
self.createTxsSummry(Stmt)
self.createNtry(Stmt)
#return BkToCstmrStmt
def createAccount(self, Stmt):
# Acct
Acct = Element(CAMT053Tags.AcctTag)
Stmt.append(Acct)
Id = Element(CAMT053Tags.IdTag)
Acct.append(Id)
Othr = Element(CAMT053Tags.OthrTag)
Id.append(Othr)
Id2 = Element(CAMT053Tags.IdTag)
Id2.text = str(CAMT053InputData.Acct_ID)
Othr.append(Id2)
Ccy = Element(CAMT053Tags.CcyTag)
Ccy.text = CAMT053InputData.Ccy
Acct.append(Ccy)
Svcr = Element(CAMT053Tags.SvcrTag)
Acct.append(Svcr)
FinInstnId = Element(CAMT053Tags.FinInstnIdTag)
Svcr.append(FinInstnId)
BIC = Element(CAMT053Tags.BICTag)
BIC.text = CAMT053InputData.BIC
FinInstnId.append(BIC)
def createBalanceCredit(self, Stmt):
Bal_Cd = ""
Amount = 0.00
for i in range(4):
if i == 0:
Bal_Cd = CAMT053InputData.Bal_Cd
Amount = CAMT053InputData.Amount
elif i == 1:
Bal_Cd = CAMT053InputData.Bal_Cd1
Amount = CAMT053InputData.Amount1
elif i == 2:
Bal_Cd = CAMT053InputData.Bal_Cd2
Amount = CAMT053InputData.Amount2
elif i == 3:
Bal_Cd = CAMT053InputData.Bal_Cd3
Amount = CAMT053InputData.Amount3
Bal = Element(CAMT053Tags.BalTag)
Stmt.append(Bal)
Tp = Element(CAMT053Tags.TpTag)
Bal.append(Tp)
CdOrPrtry = Element(CAMT053Tags.CdOrPrtryTag)
Tp.append(CdOrPrtry)
Cd = Element(CAMT053Tags.CdTag)
Cd.text = Bal_Cd
CdOrPrtry.append(Cd)
Amt = Element(CAMT053Tags.AmtTag)
Amt.text = str(Amount)
Bal.append(Amt)
# set attribute to Amt
# Attr = Element(CAMT053Tags.CcyTag)
Amt.set(CAMT053Tags.CcyTag, "NOK")
CdtDbtInd = Element(CAMT053Tags.CdtDbtIndTag)
CdtDbtInd.text = "CRDT"
Bal.append(CdtDbtInd)
Dt1 = Element(CAMT053Tags.DtTag)
Bal.append(Dt1)
Dt2 = Element(CAMT053Tags.DtTag)
Dt2.text = CAMT053InputData.Dt
Dt1.append(Dt2)
def createTxsSummry(self, Stmt):
if CAMT053InputData.TxsSummry == "Yes":
# TxsSummry
TxsSummry = Element(CAMT053Tags.TxsSummryTag)
Stmt.append(TxsSummry)
# TtlNtries
TtlNtries = Element(CAMT053Tags.TtlNtriesTag)
TxsSummry.append(TtlNtries)
NbOfNtries = Element(CAMT053Tags.NbOfNtriesTag)
NbOfNtries.text = CAMT053InputData.NbOfNtries
TtlNtries.append(NbOfNtries)
NbOfNtriesSum = Element(CAMT053Tags.SumTag)
NbOfNtriesSum.text = CAMT053InputData.NbOfNtries_Sum
TtlNtries.append(NbOfNtriesSum)
if CAMT053InputData.Txs_Credit == 1 and CAMT053InputData.Txs_Debit == 0:
# TtlCdtNtries
TtlCdtNtries = Element(CAMT053Tags.TtlCdtNtriesTag)
TxsSummry.append(TtlCdtNtries)
TtlCdtNtries_NbOfNtries = Element(CAMT053Tags.NbOfNtriesTag)
TtlCdtNtries_NbOfNtries.text = CAMT053InputData.TtlCdtNtries
TtlCdtNtries.append(TtlCdtNtries_NbOfNtries)
TtlCdtNtriesSum = Element(CAMT053Tags.SumTag)
TtlCdtNtriesSum.text = CAMT053InputData.TtlCdtNtries_Sum
TtlCdtNtries.append(TtlCdtNtriesSum)
# TtlDbtNtries
TtlDbtNtries = Element(CAMT053Tags.TtlDbtNtriesTag)
TxsSummry.append(TtlDbtNtries)
TtlDbtNtries_NbOfNtries = Element(CAMT053Tags.NbOfNtriesTag)
TtlDbtNtries_NbOfNtries.text = "0"
TtlDbtNtries.append(TtlDbtNtries_NbOfNtries)
TtlDbtNtriesSum = Element(CAMT053Tags.SumTag)
TtlDbtNtriesSum.text = "0"
TtlDbtNtries.append(TtlDbtNtriesSum)
TtlDbtNtries = Element(CAMT053Tags.TtlDbtNtriesTag)
TxsSummry.append(TtlDbtNtries)
TtlDbtNtries_NbOfNtries = Element(CAMT053Tags.NbOfNtriesTag)
TtlDbtNtries_NbOfNtries.text = CAMT053InputData.NbOfNtries
TtlDbtNtries.append(TtlDbtNtries_NbOfNtries)
TtlDbtNtriesSum = Element(CAMT053Tags.SumTag)
TtlDbtNtriesSum.text = CAMT053InputData.NbOfNtries_Sum
TtlDbtNtries.append(TtlDbtNtriesSum)
elif CAMT053InputData.Txs_Credit == 0 and CAMT053InputData.Txs_Debit == 1:
# TtlCdtNtries
TtlCdtNtries = Element(CAMT053Tags.TtlCdtNtriesTag)
TxsSummry.append(TtlCdtNtries)
TtlCdtNtries_NbOfNtries = Element(CAMT053Tags.NbOfNtriesTag)
TtlCdtNtries_NbOfNtries.text = 0
TtlCdtNtries.append(TtlCdtNtries_NbOfNtries)
TtlCdtNtriesSum = Element(CAMT053Tags.SumTag)
TtlCdtNtriesSum.text = 0
TtlCdtNtries.append(TtlCdtNtriesSum)
# TtlDbtNtries
TtlDbtNtries = Element(CAMT053Tags.TtlDbtNtriesTag)
TxsSummry.append(TtlDbtNtries)
TtlDbtNtries_NbOfNtries = Element(CAMT053Tags.NbOfNtriesTag)
TtlDbtNtries_NbOfNtries.text = CAMT053InputData.NbOfNtries
TtlDbtNtries.append(TtlDbtNtries_NbOfNtries)
TtlDbtNtriesSum = Element(CAMT053Tags.SumTag)
TtlDbtNtriesSum.text = CAMT053InputData.NbOfNtries_Sum
TtlDbtNtries.append(TtlDbtNtriesSum)
def createNtry(self, Stmt):
temp = CAMT053InputData.Random.split("-")
var = int(temp[len(temp) - 1])
if self.multiple == True:
if CAMT053InputData.Ntry_Credit >= 1:
i = 0
while i < CAMT053InputData.Ntry_Credit:
var = var + 1
CAMT053InputData.Random = CAMT053FileProcessing.random + "-" + str(var)
# Ntry
Ntry = Element(CAMT053Tags.NtryTag)
Stmt.append(Ntry)
NtryRef = Element(CAMT053Tags.NtryRefTag)
NtryRef.text = CAMT053InputData.Random
Ntry.append(NtryRef)
Amt = Element(CAMT053Tags.AmtTag)
Amt.text = str(CAMT053InputData.Ntry_Credit_Amt)
Ntry.append(Amt)
# set attribute to Amt
# Attr = Element(CAMT053Tags.CcyTag)
Amt.set(CAMT053Tags.CcyTag, "NOK")
CdtDbtInd = Element(CAMT053Tags.CdtDbtIndTag)
CdtDbtInd.text = "CRDT"
Ntry.append(CdtDbtInd)
Sts = Element(CAMT053Tags.StsTag)
Sts.text = CAMT053InputData.Sts
Ntry.append(Sts)
BookgDt = Element(CAMT053Tags.BookgDtTag)
Ntry.append(BookgDt)
Dt = Element(CAMT053Tags.DtTag)
Dt.text = CAMT053InputData.Dt
BookgDt.append(Dt)
ValDt = Element(CAMT053Tags.ValDtTag)
Ntry.append(ValDt)
Dt2 = Element(CAMT053Tags.DtTag)
Dt2.text = CAMT053InputData.Dt
ValDt.append(Dt2)
AcctSvcrRef = Element(CAMT053Tags.AcctSvcrRefTag)
AcctSvcrRef.text = CAMT053InputData.Random
Ntry.append(AcctSvcrRef)
# BkTxCd
BkTxCd = Element(CAMT053Tags.BkTxCdTag)
Ntry.append(BkTxCd)
Domn = Element(CAMT053Tags.DomnTag)
BkTxCd.append(Domn)
BkTxCd_Cd = Element(CAMT053Tags.CdTag)
BkTxCd_Cd.text = CAMT053InputData.BkTxCd_Cd
Domn.append(BkTxCd_Cd)
Fmly = Element(CAMT053Tags.FmlyTag)
Domn.append(Fmly)
Fmly_Cd = Element(CAMT053Tags.FmlyCdTag)
Fmly_Cd.text = CAMT053InputData.Fmly_Cd
Fmly.append(Fmly_Cd)
SubFmlyCd = Element(CAMT053Tags.SubFmlyCdTag)
SubFmlyCd.text = CAMT053InputData.SubFmlyCd
Fmly.append(SubFmlyCd)
Prtry = Element(CAMT053Tags.PrtryTag)
BkTxCd.append(Prtry)
Prtry_Cd = Element(CAMT053Tags.Prtry_CdTag)
Prtry_Cd.text = CAMT053InputData.Prtry_Cd
Prtry.append(Prtry_Cd)
Issr = Element(CAMT053Tags.IssrTag)
Issr.text = CAMT053InputData.Issr
Prtry.append(Issr)
self.createCrdtNtryDtls(Ntry)
i += 1
if CAMT053InputData.Ntry_Debit >= 1:
i = 0
while i < CAMT053InputData.Ntry_Debit:
var = var + 1
#CAMT053InputData.Random = + str(var)
# Ntry
Ntry = Element(CAMT053Tags.NtryTag)
Stmt.append(Ntry)
NtryRef = Element(CAMT053Tags.NtryRefTag)
NtryRef.text = CAMT053InputData.Random
Ntry.append(NtryRef)
Amt = Element(CAMT053Tags.AmtTag)
Amt.text = str(CAMT053InputData.Ntry_Debit_Amt)
Ntry.append(Amt)
# set attribute to Amt
Attr = Element(CAMT053Tags.CcyTag)
Attr.set(CAMT053InputData.Ntry_Credit_Ccy, "NOK")
CdtDbtInd = Element(CAMT053Tags.CdtDbtIndTag)
CdtDbtInd.text = "DBIT"
Ntry.append(CdtDbtInd)
Sts = Element(CAMT053Tags.StsTag)
Sts.text = CAMT053InputData.Sts
Ntry.append(Sts)
BookgDt = Element(CAMT053Tags.BookgDtTag)
Ntry.append(BookgDt)
Dt = Element(CAMT053Tags.DtTag)
Dt.text = CAMT053InputData.Dt
BookgDt.append(Dt)
ValDt = Element(CAMT053Tags.ValDtTag)
Ntry.append(ValDt)
Dt2 = Element(CAMT053Tags.DtTag)
Dt2.text = CAMT053InputData.Dt
ValDt.append(Dt2)
AcctSvcrRef = Element(CAMT053Tags.AcctSvcrRefTag)
AcctSvcrRef.text = CAMT053InputData.Random
Ntry.append(AcctSvcrRef)
# BkTxCd
BkTxCd = Element(CAMT053Tags.BkTxCdTag)
Ntry.append(BkTxCd)
Domn = Element(CAMT053Tags.DomnTag)
BkTxCd.append(Domn)
BkTxCd_Cd = Element(CAMT053Tags.CdTag)
BkTxCd_Cd.text = CAMT053InputData.BkTxCd_Cd
Domn.append(BkTxCd_Cd)
Fmly = Element(CAMT053Tags.FmlyTag)
Domn.append(Fmly)
Fmly_Cd = Element(CAMT053Tags.FmlyCdTag)
Fmly_Cd.text = CAMT053InputData.Fmly_Cd
Fmly.append(Fmly_Cd)
SubFmlyCd = Element(CAMT053Tags.SubFmlyCdTag)
SubFmlyCd.text = CAMT053InputData.SubFmlyCd
Fmly.append(SubFmlyCd)
Prtry = Element(CAMT053Tags.PrtryTag)
BkTxCd.append(Prtry)
Prtry_Cd = Element(CAMT053Tags.Prtry_CdTag)
Prtry_Cd.text = CAMT053InputData.Prtry_Cd
Prtry.append(Prtry_Cd)
Issr = Element(CAMT053Tags.IssrTag)
Issr.text = CAMT053InputData.Issr
Prtry.append(Issr)
self.createDbtrNtryDtls(Ntry)
i += 1
elif self.multiple == False:
if CAMT053InputData.Txs_Credit != 0:
# Ntry
Ntry = Element(CAMT053Tags.NtryTag)
Stmt.append(Ntry)
NtryRef = Element(CAMT053Tags.NtryRefTag)
NtryRef.text = CAMT053InputData.Random
Ntry.append(NtryRef)
Amt = Element(CAMT053Tags.AmtTag)
Amt.text = str(CAMT053InputData.TtlCdtNtries_Sum)
Ntry.append(Amt)
# set attribute to Amt
Attr = Element(CAMT053Tags.CcyTag)
Attr.set(CAMT053InputData.Ntry_Credit_Ccy, "NOK")
CdtDbtInd = Element(CAMT053Tags.CdtDbtIndTag)
CdtDbtInd.text = "CRDT"
Ntry.append(CdtDbtInd)
Sts = Element(CAMT053Tags.StsTag)
Sts.text = CAMT053InputData.Sts
Ntry.append(Sts)
BookgDt = Element(CAMT053Tags.BookgDtTag)
Ntry.append(BookgDt)
Dt = Element(CAMT053Tags.DtTag)
Dt.text = CAMT053InputData.Dt
BookgDt.append(Dt)
ValDt = Element(CAMT053Tags.ValDtTag)
Ntry.append(ValDt)
Dt2 = Element(CAMT053Tags.DtTag)
Dt2.text = CAMT053InputData.Dt
ValDt.append(Dt2)
AcctSvcrRef = Element(CAMT053Tags.AcctSvcrRefTag)
AcctSvcrRef.text = CAMT053InputData.Random
Ntry.append(AcctSvcrRef)
# BkTxCd
BkTxCd = Element(CAMT053Tags.BkTxCdTag)
Ntry.append(BkTxCd)
Domn = Element(CAMT053Tags.DomnTag)
BkTxCd.append(Domn)
BkTxCd_Cd = Element(CAMT053Tags.CdTag)
BkTxCd_Cd.text = CAMT053InputData.BkTxCd_Cd
Domn.append(BkTxCd_Cd)
Fmly = Element(CAMT053Tags.FmlyTag)
Domn.append(Fmly)
Fmly_Cd = Element(CAMT053Tags.FmlyCdTag)
Fmly_Cd.text = CAMT053InputData.Fmly_Cd
Fmly.append(Fmly_Cd)
SubFmlyCd = Element(CAMT053Tags.SubFmlyCdTag)
SubFmlyCd.text = CAMT053InputData.SubFmlyCd
Fmly.append(SubFmlyCd)
Prtry = Element(CAMT053Tags.PrtryTag)
BkTxCd.append(Prtry)
Prtry_Cd = Element(CAMT053Tags.Prtry_CdTag)
Prtry_Cd.text = CAMT053InputData.Prtry_Cd
Prtry.append(Prtry_Cd)
Issr = Element(CAMT053Tags.IssrTag)
Issr.text = CAMT053InputData.Issr
Prtry.append(Issr)
#self.createCrdtNtryDtls(Ntry)
elif CAMT053InputData.Txs_Debit != 0:
# Ntry
Ntry = Element(CAMT053Tags.NtryTag)
Stmt.append(Ntry)
NtryRef = Element(CAMT053Tags.NtryRefTag)
NtryRef.text = CAMT053InputData.Random
Ntry.append(NtryRef)
Amt = Element(CAMT053Tags.AmtTag)
Amt.text = str(CAMT053InputData.TtlCdtNtries_Sum)
Ntry.append(Amt)
# set attribute to Amt
Attr = Element(CAMT053Tags.CcyTag)
Attr.set(CAMT053InputData.Ntry_Credit_Ccy, "NOK")
CdtDbtInd = Element(CAMT053Tags.CdtDbtIndTag)
CdtDbtInd.text = "DBIT"
Ntry.append(CdtDbtInd)
Sts = Element(CAMT053Tags.StsTag)
Sts.text = CAMT053InputData.Sts
Ntry.append(Sts)
BookgDt = Element(CAMT053Tags.BookgDtTag)
Ntry.append(BookgDt)
Dt = Element(CAMT053Tags.DtTag)
Dt.text = CAMT053InputData.Dt
BookgDt.append(Dt)
ValDt = Element(CAMT053Tags.ValDtTag)
Ntry.append(ValDt)
Dt2 = Element(CAMT053Tags.DtTag)
Dt2.text = CAMT053InputData.Dt
ValDt.append(Dt2)
AcctSvcrRef = Element(CAMT053Tags.AcctSvcrRefTag)
AcctSvcrRef.text = CAMT053InputData.Random
Ntry.append(AcctSvcrRef)
# BkTxCd
BkTxCd = Element(CAMT053Tags.BkTxCdTag)
Ntry.append(BkTxCd)
Domn = Element(CAMT053Tags.DomnTag)
BkTxCd.append(Domn)
BkTxCd_Cd = Element(CAMT053Tags.CdTag)
BkTxCd_Cd.text = CAMT053InputData.BkTxCd_Cd
Domn.append(BkTxCd_Cd)
Fmly = Element(CAMT053Tags.FmlyTag)
Domn.append(Fmly)
Fmly_Cd = Element(CAMT053Tags.FmlyCdTag)
Fmly_Cd.text = CAMT053InputData.Fmly_Cd
Fmly.append(Fmly_Cd)
SubFmlyCd = Element(CAMT053Tags.SubFmlyCdTag)
SubFmlyCd.text = CAMT053InputData.SubFmlyCd
Fmly.append(SubFmlyCd)
Prtry = Element(CAMT053Tags.PrtryTag)
BkTxCd.append(Prtry)
Prtry_Cd = Element(CAMT053Tags.Prtry_CdTag)
Prtry_Cd.text = CAMT053InputData.Prtry_Cd
Prtry.append(Prtry_Cd)
Issr = Element(CAMT053Tags.IssrTag)
Issr.text = CAMT053InputData.Issr
Prtry.append(Issr)
#self.createDbtrNtryDtls(Ntry)
def createCrdtNtryDtls(self, Ntry):
NtryDtls = Element(CAMT053Tags.NtryDtlsTag)
Ntry.append(NtryDtls)
TxDtls = Element(CAMT053Tags.TxDtlsTag)
NtryDtls.append(TxDtls)
Refs = Element(CAMT053Tags.RefsTag)
TxDtls.append(Refs)
# Ref Inputs
InstrId = Element(CAMT053Tags.InstrIdTag)
InstrId.text = CAMT053InputData.InstrId
Refs.append(InstrId)
EndToEndId = Element(CAMT053Tags.EndToEndIdTag)
EndToEndId.text = CAMT053InputData.Random
Refs.append(EndToEndId)
# RltdPties
RltdPties = Element(CAMT053Tags.RltdPtiesTag)
TxDtls.append(RltdPties)
# Cdtr
Cdtr = Element(CAMT053Tags.CdtrTag)
RltdPties.append(Cdtr)
Id0 = Element(CAMT053Tags.IdTag)
Cdtr.append(Id0)
if self.iBANFlag == True:
PrvtId = Element(CAMT053Tags.PrvtIdTag)
Id0.append(PrvtId)
iban = Element(CAMT053Tags.iBANTag)
iban.text = CAMT053InputData.InstrId
PrvtId.append(iban)
elif self.iBANFlag == False:
OrgID = Element(CAMT053Tags.OrgIdTag)
Id0.append(OrgID)
OthrTag = Element(CAMT053Tags.OthrTag)
OrgID.append(OthrTag)
id = Element(CAMT053Tags.IdTag)
id.text = CAMT053InputData.InstrId
OthrTag.append(id)
# RmtInf
RmtInf = Element(CAMT053Tags.RmtInfTag)
TxDtls.append(RmtInf)
Ustrd = Element(CAMT053Tags.UstrdTag)
Ustrd.text = CAMT053InputData.InstrId
RmtInf.append(Ustrd)
def createDbtrNtryDtls(self, Ntry):
    """Append an <NtryDtls> subtree for a debit entry under *Ntry*.

    Mirrors createCrdtNtryDtls but attaches the identification to a
    <Dbtr> (debtor) element instead of a creditor.
    """
    entry_details = Element(CAMT053Tags.NtryDtlsTag)
    Ntry.append(entry_details)
    tx_details = Element(CAMT053Tags.TxDtlsTag)
    entry_details.append(tx_details)

    # Transaction references
    refs = Element(CAMT053Tags.RefsTag)
    tx_details.append(refs)
    instr_id = Element(CAMT053Tags.InstrIdTag)
    instr_id.text = CAMT053InputData.InstrId
    refs.append(instr_id)
    end_to_end = Element(CAMT053Tags.EndToEndIdTag)
    end_to_end.text = CAMT053InputData.Random
    refs.append(end_to_end)

    # Related parties -> debtor
    related_parties = Element(CAMT053Tags.RltdPtiesTag)
    tx_details.append(related_parties)
    debtor = Element(CAMT053Tags.DbtrTag)
    related_parties.append(debtor)
    party_id = Element(CAMT053Tags.IdTag)
    debtor.append(party_id)
    # Explicit True/False comparisons preserved: any other flag value emits
    # no identification block, exactly like the original.
    if self.iBANFlag == True:
        private_id = Element(CAMT053Tags.PrvtIdTag)
        party_id.append(private_id)
        iban = Element(CAMT053Tags.iBANTag)
        # NOTE(review): InstrId is reused as the IBAN text — confirm intended.
        iban.text = CAMT053InputData.InstrId
        private_id.append(iban)
    elif self.iBANFlag == False:
        org_id = Element(CAMT053Tags.OrgIdTag)
        party_id.append(org_id)
        other = Element(CAMT053Tags.OthrTag)
        org_id.append(other)
        other_id = Element(CAMT053Tags.IdTag)
        other_id.text = CAMT053InputData.InstrId
        other.append(other_id)

    # Remittance information (unstructured)
    rmt_inf = Element(CAMT053Tags.RmtInfTag)
    tx_details.append(rmt_inf)
    ustrd = Element(CAMT053Tags.UstrdTag)
    ustrd.text = CAMT053InputData.InstrId
    rmt_inf.append(ustrd)
def createParam(self, outputFileName):
    """Build the <Head> parameter XML and hand it to vkb.xml.

    Fixes over the previous version:
    - the <Head>/</Head> stripping loop kept only the LAST line of the
      temp file, silently dropping everything before it when the XML
      spanned multiple lines; the whole file content is now processed.
    - both file handles are now closed via context managers (the write
      handle passed to tree.write() was never closed at all).
    """
    document = Element("Head")
    tree = etree.ElementTree(document)
    a1 = Element("a1")
    a1.text = "xxxxxx"  # CAMT053FileProcessing.custID
    document.append(a1)
    CAMT053InputData.BICOrBEI = ApplicationConfig.get('BICOrBEI')
    a2 = Element("a2")
    a2.text = CAMT053InputData.BICOrBEI
    document.append(a2)
    a4 = Element("a4")
    a4.text = CAMT053InputData.camtFormat
    document.append(a4)
    incomingPath = ApplicationConfig.get('INCOMINGFILEPATH') + '/' + outputFileName + ".att"
    a9 = Element("a9")
    a9.text = incomingPath
    document.append(a9)
    a10 = Element("a10")
    a10.text = outputFileName
    document.append(a10)
    a20 = Element("a20")
    a20.text = "VAM"
    document.append(a20)
    CAMT053FileProcessing.paramFilePath = CAMT053FileProcessing.path + "\\inputCAMT&PAIN\\" + \
        CAMT053FileProcessing.outputFileName + ".param"
    tempFileName = CAMT053FileProcessing.path + "\\inputCAMT&PAIN\\TempFile" + ".param"
    with open(tempFileName, 'wb') as out_file:
        tree.write(out_file)
    # Strip the wrapper tags from the WHOLE file, not just its last line.
    with open(tempFileName) as in_file:
        stripped = "".join(
            raw.replace('<Head>', '').replace('</Head>', '') for raw in in_file)
    vkb.xml(stripped, CAMT053FileProcessing.paramFilePath)
def ftpCAMT053Files(self):
    """Upload the generated CAMT file to the configured (S)FTP server.

    Plain FTP sends only the CAMT file; SFTP additionally sends the
    .param companion file. Local work files are deleted afterwards.
    """
    server = ApplicationConfig.get('SERVERIPADDR')
    user = ApplicationConfig.get('FTP_USERID')
    password = ApplicationConfig.get('FTP_PASSWORD')
    local_camt = CAMT053FileProcessing.camtFilepath
    local_param = CAMT053FileProcessing.paramFilePath
    remote = ApplicationConfig.get('INCOMINGFILEPATH')
    remote = remote + '/' + CAMT053FileProcessing.outputFileName
    transfer = CAMT053FileProcessing.ftpUtils
    if ApplicationConfig.get('SERVER_TYPE') == 'FTP':
        transfer.sendFileToFTPServer(server, user, password, local_camt, remote)
    else:
        transfer.sendFileToSFTPServer(server, user, password, local_camt,
                                      remote + '.att')
        transfer.sendFileToSFTPServer(server, user, password, local_param,
                                      remote + '.param')
    self.deleteFiles()
def deleteFiles(self):
    """Delete the local inputCAMT&PAIN working directory and its contents."""
    work_dir = CAMT053FileProcessing.path + "\\inputCAMT&PAIN\\"
    CAMT053FileProcessing.paramFilePath = work_dir
    # NOTE: the directory is NOT recreated afterwards (the makedirs call
    # was deliberately commented out in the original).
    shutil.rmtree(work_dir)
# Input knobs for the generated CAMT.053 statement: two credit entries,
# no debits, with a per-entry summary suppressed.
camtinput = dict(
    txsSummry='No',
    txs_Credit=0,
    txs_Debit=0,
    multipleTxn='Yes',
    ntry_Credit=2,
    ntry_Debit=0,
    ntry_Credit_Amt=1.00,
    ntry_Debit_Amt=20000.00,
)

# Generate the statement for the two account IBANs and push it to the server.
cp = CAMT053FileProcessing()
cp.generateCAMT053('NO46049884454832', 'NO87410757015186', camtinput)
cp.ftpCAMT053Files()
|
984,750 | 20b4b86b56ece6759de5b482b41597db7e183e8e | import requests
from bs4 import BeautifulSoup as soup
import requests
from log import log as log
import time
from datetime import datetime
import random
import sqlite3
from bs4 import BeautifulSoup as soup
from discord_hooks import Webhook
from threading import Thread
from datetime import datetime
from datetime import datetime
from colorama import init
from termcolor import colored
init()
class Product():
    """Plain record describing one monitored product hit."""

    def __init__(self, title, link, stock, keyword):
        '''
        (str, str, bool, str) -> None
        Creates an instance of the Product class.
        '''
        # Keep all four attributes exactly as supplied by the scraper.
        self.title = title
        self.link = link
        self.stock = stock
        self.keyword = keyword
def read_from_txt(path):
    '''
    (str) -> list of str
    Loads up all sites from the text file at *path* and returns them as a
    list with trailing newlines stripped.

    Raises FileNotFoundError when the file cannot be opened and ValueError
    when it is empty.

    Fixes over the previous version: the raised FileNotFound()/NoDataLoaded()
    classes were undefined names (the raise itself crashed with NameError),
    the file handle was not closed on error, and the bare except masked the
    real failure.
    '''
    try:
        # Context manager guarantees the handle is closed even on error.
        with open(path, "r") as f:
            raw_lines = f.readlines()
    except OSError:
        log('e', "Couldn't locate <" + path + ">.")
        raise FileNotFoundError("Couldn't locate <" + path + ">.")

    if len(raw_lines) == 0:
        raise ValueError("No data loaded from <" + path + ">.")

    # Strip the trailing newline from each entry.
    return [line.strip("\n") for line in raw_lines]
def add_to_db(product):
    '''
    (Product) -> bool
    Given a product <product>, the product is added to a database <products.db>
    and whether or not a Discord alert should be sent out is returned. Discord
    alerts are sent out based on whether or not a new product matching
    keywords is found.

    Fixes over the previous version: the bare except silently swallowed
    EVERY sqlite error (locked database, bad schema, ...), not just the
    intended duplicate-link case; it now catches only IntegrityError, and
    the connection is committed/closed in a finally block so it cannot leak
    when an unexpected error propagates.
    '''
    title = product.title
    stock = str(product.stock)
    link = product.link
    keyword = product.keyword
    alert = False
    now = datetime.now()
    timestampStr = now.strftime(colored('[%d-%b-%Y [%H:%M:%S.%f]]', 'cyan'))

    # Create database / table on first use
    conn = sqlite3.connect('products.db')
    c = conn.cursor()
    try:
        c.execute("""CREATE TABLE IF NOT EXISTS products(title TEXT, link TEXT UNIQUE, stock TEXT, keywords TEXT)""")
        try:
            c.execute("""INSERT INTO products (title, link, stock, keywords) VALUES (?, ?, ?, ?)""", (title, link, stock, keyword))
            log('s', timestampStr + colored("Found new product with keyword " + keyword + ". URL = " + link, 'green'))
            alert = True
        except sqlite3.IntegrityError:
            # UNIQUE constraint on link: product already exists — no alert.
            pass
    finally:
        conn.commit()
        c.close()
        conn.close()

    # Return whether or not it's a new product
    return alert
def send_embed(product):
    '''
    (Product) -> None
    Posts a Discord webhook embed announcing *product*.
    '''
    webhook_url = 'WEBHOOK HERE'
    hook = Webhook(webhook_url, color=42320)
    hook.set_author(name='MY-Monitor')
    hook.set_desc("Found product based on keyword " + product.keyword)
    hook.add_field(name="Link", value=product.link)
    hook.set_footer(text='by keem#0815, Notify Beta', ts=True)
    hook.post()
def _parse_site(link):
    '''
    (str) -> str
    Returns the scheme + host portion of *link*, e.g. "https://shop.com".
    Any non-https link is treated as http:// (original behaviour preserved).
    '''
    if link.startswith("https://"):
        prefix, rest = "https://", link[8:]
    else:
        prefix, rest = "http://", link[7:]
    slash = rest.find("/")
    if slash != -1:
        rest = rest[:slash]
    return prefix + rest


def monitor(link, keywords):
    '''
    (str, list of str) -> None
    Given a URL <link> and keywords <keywords>, the URL is scanned and alerts
    are sent via Discord when a new product containing a keyword is detected.

    Fixes over the previous version: removed the dead `found`/`pos_http`
    variables, factored the duplicated scheme-parsing into _parse_site, and
    narrowed the bare excepts (which also swallowed KeyboardInterrupt) to
    the exceptions the calls actually raise.
    '''
    now = datetime.now()
    timestampStr = now.strftime(colored('[%d-%b-%Y [%H:%M:%S.%f]]', 'cyan'))
    log('i', timestampStr + colored("Scraping site... URL:" + link + "...", 'yellow'))

    site = _parse_site(link)

    # Get all the links on the "New Arrivals" page, retrying once on failure.
    # NOTE: verify=False disables TLS certificate checks — kept for parity,
    # but it is a security risk.
    try:
        r = requests.get(link, timeout=5, verify=False)
    except requests.RequestException:
        log('e', timestampStr + colored("Connection to URL failed.Retrying... URL: " + link, 'red'))
        time.sleep(5)
        try:
            r = requests.get(link, timeout=8, verify=False)
        except requests.RequestException:
            log('e', timestampStr + colored("Connection to URL <" + link + "> failed.", 'red'))
            return

    page = soup(r.text, "html.parser")
    hrefs = []
    for raw_link in page.findAll("a"):
        try:
            hrefs.append(raw_link["href"])
        except KeyError:
            # Anchor without an href attribute — skip it.
            pass

    # Check for links matching keywords; every matching keyword produces a
    # product entry (and possibly an alert), as before.
    for href in hrefs:
        for keyword in keywords:
            if keyword.upper() in href.upper():
                product_page = href if "http" in href else site + href
                product = Product("N/A", product_page, True, keyword)
                if add_to_db(product):
                    send_embed(product)
if __name__ == "__main__":
    # Silence the InsecureRequestWarning spam produced by verify=False requests.
    requests.packages.urllib3.disable_warnings()

    # Keyword slugs to watch for in product URLs (hyphen-separated).
    keywords = [
        "bred-toe",
        "gold-toe",
        "pharrell",
        "free-throw-line",
        "ld-waffle",
        "nike-air-max",
        "game-royal",
        "yeezy",
        "human-race",
        "sacai",
        "yeezy-350",
        "obsidian",
        "nike-sb-parra",
        "air-jordan",
        "ovo-jordan",
        "air-jordan-1",
        "wotherspoon",
        "air-jordan-IV-gym-red",
        "air-jordan-1-obsidian"
    ]

    # Load the sites to monitor from file.
    sites = read_from_txt("other-sites.txt")

    # Monitor forever: one thread per site, staggered 2 s apart.
    while True:
        threads = []
        for site in sites:
            worker = Thread(target=monitor, args=(site, keywords))
            threads.append(worker)
            worker.start()
            time.sleep(2)  # 2 second delay before going to the next site
984,751 | 983d6a10b5cb73d6ae3ca4bedd4c98dc888ebf33 | def start():
"""Start the task"""
pass
def pause():
    """Pause the task (stub — no behaviour implemented yet)."""
    return None
def finish():
    """Finish the task (stub — no behaviour implemented yet)."""
    return None
def status():
    """Report the status of the task (stub — no behaviour implemented yet)."""
    return None
def abort():
    """Abort the task (stub — no behaviour implemented yet)."""
    return None
def remove():
    """Remove the task (stub — no behaviour implemented yet)."""
    return None
|
984,752 | 2cbb4a21b86f06a490ae1a39a542983874ee09dd | # 元祖就是"一个不可变的列表" type
# 1. Purpose: store multiple values by index/position; read-only, never modified
# 2.
t = (10) # parentheses alone are just grouping, NOT a tuple
print(type(t))
t = (10,) # a single-element tuple requires the trailing comma
print(type(t))
# If a tuple element is a list, the list's CONTENTS may change; the tuple itself cannot
# 3. Conversion: anything a for loop can iterate may be passed to tuple()/list()
res = tuple({'k1': 111, 'k2': 222, 'k3':333})
print(res)
aa = tuple('hello')
print(aa[1])
# Built-in operations
# 4.1 Essential: access by index
msg = (111, 'egon', 'hello')
# forward (positive) indexing
# backward (negative) indexing
# lists allow both read and write by index; an out-of-range index raises IndexError
# print(msg[5])
# strings only allow reading by index
# 4.1.2 slicing | step | negative step
# res = msg[0:5:2]
# res = msg[5:0:-1]
# res = msg[::-1] # reverse the sequence
# res = msg[:5]
print(msg[0:len(msg)])
msg1 = msg[:]
print(msg1) # slicing makes a copy — a SHALLOW copy
# print(res, '\n', msg)
# 4.1.3 len
# print(len(msg))
# 4.1.4 in | not in
|
class StringCursor:
    """A string paired with a movable read position.

    The cursor starts at index 0 and only moves forward via increment().
    """

    def __init__(self, string):
        # Position of the next character to read.
        self.cursor = 0
        self.string = string

    def end(self):
        """True once the cursor has consumed the entire string."""
        return len(self.string) == self.cursor

    def read(self):
        """Everything from the cursor to the end of the string."""
        return self.string[self.cursor:]

    def read_one(self):
        """The single character currently under the cursor."""
        return self.string[self.cursor]

    def increment(self, distance=1):
        """Advance the cursor by *distance* characters (default 1)."""
        self.cursor += distance
|
984,754 | 3fcea78258217134e08780e604b35544626b4d97 | print(hi!11)
|
984,755 | 68da5643e77b356814fd833de7d420fcd83257ba | # -*- coding: utf-8 -*-
# -*- python 3 -*-
# -*- hongzhong Lu -*-
import os
os.chdir('/Users/luho/PycharmProjects/model/model_correction/code')
exec(open("./find_subsystem_Yeast8_using_code.py").read())
# It was found that the reaction format differs between software packages;
# the reaction list is therefore based on the R function to stay consistent.
subsystem_map = pd.read_excel('../result/subsystem_manual check results.xlsx')
gem_dataframe['subsystem_map'] = singleMapping(subsystem_map['Subsystem_for yeast map'],subsystem_map['Abbreviation'],gem_dataframe['rxnID'])
gem_dataframe['removed_duplicate_subsystem'] = singleMapping(subsystem_map['removed_duplicate_subsystem'],subsystem_map['Abbreviation'],gem_dataframe['rxnID'])
gem_dataframe['evidence'] = singleMapping(subsystem_map['evidence'],subsystem_map['Abbreviation'],gem_dataframe['rxnID'])

# Fill missing subsystems from the geneID-based annotation.
# (The former else branch assigned each value to itself and was removed.)
for i in range(0, len(gem_dataframe['subsystem_map'])):
    if gem_dataframe['subsystem_map'][i] is None:
        gem_dataframe['subsystem_map'][i] = gem_dataframe['subsystem_xref'][i]

# Fill empty-string subsystems from the keggID-based annotation.
# BUG FIX: the original used "is ''" — an identity comparison with a string
# literal, which is not guaranteed to match equal strings; use == instead.
for i in range(0, len(gem_dataframe['subsystem_map'])):
    if gem_dataframe['subsystem_map'][i] == '':
        gem_dataframe['subsystem_map'][i] = gem_dataframe['subsystem_rxnID'][i]

# Add the information from manual check results for reactions connected with new genes.
subsystem_manual = pd.read_excel('../data/subsytem_for new genes added into Yeast8.xlsx')
subsystem_manual['inf'] = subsystem_manual.loc[:,'subpathway'] + ' @@ ' + subsystem_manual.loc[:,'note']
rxn_gene['subsystem_manual'] = multiMapping(subsystem_manual['inf'],subsystem_manual['gene'],rxn_gene['gene'],sep=" // ")
gem_dataframe['subsytem_manual_newGene'] = multiMapping(rxn_gene['subsystem_manual'] ,rxn_gene['rxnID'] ,gem_dataframe['rxnID'],sep=" // ")
gem_dataframe['subsytem_manual_newGene'] = RemoveDuplicated(gem_dataframe['subsytem_manual_newGene'].tolist())

# Add the information from reaction notes for reactions from biolog experiments.
evidences_biolog = pd.read_excel('../data/classification for new reactions from biolog_result.xlsx')
evidences_biolog['inf'] = evidences_biolog.loc[:,'source'] + ' @@ ' + evidences_biolog.loc[:,'external_ID']
gem_dataframe['note'] = multiMapping(evidences_biolog['inf'], evidences_biolog['rxnID'] ,gem_dataframe['rxnID'],sep=" // ")
saveExcel(gem_dataframe,"../result/subsystem for yeast8 map.xlsx")

# Refine the subsystem for the yeast map based on reaction number and manual checks.
subsystem_map_v2 = pd.read_excel('../result/subsystem for yeast8 map_V2.xlsx')
984,756 | 3a724b31b6af979167139e393478e58315a945af | #Henry Nolan-Clutterbuck
#23/09/14
#Spot check 1
width = int(input("Please enter the width of the pool:"))
depth = int(input("Please enter the depth of the pool:"))
length = int(input("Please enter the length of the pool:"))
recvolume = (length*width*depth)
circleradius= width/2
circlearea=(3.14*(circleradius*circleradius))
halfcirclevolume=((circlearea/2)*depth)
poolvolume=recvolume+halfcirclevolume
print("The volume of the pool is {0}".format(poolvolume))
|
984,757 | fff039c169c2b38236a9f7c891ecadd127d4422a | import unittest
from tests.question_test import QuestionTest
from tests.topic_test import TopicTest
from tests.user_topic_test import UserTopicTest
from tests.quiz_test import QuizTest
from tests.difficulty_test import DifficultyTest
if __name__ == "__main__":
    # Discover and run every TestCase imported above (question, topic,
    # user_topic, quiz and difficulty suites).
    unittest.main()
|
984,758 | e900fc4d87f7e89f7407da811d654c16bbc14676 | import math
from time import sleep
def obj(params):
    """Toy objective function: returns sin of params['x'] after a 3 s delay.

    The sleep simulates an expensive evaluation — presumably for a
    hyper-parameter optimiser that calls this with a params dict;
    TODO confirm against the caller.
    """
    x = params['x']
    sleep(3)  # artificial cost per evaluation
    return math.sin(x)
984,759 | 7f601310d4ea025ec9c0671c30e9283994db6717 | from django.shortcuts import render
from django.views import generic
import os
from datetime import datetime
from subway.models import MapPrep
# View render requests
def index(request):
    """View function for main page of site"""
    # Instantiated even though the map-update path below is disabled;
    # kept in case MapPrep() has side effects on construction.
    map_prep = MapPrep()
    context = {
        'currentYear': datetime.now().year
    }
    # If the map is not up to date then update it (currently disabled):
    # if not map_prep.map_is_current:
    #     map_prep.update_map()
    return render(request, 'layout.html', context)
|
984,760 | 5070b0d7bd45adb0626249fb41421eaaa6af55ea | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import cv2
from scipy.misc import imread
import time
import os, random
import warnings
slim = tf.contrib.slim #For depthwise separable strided atrous convolutions
tf.logging.set_verbosity(tf.logging.DEBUG)

# Feature (channel) counts per encoder stage; each stage widens the previous.
features0 = 32
features1 = 2*features0 #Number of features to use after 1st convolution
features2 = 2*features1 #Number of features after 2nd convolution
features3 = 3*features1 #Number of features after 3rd convolution
features4 = 4*features1 #Number of features after 4th convolution
aspp_filters = features4 #Number of features for atrous convolutional spatial pyramid pooling
# Dilation rates for the three parallel atrous branches of the ASPP module.
aspp_rateSmall = 6
aspp_rateMedium = 12
aspp_rateLarge = 18

# Dataset split locations (Windows paths).
trainDir = "E:/stills_hq/train/"
valDir = "E:/stills_hq/val/"
testDir = "E:/stills_hq/test/"

modelSavePeriod = 1 #Train timestep in hours
modelSavePeriod *= 3600 #Convert to s

model_dir = "E:/models/noise2/"

# tf.data input-pipeline tuning knobs.
shuffle_buffer_size = 10000
num_parallel_calls = 6
num_parallel_readers = 6
prefetch_buffer_size = 64
#batch_size = 8 #Batch size to use during training
num_epochs = 1000000 #Dataset repeats indefinitely

logDir = "C:/dump/train/"
log_file = model_dir+"log.txt"
log_every = 1 #Log every _ examples

cumProbs = np.array([]) #Indices of the distribution plus 1 will be correspond to means

#Remove extreme intensities
removeLower = 0.01
removeUpper = 0.01

numMeans = 64
scaleMean = 4 #Each means array index increment corresponds to this increase in the mean
numDynamicGrad = 10 # Number of gradients to calculate for each possible mean when dynamically updating training
lossSmoothingBoxcarSize = 5

#Dimensions of images in the dataset
height = width = 2048
channels = 1 #Greyscale input image

#Sidelength of images to feed the neural network
cropsize = 1024
height_crop = width_crop = cropsize
def _tf_fspecial_gauss(size, sigma):
    """Function to mimic the 'fspecial' gaussian MATLAB function.

    Returns a normalised [size, size, 1, 1] float32 Gaussian kernel
    suitable as a tf.nn.conv2d filter.
    """
    lo = -size//2 + 1
    hi = size//2 + 1
    x_grid, y_grid = np.mgrid[lo:hi, lo:hi]
    # Append the in/out channel axes expected by conv2d filters.
    x_grid = x_grid[:, :, np.newaxis, np.newaxis]
    y_grid = y_grid[:, :, np.newaxis, np.newaxis]

    x = tf.constant(x_grid, dtype=tf.float32)
    y = tf.constant(y_grid, dtype=tf.float32)

    kernel = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
    # Normalise so the kernel sums to 1.
    return kernel / tf.reduce_sum(kernel)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5):
    """Structural-similarity (SSIM) between two NHWC image tensors.

    img1, img2: 4-D tensors; values assumed in [0, 1] (L = 1 below).
    cs_map: also return the contrast-structure term (used by MS-SSIM).
    mean_metric: reduce the per-pixel map to a scalar mean.
    size, sigma: Gaussian window parameters.
    """
    window = _tf_fspecial_gauss(size, sigma) # window shape [size, size]
    # Standard SSIM stabilisation constants.
    K1 = 0.01
    K2 = 0.03
    L = 1 # depth of image (255 in case the image has a different scale)
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    # Local means via Gaussian filtering.
    mu1 = tf.nn.conv2d(img1, window, strides=[1,1,1,1], padding='VALID')
    mu2 = tf.nn.conv2d(img2, window, strides=[1,1,1,1],padding='VALID')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    # Local variances and covariance (filtered second moments minus mean products).
    sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1,1,1,1],padding='VALID') - mu1_sq
    sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1,1,1,1],padding='VALID') - mu2_sq
    sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1,1,1,1],padding='VALID') - mu1_mu2
    if cs_map:
        # Return (ssim_map, cs_map) as a tuple.
        value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2)),
                (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
    else:
        value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2))
    if mean_metric:
        value = tf.reduce_mean(value)
    return value
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    """Multi-scale SSIM between two NHWC image tensors.

    Computes SSIM/contrast terms at `level` successive 2x average-pooled
    scales and combines them with the standard MS-SSIM weights.

    BUG FIX: tf.pack was renamed tf.stack in TensorFlow 1.0; this file
    already uses >=1.0 APIs (tf.layers, tf.estimator), so tf.pack raised
    AttributeError at runtime.
    """
    # Canonical per-scale weights from the MS-SSIM paper.
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        # Downsample both images by 2 for the next scale.
        filtered_im1 = tf.nn.avg_pool(img1, [1,2,2,1], [1,2,2,1], padding='SAME')
        filtered_im2 = tf.nn.avg_pool(img2, [1,2,2,1], [1,2,2,1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2

    # list to tensor of dim D+1 (tf.stack replaces the removed tf.pack)
    mssim = tf.stack(mssim, axis=0)
    mcs = tf.stack(mcs, axis=0)

    # Product of weighted contrast terms at the coarse scales times the
    # weighted SSIM at the finest remaining scale.
    value = (tf.reduce_prod(mcs[0:level-1]**weight[0:level-1])*
                            (mssim[level-1]**weight[level-1]))

    if mean_metric:
        value = tf.reduce_mean(value)
    return value
####Noise1
### Initial idea: aspp, batch norm + Leaky RELU, residual connection and lower feature numbers
#def architecture(lq, img=None, mode=None):
# """Atrous convolutional encoder-decoder noise-removing network"""
# phase = mode == tf.estimator.ModeKeys.TRAIN #phase is true during training
# concat_axis = 3
# ##Reusable blocks
# def conv_block(input, filters, phase=phase):
# """
# Convolution -> batch normalisation -> leaky relu
# phase defaults to true, meaning that the network is being trained
# """
# conv_block = tf.layers.conv2d(
# inputs=input,
# filters=filters,
# kernel_size=3,
# padding="SAME",
# activation=tf.nn.relu)
# #conv_block = tf.contrib.layers.batch_norm(
# # conv_block,
# # center=True, scale=True,
# # is_training=phase)
# #conv_block = tf.nn.leaky_relu(
# # features=conv_block,
# # alpha=0.2)
# #conv_block = tf.nn.relu(conv_block)
# return conv_block
# def aspp_block(input, phase=phase):
# """
# Atrous spatial pyramid pooling
# phase defaults to true, meaning that the network is being trained
# """
# #Convolutions at multiple rates
# conv1x1 = tf.layers.conv2d(
# inputs=input,
# filters=aspp_filters,
# kernel_size=1,
# padding="same",
# activation=tf.nn.relu,
# name="1x1")
# #conv1x1 = tf.contrib.layers.batch_norm(
# # conv1x1,
# # center=True, scale=True,
# # is_training=phase)
# conv3x3_rateSmall = tf.layers.conv2d(
# inputs=input,
# filters=aspp_filters,
# kernel_size=3,
# padding="same",
# dilation_rate=aspp_rateSmall,
# activation=tf.nn.relu,
# name="lowRate")
# #conv3x3_rateSmall = tf.contrib.layers.batch_norm(
# # conv3x3_rateSmall,
# # center=True, scale=True,
# # is_training=phase)
# conv3x3_rateMedium = tf.layers.conv2d(
# inputs=input,
# filters=aspp_filters,
# kernel_size=3,
# padding="same",
# dilation_rate=aspp_rateMedium,
# activation=tf.nn.relu,
# name="mediumRate")
# #conv3x3_rateMedium = tf.contrib.layers.batch_norm(
# # conv3x3_rateMedium,
# # center=True, scale=True,
# # is_training=phase)
# conv3x3_rateLarge = tf.layers.conv2d(
# inputs=input,
# filters=aspp_filters,
# kernel_size=3,
# padding="same",
# dilation_rate=aspp_rateLarge,
# activation=tf.nn.relu,
# name="highRate")
# #conv3x3_rateLarge = tf.contrib.layers.batch_norm(
# # conv3x3_rateLarge,
# # center=True, scale=True,
# # is_training=phase)
# #Image-level features
# pooling = tf.nn.pool(
# input=input,
# window_shape=(2,2),
# pooling_type="AVG",
# padding="SAME",
# strides=(2, 2))
# #Use 1x1 convolutions to project into a feature space the same size as the atrous convolutions'
# pooling = tf.layers.conv2d(
# inputs=pooling,
# filters=aspp_filters,
# kernel_size=1,
# padding="SAME",
# name="imageLevel")
# pooling = tf.image.resize_images(pooling, [64, 64])
# #pooling = tf.contrib.layers.batch_norm(
# # pooling,
# # center=True, scale=True,
# # is_training=phase)
# #Concatenate the atrous and image-level pooling features
# concatenation = tf.concat(
# values=[conv1x1, conv3x3_rateSmall, conv3x3_rateMedium, conv3x3_rateLarge, pooling],
# axis=concat_axis)
# #Reduce the number of channels
# reduced = tf.layers.conv2d( #Not sure if this is the correct way to reshape...
# inputs=concatenation,
# filters=aspp_filters,
# kernel_size=1,
# padding="SAME")
# return reduced
# def strided_conv_block(input, filters, stride, rate=1, phase=phase):
# return slim.separable_convolution2d(
# inputs=input,
# num_outputs=filters,
# kernel_size=3,
# depth_multiplier=1,
# stride=stride,
# padding='SAME',
# data_format='NHWC',
# rate=rate,
# activation_fn=tf.nn.relu,
# normalizer_fn=None,
# normalizer_params=None,
# weights_initializer=tf.contrib.layers.xavier_initializer(),
# weights_regularizer=None,
# biases_initializer=tf.zeros_initializer(),
# biases_regularizer=None,
# reuse=None,
# variables_collections=None,
# outputs_collections=None,
# trainable=True,
# scope=None)
# def deconv_block(input, filters, phase=phase):
# '''Transpositionally convolute a feature space to upsample it'''
# deconv_block = tf.layers.conv2d_transpose(
# inputs=input,
# filters=filters,
# kernel_size=3,
# strides=2,
# padding="SAME",
# activation=tf.nn.relu)
# #deconv_block = tf.contrib.layers.batch_norm(
# # deconv_block,
# # center=True, scale=True,
# # is_training=phase)
# #deconv_block = tf.nn.leaky_relu(
# # features=deconv_block,
# # alpha=0.2)
# #deconv_block = tf.nn.relu(deconv_block)
# return deconv_block
# '''Model building'''
# input_layer = tf.reshape(lq, [-1, cropsize, cropsize, channels])
# #Encoding block 0
# cnn0_last = conv_block(
# input=input_layer,
# filters=features0)
# cnn0_strided = strided_conv_block(
# input=cnn0_last,
# filters=features0,
# stride=2)
# #Encoding block 1
# cnn1_last = conv_block(
# input=cnn0_strided,
# filters=features1)
# cnn1_strided = strided_conv_block(
# input=cnn1_last,
# filters=features1,
# stride=2)
# #Encoding block 2
# cnn2_last = conv_block(
# input=cnn1_strided,
# filters=features2)
# cnn2_strided = strided_conv_block(
# input=cnn2_last,
# filters=features2,
# stride=2)
# #Encoding block 3
# #cnn3 = conv_block(
# # input=cnn2_strided,
# # filters=features3)
# #cnn3_last = conv_block(
# # input=cnn3,
# # filters=features3)
# cnn3_last = conv_block(
# input=cnn2_strided,
# filters=features3)
# cnn3_strided = strided_conv_block(
# input=cnn3_last,
# filters=features3,
# stride=2)
# #Encoding block 4
# #cnn4 = conv_block(
# # input=cnn3_strided,
# # filters=features4)
# #cnn4_last = conv_block(
# # input=cnn4,
# # filters=features4)
# cnn4_last = conv_block(
# input=cnn3_strided,
# filters=features4)
# #cnn4_strided = split_separable_conv2d(
# # inputs=cnn4_last,
# # filters=features4,
# # rate=2,
# # stride=2)
# #Prepare for aspp
# aspp_input = strided_conv_block(
# input=cnn4_last,
# filters=features4,
# stride=1,
# rate=2)
# aspp_input = conv_block(
# input=aspp_input,
# filters=features4)
# ##Atrous spatial pyramid pooling
# aspp = aspp_block(aspp_input)
# #Upsample the semantics by a factor of 4
# #upsampled_aspp = tf.image.resize_bilinear(
# # images=aspp,
# # tf.shape(aspp)[1:3],
# # align_corners=True)
# #Decoding block 1 (deepest)
# deconv4 = conv_block(aspp, features4)
# #deconv4 = conv_block(deconv4, features4)
# #Decoding block 2
# deconv4to3 = deconv_block(deconv4, features4)
# concat3 = tf.concat(
# values=[deconv4to3, cnn3_last],
# axis=concat_axis)
# deconv3 = conv_block(concat3, features3)
# #deconv3 = conv_block(deconv3, features3)
# #Decoding block 3
# deconv3to2 = deconv_block(deconv3, features3)
# concat2 = tf.concat(
# values=[deconv3to2, cnn2_last],
# axis=concat_axis)
# deconv2 = conv_block(concat2, features2)
# #Decoding block 4
# deconv2to1 = deconv_block(deconv2, features2)
# concat1 = tf.concat(
# values=[deconv2to1, cnn1_last],
# axis=concat_axis)
# deconv1 = conv_block(concat1, features1)
# #Decoding block 5
# deconv1to0 = deconv_block(deconv1, features1)
# concat0 = tf.concat(
# values=[deconv1to0, cnn0_last],
# axis=concat_axis)
# deconv1 = conv_block(concat0, features0)
# #Create final image with 1x1 convolutions
# deconv_final = tf.layers.conv2d_transpose(
# inputs=deconv1,
# filters=1,
# kernel_size=3,
# padding="SAME",
# activation=tf.nn.relu)
# #Residually connect the input to the output
# output = deconv_final#+input_layer
# #Image values will be between 0 and 1
# output = tf.clip_by_value(
# output,
# clip_value_min=0,
# clip_value_max=1)
# if phase: #Calculate loss during training
# ground_truth = tf.reshape(img, [-1, cropsize, cropsize, channels])
# loss = 1.0-tf_ssim(output, ground_truth)#cropsize*cropsize*tf.reduce_mean(tf.squared_difference(output, ground_truth))
# #tf.log(cropsize*cropsize*tf.reduce_mean(tf.squared_difference(output, ground_truth))+1)
# #tf.summary.histogram("loss", loss)
# else:
# loss = -1
# return loss, output
###Second noise architecture
###More convolutions between strides
def architecture(lq, img=None, mode=None):
"""Atrous convolutional encoder-decoder noise-removing network"""
phase = mode == tf.estimator.ModeKeys.TRAIN #phase is true during training
concat_axis = 3
##Reusable blocks
def conv_block(input, filters, phase=phase):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = tf.layers.conv2d(
inputs=input,
filters=filters,
kernel_size=3,
padding="SAME",
activation=tf.nn.relu)
#conv_block = tf.contrib.layers.batch_norm(
# conv_block,
# center=True, scale=True,
# is_training=phase)
#conv_block = tf.nn.leaky_relu(
# features=conv_block,
# alpha=0.2)
#conv_block = tf.nn.relu(conv_block)
return conv_block
def aspp_block(input, phase=phase):
"""
Atrous spatial pyramid pooling
phase defaults to true, meaning that the network is being trained
"""
#Convolutions at multiple rates
conv1x1 = tf.layers.conv2d(
inputs=input,
filters=aspp_filters,
kernel_size=1,
padding="same",
activation=tf.nn.relu,
name="1x1")
#conv1x1 = tf.contrib.layers.batch_norm(
# conv1x1,
# center=True, scale=True,
# is_training=phase)
conv3x3_rateSmall = tf.layers.conv2d(
inputs=input,
filters=aspp_filters,
kernel_size=3,
padding="same",
dilation_rate=aspp_rateSmall,
activation=tf.nn.relu,
name="lowRate")
#conv3x3_rateSmall = tf.contrib.layers.batch_norm(
# conv3x3_rateSmall,
# center=True, scale=True,
# is_training=phase)
conv3x3_rateMedium = tf.layers.conv2d(
inputs=input,
filters=aspp_filters,
kernel_size=3,
padding="same",
dilation_rate=aspp_rateMedium,
activation=tf.nn.relu,
name="mediumRate")
#conv3x3_rateMedium = tf.contrib.layers.batch_norm(
# conv3x3_rateMedium,
# center=True, scale=True,
# is_training=phase)
conv3x3_rateLarge = tf.layers.conv2d(
inputs=input,
filters=aspp_filters,
kernel_size=3,
padding="same",
dilation_rate=aspp_rateLarge,
activation=tf.nn.relu,
name="highRate")
#conv3x3_rateLarge = tf.contrib.layers.batch_norm(
# conv3x3_rateLarge,
# center=True, scale=True,
# is_training=phase)
#Image-level features
pooling = tf.nn.pool(
input=input,
window_shape=(2,2),
pooling_type="AVG",
padding="SAME",
strides=(2, 2))
#Use 1x1 convolutions to project into a feature space the same size as the atrous convolutions'
pooling = tf.layers.conv2d(
inputs=pooling,
filters=aspp_filters,
kernel_size=1,
padding="SAME",
name="imageLevel")
pooling = tf.image.resize_images(pooling, [64, 64])
#pooling = tf.contrib.layers.batch_norm(
# pooling,
# center=True, scale=True,
# is_training=phase)
#Concatenate the atrous and image-level pooling features
concatenation = tf.concat(
values=[conv1x1, conv3x3_rateSmall, conv3x3_rateMedium, conv3x3_rateLarge, pooling],
axis=concat_axis)
#Reduce the number of channels
reduced = tf.layers.conv2d( #Not sure if this is the correct way to reshape...
inputs=concatenation,
filters=aspp_filters,
kernel_size=1,
padding="SAME")
return reduced
def strided_conv_block(input, filters, stride, rate=1, phase=phase):
return slim.separable_convolution2d(
inputs=input,
num_outputs=filters,
kernel_size=3,
depth_multiplier=1,
stride=stride,
padding='SAME',
data_format='NHWC',
rate=rate,
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
def deconv_block(input, filters, phase=phase):
'''Transpositionally convolute a feature space to upsample it'''
deconv_block = tf.layers.conv2d_transpose(
inputs=input,
filters=filters,
kernel_size=3,
strides=2,
padding="SAME",
activation=tf.nn.relu)
#deconv_block = tf.contrib.layers.batch_norm(
# deconv_block,
# center=True, scale=True,
# is_training=phase)
#deconv_block = tf.nn.leaky_relu(
# features=deconv_block,
# alpha=0.2)
#deconv_block = tf.nn.relu(deconv_block)
return deconv_block
'''Model building'''
input_layer = tf.reshape(lq, [-1, cropsize, cropsize, channels])
#Encoding block 0
cnn0 = conv_block(
input=input_layer,
filters=features0)
cnn0_last = conv_block(
input=cnn0,
filters=features0)
cnn0_strided = strided_conv_block(
input=cnn0_last,
filters=features0,
stride=2)
#Encoding block 1
cnn1 = conv_block(
input=cnn0_strided,
filters=features1)
cnn1_last = conv_block(
input=cnn1,
filters=features1)
cnn1_strided = strided_conv_block(
input=cnn1_last,
filters=features1,
stride=2)
#Encoding block 2
cnn2 = conv_block(
input=cnn1_strided,
filters=features2)
cnn2_last = conv_block(
input=cnn2,
filters=features2)
cnn2_strided = strided_conv_block(
input=cnn2_last,
filters=features2,
stride=2)
#Encoding block 3
cnn3 = conv_block(
input=cnn2_strided,
filters=features3)
cnn3 = conv_block(
input=cnn3,
filters=features3)
cnn3_last = conv_block(
input=cnn3,
filters=features3)
cnn3_strided = strided_conv_block(
input=cnn3_last,
filters=features3,
stride=2)
#Encoding block 4
cnn4 = conv_block(
input=cnn3_strided,
filters=features4)
cnn4 = conv_block(
input=cnn4,
filters=features4)
cnn4_last = conv_block(
input=cnn4,
filters=features4)
#cnn4_strided = split_separable_conv2d(
# inputs=cnn4_last,
# filters=features4,
# rate=2,
# stride=2)
#Prepare for aspp
aspp_input = strided_conv_block(
input=cnn4_last,
filters=features4,
stride=1,
rate=2)
aspp_input = conv_block(
input=aspp_input,
filters=features4)
##Atrous spatial pyramid pooling
aspp = aspp_block(aspp_input)
#Upsample the semantics by a factor of 4
#upsampled_aspp = tf.image.resize_bilinear(
# images=aspp,
# tf.shape(aspp)[1:3],
# align_corners=True)
#Decoding block 1 (deepest)
deconv4 = conv_block(aspp, features4)
deconv4 = conv_block(deconv4, features4)
deconv4 = conv_block(deconv4, features4)
#Decoding block 2
deconv4to3 = deconv_block(deconv4, features4)
concat3 = tf.concat(
values=[deconv4to3, cnn3_last],
axis=concat_axis)
deconv3 = conv_block(concat3, features3)
deconv3 = conv_block(deconv3, features3)
deconv3 = conv_block(deconv3, features3)
#Decoding block 3
deconv3to2 = deconv_block(deconv3, features3)
concat2 = tf.concat(
values=[deconv3to2, cnn2_last],
axis=concat_axis)
deconv2 = conv_block(concat2, features2)
deconv2 = conv_block(deconv2, features2)
#Decoding block 4
deconv2to1 = deconv_block(deconv2, features2)
concat1 = tf.concat(
values=[deconv2to1, cnn1_last],
axis=concat_axis)
deconv1 = conv_block(concat1, features1)
deconv1 = conv_block(deconv1, features1)
#Decoding block 5
deconv1to0 = deconv_block(deconv1, features1)
concat0 = tf.concat(
values=[deconv1to0, cnn0_last],
axis=concat_axis)
deconv0 = conv_block(concat0, features0)
deconv0 = conv_block(deconv0, features0)
#Create final image with 1x1 convolutions
deconv_final = tf.layers.conv2d_transpose(
inputs=deconv0,
filters=1,
kernel_size=3,
padding="SAME",
activation=tf.nn.relu)
#Residually connect the input to the output
output = deconv_final#+input_layer
#Image values will be between 0 and 1
output = tf.clip_by_value(
output,
clip_value_min=0,
clip_value_max=1)
if phase: #Calculate loss during training
ground_truth = tf.reshape(img, [-1, cropsize, cropsize, channels])
loss = 1.0-tf_ssim(output, ground_truth)#cropsize*cropsize*tf.reduce_mean(tf.squared_difference(output, ground_truth))
#tf.log(cropsize*cropsize*tf.reduce_mean(tf.squared_difference(output, ground_truth))+1)
#tf.summary.histogram("loss", loss)
else:
loss = -1
return loss, output
def load_image(addr, resizeSize=None, imgType=np.float32):
    """Load the image at *addr* as floating-point data.

    If *resizeSize* is given, the image is resized to it (cubic
    interpolation). The result is cast to *imgType* before returning.
    """
    loaded = imread(addr, mode='F')
    if resizeSize:
        loaded = cv2.resize(loaded, resizeSize, interpolation=cv2.INTER_CUBIC)
    return loaded.astype(imgType)
def scale0to1(img):
    """Linearly rescale *img* into the range [0, 1] as float32.

    A constant image (max == min) has no dynamic range, so it is mapped
    to an array of 0.5 instead. Unlike the previous version (which called
    `img.fill(0.5)` in that branch), the input array is never mutated and
    the shadowed builtins `min`/`max` are no longer used as names.
    """
    lo = np.min(img)
    hi = np.max(img)
    if lo == hi:
        # Degenerate case: nothing to stretch; return a neutral image.
        scaled = np.full_like(img, 0.5, dtype=np.float32)
    else:
        scaled = (img - lo) / (hi - lo)
    return scaled.astype(np.float32)
def gen_lq(img, scale):
    """Simulate a low-quality (shot-noisy) version of *img*.

    *scale* sets the mean photon count per pixel before Poisson sampling;
    the noisy result is rescaled back to [0, 1].
    """
    # Re-seed from the current RNG state so successive calls diverge.
    fresh_seed = int(np.random.rand() * (2 ** 32 - 1))
    np.random.seed(fresh_seed)
    noisy = np.random.poisson(img * scale)
    return scale0to1(noisy)
def flip_rotate(img):
    """Applies a random flip || rotation to the image, possibly leaving it unchanged"""
    # The eight symmetries of the square (dihedral group), indexed 0-7.
    transforms = (
        lambda a: a,
        lambda a: np.rot90(a, 1),
        lambda a: np.rot90(a, 2),
        lambda a: np.rot90(a, 3),
        lambda a: np.flip(a, 0),
        lambda a: np.flip(a, 1),
        lambda a: np.flip(np.rot90(a, 1), 0),
        lambda a: np.flip(np.rot90(a, 1), 1),
    )
    pick = int(8 * np.random.rand())
    return transforms[pick](img)
def preprocess(img):
    """
    Threshold the image to remove dead or very bright pixels.
    Then crop a region of the image of a random size and resize it.
    """
    # Clip thresholds from the sorted pixel distribution: drop the darkest
    # removeLower and brightest removeUpper fractions. (The previous
    # version shadowed the builtins `sorted`, `min` and `max`.)
    flat_sorted = np.sort(img, axis=None)
    low = flat_sorted[int(removeLower * flat_sorted.size)]
    high = flat_sorted[int((1.0 - removeUpper) * flat_sorted.size)]
    # Random crop-window size in [cropsize, height].
    size = int(cropsize + np.random.rand() * (height - cropsize))
    topLeft_x = int(np.random.rand() * (height - size))
    topLeft_y = int(np.random.rand() * (height - size))
    # NOTE(review): the slice uses `cropsize`, not the randomly drawn
    # `size`, so `size` only constrains where the crop may start -- confirm
    # this is intended.
    crop = np.clip(img[topLeft_y:(topLeft_y + cropsize), topLeft_x:(topLeft_x + cropsize)], low, high)
    resized = cv2.resize(crop, (cropsize, cropsize), interpolation=cv2.INTER_AREA)
    # Replace non-finite values before rescaling to [0, 1].
    resized[np.isnan(resized)] = 0.5
    resized[np.isinf(resized)] = 0.5
    return scale0to1(flip_rotate(resized))
def get_scale():
    """Generate a mean from the cumulative probability distribution"""
    # Dynamic mean selection is currently disabled; a fixed mid-range
    # scale is used instead.
    fixed_scale = 0.5
    return fixed_scale
def parser(record, dir):
    """Parse files and generate lower quality images from them

    Returns a (low-quality, ground-truth) float32 image pair. On a
    RuntimeWarning the current file is abandoned and a random replacement
    from `dir` is parsed instead (recursively).
    """
    with warnings.catch_warnings():
        # NOTE(review): catch_warnings alone does not turn warnings into
        # exceptions; a simplefilter('error') elsewhere seems to be
        # assumed -- confirm.
        try:
            img = load_image(record)
            img = preprocess(img)
            scale = get_scale()
            lq = gen_lq(img, scale)
            # Match the ground truth's mean brightness to the noisy image.
            img = (np.mean(lq) * img / np.mean(img)).clip(0.0, 1.0)
            #cv2.namedWindow('dfsd',cv2.WINDOW_NORMAL)
            #cv2.imshow("dfsd", lq)
            #cv2.waitKey(0)
            #cv2.namedWindow('dfsd',cv2.WINDOW_NORMAL)
            #cv2.imshow("dfsd", img)
            #cv2.waitKey(0)
        except RuntimeWarning as e:
            print("Catching this RuntimeWarning is getting personal...")
            print(e)
            lq, img = parser(dir+random.choice(os.listdir(dir)), dir)
    return lq, img
def input_fn(dir):
    """Create a dataset from a list of filenames

    TF1-style input pipeline: shuffles the .tif paths in `dir`, then maps
    each through the numpy `parser` (via tf.py_func) to produce
    (low-quality, ground-truth) tensor pairs, prefetched and repeated.
    """
    dataset = tf.data.Dataset.list_files(dir+"*.tif")
    dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
    dataset = dataset.map(
        lambda file: tuple(tf.py_func(parser, [file, dir], [tf.float32, tf.float32])),
        num_parallel_calls=num_parallel_calls)
    #dataset = dataset.batch(batch_size=batch_size)
    dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
    dataset = dataset.repeat(num_epochs)
    # NOTE(review): `iter` shadows the builtin of the same name.
    iter = dataset.make_one_shot_iterator()
    lq, img = iter.get_next()
    return lq, img
def movingAverage(values, window):
    """Boxcar-smooth *values* with a kernel of width *window* ('same' length)."""
    kernel = np.full(window, 1.0 / window)
    return np.convolve(values, kernel, 'same')
def get_training_probs(losses0, losses1):
    """
    Returns cumulative probabilities of means being selected for low-quality image syntheses

    losses0 - previous losses (smoothed)
    losses1 - losses after the current training run
    """
    # Improvement per mean = smoothed previous loss - smoothed new loss;
    # regressions are clamped to zero.
    diffs = movingAverage(losses0, lossSmoothingBoxcarSize) - movingAverage(losses1, lossSmoothingBoxcarSize)
    diffs[diffs < 0] = 0
    max_diff = np.max(diffs)
    if max_diff == 0:
        # Nothing improved; avoid a zero floor (and zero division) below.
        max_diff = 1
    # Floor every weight at 5% of the best so no mean starves completely.
    diffs += 0.05*max_diff
    cumDiffs = np.cumsum(diffs)
    cumProbs = cumDiffs / np.max(cumDiffs, axis=None)
    return cumProbs.astype(np.float32)
def main(unused_argv=None):
    """Build the denoiser graph, then alternate training and evaluation forever.

    Each cycle trains for `modelSavePeriod` seconds, exports a SavedModel,
    then measures per-mean losses intended to steer future synthesis.
    """
    # Snapshot the variables that exist before graph construction so the
    # optimizer-slot variables created below can be initialised separately.
    temp = set(tf.all_variables())
    log = open(log_file, 'a')
    #with tf.device("/gpu:0"):
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) #For batch normalisation windows
    with tf.control_dependencies(update_ops):
        lq, img = input_fn(trainDir)
        loss, prediction = architecture(lq, img, tf.estimator.ModeKeys.TRAIN)
        train_op = tf.train.AdamOptimizer().minimize(loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #config.gpu_options.per_process_gpu_memory_fraction = 0.7
    #saver = tf.train.Saver(max_to_keep=-1)
    tf.add_to_collection("train_op", train_op)
    tf.add_to_collection("update_ops", update_ops)
    with tf.Session(config=config) as sess: #Alternative is tf.train.MonitoredTrainingSession()
        init = tf.global_variables_initializer()
        sess.run(init)
        # Initialise only the variables created after the snapshot above.
        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        train_writer = tf.summary.FileWriter( logDir, sess.graph )
        #Set up mean probabilities to be dynamically adjusted during training
        probs = np.ones(numMeans, dtype=np.float32)
        losses0 = np.empty([])
        global cumProbs
        cumProbs = np.cumsum(probs)
        cumProbs /= np.max(cumProbs)
        #print(tf.all_variables())
        counter = 0
        cycleNum = 0
        while True:
            cycleNum += 1
            #Train for a couple of hours
            time0 = time.time()
            while time.time()-time0 < modelSavePeriod:
                counter += 1
                #merge = tf.summary.merge_all()
                _, loss_value = sess.run([train_op, loss])
                print("Iter: {}, Loss: {:.6f}".format(counter, loss_value))
                log.write("Iter: {}, Loss: {:.6f}".format(counter, loss_value))
                #train_writer.add_summary(summary, counter)
            #Save the model
            #saver.save(sess, save_path=model_dir+"model", global_step=counter)
            tf.saved_model.simple_save(
                session=sess,
                export_dir=model_dir+"model-"+str(counter)+"/",
                inputs={"lq": lq},
                outputs={"prediction": prediction})
            #predict_fn = tf.contrib.predictor.from_saved_model(model_dir+"model-"+str(counter)+"/")
            #loaded_img = imread("E:/stills_hq/reaping1.tif", mode='F')
            #loaded_img = scale0to1(cv2.resize(loaded_img, (cropsize, cropsize), interpolation=cv2.INTER_AREA))
            #cv2.namedWindow('dfsd',cv2.WINDOW_NORMAL)
            #cv2.imshow("dfsd", loaded_img)
            #cv2.waitKey(0)
            #prediction1 = predict_fn({"lq": loaded_img})
            #cv2.namedWindow('dfsd',cv2.WINDOW_NORMAL)
            #cv2.imshow("dfsd", prediction1['prediction'].reshape(cropsize, cropsize))
            #cv2.waitKey(0)
            #Evaluate the model and use the results to dynamically adjust the training process
            losses = np.zeros(numMeans, dtype=np.float32)
            for i in range(numMeans):
                for _ in range(numDynamicGrad):
                    losses[i] += sess.run(loss)
                    print(i, losses[i])
                losses[i] /= numDynamicGrad
            np.save(model_dir+"losses-"+str(counter), losses)
            # Dynamic probability adjustment is currently disabled.
            #cumProbs = get_training_probs(losses0, losses)
            losses0 = losses
    return
if __name__ == "__main__":
tf.app.run()
|
984,761 | eb3eba8f1b82f4f9776df7ad4d89ce0acd48f373 | from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Per-lane accumulators: open files, parsed XML soups, and averaged jam
# lengths for the bottom (shoulder), middle and top (left) lanes across
# six SUMO lane-detector output files each.
botFiles = []
midFiles = []
topFiles = []
botSoups = []
midSoups = []
topSoups = []
somasBot = []
somasMid = []
somasTop = []
somasAcostamento = []
#[cima: [valor0, ..., valor 5]]
#index = [vehAcostamento1, vehAcostamento2, vehAcostamento3]
for index in range (0, 6):
    # NOTE(review): file handles are opened but never closed -- consider
    # using `with` blocks.
    file = open('laneDetBot' + str(index) + '.xml', 'r')
    botFiles.append(file)
    file = open('laneDetMid' + str(index) + '.xml', 'r')
    midFiles.append(file)
    file = open('laneDetTop' + str(index) + '.xml', 'r')
    topFiles.append(file)
    soup = BeautifulSoup(botFiles[index], 'lxml')
    botSoups.append(soup)
    soup = BeautifulSoup(midFiles[index], 'lxml')
    midSoups.append(soup)
    soup = BeautifulSoup(topFiles[index], 'lxml')
    topSoups.append(soup)
    # Bottom lane: average jam length per interval, plus total vehicles
    # that entered the shoulder (used later as the x axis).
    somasBot.append(0)
    somasAcostamento.append(0)
    counter = 0
    for interval in botSoups[index].find_all('interval'):
        somasBot[index] += float(interval.get('jamlengthinvehiclessum'))
        somasAcostamento[index] += float(interval.get('nvehentered'))
        counter += 1
    somasBot[index] /= counter
    # Middle lane: average jam length per interval.
    somasMid.append(0)
    counter = 0
    for interval in midSoups[index].find_all('interval'):
        somasMid[index] += float(interval.get('jamlengthinvehiclessum'))
        counter += 1
    somasMid[index] /= counter
    # Top lane: average jam length per interval.
    somasTop.append(0)
    counter = 0
    for interval in topSoups[index].find_all('interval'):
        somasTop[index] += float(interval.get('jamlengthinvehiclessum'))
        counter += 1
    somasTop[index] /= counter
# Plot average jam length per lane against shoulder-lane violators.
# NOTE(review): `dict` shadows the builtin of the same name.
dict = {'esquerda': somasTop, 'meio': somasMid, 'acostamento': somasBot}
dataFrame = pd.DataFrame (dict, index=somasAcostamento)
print (dict)
dataFrame = dataFrame.astype (float)
ax = dataFrame.plot(title="Relação Entre Comprimentos dos Congestionamentos e Número de Motoristas Infratores")
ax.set_xlabel("Número de carros que entraram no acostamento")
ax.set_ylabel("Soma dos comprimentos dos congestionamentos (número de veículos)")
plt.show()
|
984,762 | 8cc0f616a49ee9a2a53d9cc566eb9f014e041b6e | '''
Day: 17
File: conway_cubes.py
Author: Rishabh Ranjan
Last Modified: 12/17/2020
'''
import copy
class Cube:
    # Class-level cache: coordinates -> set of neighboring coordinates.
    # Shared by every instance; callers reset it between independent runs.
    neighbors = {}

    def __init__(self, coordinates, active):
        # coordinates: 3- or 4-tuple of ints; active: bool cell state.
        self.coordinates = coordinates
        self.active = active

    def populate_neighbors(self, cubes, expand):
        """Record this cube's adjacent coordinates into Cube.neighbors.

        With expand=True every adjacent coordinate is recorded (growing
        the frontier); with expand=False only coordinates already present
        in `cubes` are recorded. Supports 3- and 4-dimensional tuples.
        """
        for i in range(self.coordinates[0] - 1, self.coordinates[0] + 2):
            for j in range(self.coordinates[1] - 1, self.coordinates[1] + 2):
                for k in range(self.coordinates[2] - 1, self.coordinates[2] + 2):
                    if len(self.coordinates) == 3:
                        if (expand or (not expand and (i, j, k) in cubes)) and (i, j, k) != self.coordinates:
                            if self.coordinates in Cube.neighbors:
                                Cube.neighbors[self.coordinates].add((i, j , k))
                            else:
                                Cube.neighbors[self.coordinates] = {(i, j, k)}
                    elif len(self.coordinates) == 4:
                        # 4-D: extend the neighborhood over the w axis too.
                        for l in range(self.coordinates[3] - 1, self.coordinates[3] + 2):
                            if (expand or (not expand and (i, j, k, l) in cubes)) and (i, j, k, l) != self.coordinates:
                                if self.coordinates in Cube.neighbors:
                                    Cube.neighbors[self.coordinates].add((i, j , k, l))
                                else:
                                    Cube.neighbors[self.coordinates] = {(i, j, k, l)}
def simulate_cubes(cubes):
    """Advance the cube dictionary by one cycle and return it (mutated in place)."""
    # Pass 1: record neighbors with expand=True so coordinates just outside
    # the current frontier are discovered...
    for cube in cubes.values():
        cube.populate_neighbors(cubes, True)
    # ...then materialise those newly reachable coordinates as inactive cubes.
    for neighbor_list in Cube.neighbors.values():
        for coordinates in neighbor_list:
            if not coordinates in cubes:
                cubes[coordinates] = Cube(coordinates, False)
    # Pass 2: re-record neighbors restricted to cubes that now exist.
    for cube in cubes.values():
        cube.populate_neighbors(cubes, False)
    # Evaluate the rules against a frozen copy of the current state so all
    # updates are simultaneous.
    cubes_copy = copy.deepcopy(cubes)
    for cube in cubes.values():
        num_active_neighbors = 0
        for neighbor_coordinates in Cube.neighbors[cube.coordinates]:
            if cubes_copy[neighbor_coordinates].active:
                num_active_neighbors += 1
        # Active survives with 2-3 active neighbors; inactive activates on 3.
        if cube.active and num_active_neighbors != 2 and num_active_neighbors != 3:
            cube.active = False
        elif not cube.active and num_active_neighbors == 3:
            cube.active = True
    return cubes
def main():
    """Solve both puzzle parts: 3-D and 4-D boot sequences of 6 cycles each."""
    f = open('day_17_input.txt', 'r')
    initial_state = f.read().splitlines()
    f.close()
    # Part 1: seed the z=0 plane of a 3-D grid from the '#' cells.
    cubes = {}
    for i in range(len(initial_state)):
        for j in range(len(initial_state[0])):
            cubes[(i, j, 0)] = Cube((i, j, 0), True if initial_state[i][j] == '#' else False)
    for i in range(6):
        cubes = simulate_cubes(cubes)
    count = 0
    for cube in cubes.values():
        if cube.active:
            count += 1
    print("Part 1 Answer: ", count)
    # Part 2: same input seeded into 4-D coordinates; the shared class-level
    # neighbor cache must be reset between runs.
    cubes = {}
    Cube.neighbors = {}
    for i in range(len(initial_state)):
        for j in range(len(initial_state[0])):
            cubes[(i, j, 0, 0)] = Cube((i, j, 0, 0), True if initial_state[i][j] == '#' else False)
    for i in range(6):
        cubes = simulate_cubes(cubes)
    count = 0
    for cube in cubes.values():
        if cube.active:
            count += 1
    print("Part 2 Answer: ", count)
if __name__ == '__main__':
main()
|
984,763 | f0b1db42d29a3975774297975b3ca1bc87f69ba3 | from django.db import models
from django.contrib.auth.models import User
from datetime import time, datetime
from django.core.exceptions import ValidationError
from django.db.models.signals import post_save
def timediff(t1, t2):
    """Hours elapsed from t2 to t1 (minute precision), wrapping past midnight."""
    minutes = (t1.hour - t2.hour) * 60 + (t1.minute - t2.minute)
    if minutes < 0:
        # t1 is on the following day: wrap around midnight.
        minutes += 24 * 60
    return minutes / 60.0
# Create your models here.
class Timesheet(models.Model):
    """A batch of work entries that is billed/downloaded as one unit."""
    user = models.ForeignKey(User)
    created = models.DateField(auto_now_add=True)
    downloaded = models.DateField(blank=True, null=True)

    def _get_is_downloaded(self):
        # Downloaded state is encoded by the timestamp being set.
        return self.downloaded is not None

    def _set_is_downloaded(self, value):
        # Any assignment marks the sheet as downloaded "now"; the assigned
        # value itself is ignored. (Fixes the old misspelled `velue` param.)
        self.downloaded = datetime.now()

    is_downloaded = property(_get_is_downloaded, _set_is_downloaded)
class Entry(models.Model):
    """A single contiguous work period on one date, owned by a user."""
    date = models.DateField()
    start_time = models.TimeField()
    end_time = models.TimeField()
    user = models.ForeignKey(User)
    timesheet = models.ForeignKey(Timesheet, null=True, blank=True)

    def get_timediff(self):
        """Duration of the entry in fractional hours."""
        return timediff(self.end_time, self.start_time)

    def get_timediffstring(self):
        """Duration formatted with two decimals, e.g. ' 1.50'."""
        return "% 2.2f" % self.get_timediff()

    def get_shortdatestring(self):
        """Date as 'DD.MM'."""
        return "%02i.%02i" % (self.date.day, self.date.month)

    def get_weeknumber(self):
        """ISO week number of the entry's date."""
        return self.date.isocalendar()[1]

    def save(self, *args, **kwargs):
        """Validate ordering, overlap and unbilled limits, then persist.

        Raises ValidationError when the interval is empty or reversed,
        overlaps another entry of the same user on the same date, or the
        user already has too many unbilled entries.
        """
        if (self.start_time >= self.end_time):
            raise ValidationError("Requirement: start time < end time")
        # Two intervals overlap iff each starts before the other ends.
        # (The previous two separate checks missed the case where the new
        # entry fully contains an existing one.)
        c = Entry.objects.filter(
            user=self.user,
            date=self.date,
            start_time__lt=self.end_time,
            end_time__gt=self.start_time
        ).exclude(pk=self.id).count()
        if c > 0:
            raise ValidationError("Cannot create multiple entries in the same time interval")
        c = Entry.objects.filter(user=self.user, timesheet=None).count()
        if c > 27 and not self.id:
            raise ValidationError("Limit reached. Please bill current entries before adding more")
        # BUG FIX: args/kwargs were previously passed as two positional
        # tuples; they must be unpacked for Model.save to see them.
        super(Entry, self).save(*args, **kwargs)
class UserProfile(models.Model):
    """Extra per-user billing/contact details (one profile per auth User)."""
    birth_date = models.CharField(max_length=6, blank=True)
    p_no = models.CharField(max_length=5, blank=True)
    address = models.CharField(max_length=255, blank=True)
    zip_code = models.CharField(max_length=4, blank=True)
    city = models.CharField(max_length=255, blank=True)
    user = models.ForeignKey(User, unique=True)
    # Norwegian tax municipality code -- presumably; confirm with usage.
    skattekommune = models.CharField(max_length=5, blank=True)
    account_number = models.CharField(max_length=11, blank=True)

    def __unicode__(self):
        return unicode(self.user.username)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save hook: auto-create an empty profile for each new User."""
    if created:
        UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
|
984,764 | 718c0252fba23a82c001f309a5af01654c6fca42 | from flask import Flask, render_template, request, redirect
from datetime import datetime
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the order form landing page."""
    return render_template("index.html")
@app.route('/checkout', methods=['POST'])
def checkout():
    """Process the fruit order form and render a receipt page.

    Reads fruit counts and customer details from the POSTed form, builds a
    human-readable timestamp, and passes everything to checkout.html.
    """
    print(request.form)

    def _ordinal_suffix(day):
        # 4th-20th and 24th-30th take 'th'; otherwise 1st/2nd/3rd pattern.
        if 4 <= day <= 20 or 24 <= day <= 30:
            return "th"
        return ["st", "nd", "rd"][day % 10 - 1]

    now = datetime.now()
    month = now.strftime("%B")
    day_num = int(now.strftime("%d"))
    day = f"{day_num}{_ordinal_suffix(day_num)}"
    year = now.strftime("%Y")
    hour = int(now.strftime("%I"))
    minute = now.strftime("%M")
    sec = now.strftime("%S")
    am_pm = now.strftime("%p")
    timestamp = f"{month} {day}, {year} at {hour}:{minute}:{sec} {am_pm}"

    strawberry = int(request.form['strawberry'])
    raspberry = int(request.form['raspberry'])
    apple = int(request.form['apple'])
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    # Renamed locals: the old code shadowed the builtins `id` and `sum`.
    student_id = request.form['student_id']
    total = strawberry + raspberry + apple
    print(f"Charging {first_name} {last_name} for {total} fruits")
    # Keyword names (id, sum, y) are part of the template's contract.
    return render_template("checkout.html", strawberry = strawberry,
        raspberry = raspberry, apple = apple, first_name = first_name,
        last_name = last_name, id = student_id, sum = total, y = timestamp)
@app.route('/fruits')
def fruits():
    """Serve the fruit listing page."""
    return render_template("fruits.html")
if __name__=="__main__":
app.run(debug=True) |
984,765 | 1358de69aaa209fb62d0cdef313bf6a6aeb244eb | import torch
import numpy as np
import cv2
import network
GENERATOR_WEIGHTS = './model/generator-1000.pt'
IMG_FILEPATH = './street.jpg'
RESULT_FILEPATH = './result.png'
# Per-channel RGB mean subtracted from the network input.
RGB_MEAN = np.array([0.4560, 0.4472, 0.4155])

# Load the trained generator and put it in inference mode on the GPU.
generator = network.Generator()
generator.load_state_dict(torch.load(GENERATOR_WEIGHTS))
generator.cuda()
generator.eval()

img = cv2.imread(IMG_FILEPATH)
height, width, _ = img.shape
# Crop so both dimensions are divisible by 4 -- presumably the network's
# stride requirement; confirm against network.Generator.
height = height - height%4
width = width - width%4
img = img[:height, :width, :]
# OpenCV loads BGR; reverse the channel axis to get RGB.
img = np.array(img[...,::-1])

# Mask the central 50% x 50% region as the area to inpaint.
mask = np.zeros((1, height, width))
y1 = int(0.25 * height)
y2 = int(0.75 * height)
x1 = int(0.25 * width)
x2 = int(0.75 * width)
mask[:, y1: y2, x1: x2] = 1.0

with torch.no_grad():
    img = torch.FloatTensor(np.expand_dims(img, 0)).cuda()
    mask = torch.FloatTensor(np.expand_dims(mask, 0)).cuda()
    mean = torch.FloatTensor(RGB_MEAN).cuda()
    img = img/255.0
    # NHWC -> NCHW, mean-centred for the network input.
    img_input = (img - mean).permute(0, 3, 1, 2)
    img_original = img.permute(0, 3, 1, 2)
    # The generator receives the masked-out image plus the mask channel.
    generator_input = torch.cat((img_input *(1.0 - mask), mask), 1)
    raw_completed = generator(generator_input)
    # Keep original pixels outside the mask, generated pixels inside.
    completed_global = raw_completed*mask + img_original * (1.0-mask)
    completed_global = completed_global * 255.0

img = completed_global.data.cpu().numpy()[0]
img = img.transpose(1, 2, 0)
img = img[...,::-1].astype(np.uint8)
cv2.imwrite(RESULT_FILEPATH, img)
# BUG FIX: this was a Python 2 print *statement*, which is a SyntaxError
# under Python 3 (the syntax the rest of the file uses).
print('evaluation done')
|
984,766 | 7bfa2950cdca99a077d51ecd12dc8d25af092e49 | print('hey git') |
984,767 | 999e0ad22b70c46ca29125a7d0e5071a3de6c519 | import os
if __name__ == '__main__':
    # Invoke PyInstaller programmatically: bundle webApi2doc.py as a
    # one-folder (-D) distribution.
    from PyInstaller.__main__ import run
    opts=['webApi2doc.py','-D']
    run(opts)
|
984,768 | 0d75fade2f1d623f5e0b737fd4b5b5ceb13472af | import django
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'pincodesearch.settings'
django.setup()
from searchapp.models import PincodeRecord
# Read the raw CSV lines verbatim (trailing newlines kept so they compare
# equal to the reconstructed database lines below). Using `with` fixes the
# previously leaked file handle.
with open('pincode.csv') as file1:
    lines = set(file1.readlines())

# Rebuild comparable CSV lines from the database rows, skipping the first
# value (the auto-generated primary key).
dbdata = set()
dbvals = PincodeRecord.objects.all().values()
for val in dbvals:
    dbdata.add(','.join([str(i) for i in val.values()][1:])+'\n')

# CSV rows that are not yet present in the database.
missing = lines - dbdata
|
984,769 | f824f2f42746b43fd31af87a3b5f3d892ec42248 | #work without numpy, bc we cannot download numpy
import time
import busio
import board
import adafruit_amg88xx
import pickle
i2c = busio.I2C(board.SCL, board.SDA)
amg = adafruit_amg88xx.AMG88XX(i2c)
#shared = {"arr":amg.pixels}
#fp = open("shared.pkl", 'wb') #pickle to share amg.pixels array
#f = open("data.txt", "w+") #opens file to write thermal data to
while True:
    # Rewrite the whole thermal frame once a second; the web server reads
    # this file to display the latest readings. `with` guarantees the file
    # is closed even if a sensor read raises mid-frame.
    with open("/var/www/thermalData.txt", "w+") as f:
        for row in amg.pixels:
            for temp in row:
                # One decimal place per reading, space separated.
                f.write('{0:.1f}'.format(temp))
                f.write(" ")
            # End of sensor row.
            f.write("\n")
        # Blank line terminates the frame.
        f.write("\n")
    time.sleep(1)
|
984,770 | b6b9ff5896b60889f35cbccf91635b65c6bfb7a4 | custDB = [["0796299991","yang",0,1234]]
def output(sender_id,reciver_id,amountsent,transation,db):
    """Print both parties' confirmations for a transfer.

    Replaces the module-level customer table with *db* before printing.
    Rows look like the default ["0796299991", "yang", 0, 1234] -- presumably
    [phone, name, balance, pin]; ids are row indices. Confirm with callers.
    """
    global custDB
    custDB = db
    sender(amountsent,reciver_id,transation)
    reciver(amountsent,sender_id,transation)
def sender(amount,reciver_name,transation):
    # Prints the receiver-side confirmation. NOTE(review): despite its
    # name, reciver_name is used as a row index into custDB -- confirm.
    print(custDB[reciver_name][1],"has recived",amount,"transation number",transation)
def reciver(amount,sender_name,transation):
    # Prints the sender-side confirmation. NOTE(review): sender_name is
    # used as a row index into custDB, not a name -- confirm.
    print(custDB[sender_name][1],"has sent",amount,"transation number",transation)
#output(0,0,9,8)
|
984,771 | a707284f893f91491f312ef471ef93878d3919f7 | """
Methods and object to generate alignments between datasets
"""
from fcm.alignment.align_data import DiagonalAlignData, CompAlignData, FullAlignData
from fcm.alignment.cluster_alignment import AlignMixture
__all__ = ['DiagonalAlignData',
'CompAlignData',
'FullAlignData',
'AlignMixture',
]
|
984,772 | 32ccd2c3647e9adda01d9fd03bb758a211b3e8b2 | '''
equation module
'''
from __future__ import print_function, division, unicode_literals
import re
import six
import argparse
import sys
import os
from sympy import sympify, simplify
from collections import Counter
DESCRIPTION = '''
canonicalize equations
Transform equation into canonical form.
An equation can be of any order. It may contain any amount of variables and brackets.
The equation will be given in the following form:
P1 + P2 + ... = ... + PN
where P1..PN - summands, which look like:
ax^k
where a - floating point value;
k - integer value;
x - variable (each summand can have many variables).
For example:
"x^2 + 3.5xy + y = y^2 - xy + y"
should be transformed into: "x^2 - y^2 + 4.5xy = 0"
"x = 1" => "x - 1 = 0"
"x - (y^2 - x) = 0" => "2x - y^2 = 0"
"x - (0 - (0 - x)) = 0" => "0 = 0"
etc
explicit multiplication is acceptable: 2x and 2*x are valid terms
Python syntax for power operator is acceptable: x^2 and x**2 are the same
'''
EPILOG = '''
Copyright 2017 Serban Teodorescu.
Licensed under the MIT License
'''
VAR_NAMES = ['t', 'u', 'v', 'w', 'x', 'y', 'z']
POLY_SPLIT = re.compile(r'(\+|-|\(|\)|\[|\]|\{|\}|=)')
'''
:var POLY_SPLIT: split the polynomial so that we can treat each term separately
'''
POLY_VALID = re.compile(r'^[a-z0-9 =\-\+\*\^\(\)\[\]\{\}\.,]+$')
'''
:var POLY_VALID: characters that are acceptable
'''
POLY_TERM = re.compile(
r'''( # group match float in all formats
(\d+(\.\d*)?|\.\d+) # match numbers: 1, 1., 1.1, .1
([eE][-+]?\d+)? # scientific notation: e(+/-)2 (*10^2)
)? # 0 or one time
([{}]+)? # variables extracted from VAR_NAMES
# 0 or one time
(\^)? # exponentiation 0 or one time
(\d+)? # exponent 0 or one time
'''.format(''.join(VAR_NAMES)), re.VERBOSE)
'''
:var POLY_TERM: parse polynomial terms
'''
ALL_VARS_POLY_TERM = re.compile(
r'''( # group match float in all formats
(\d+(\.\d*)?|\.\d+) # match numbers: 1, 1., 1.1, .1
([eE][-+]?\d+)? # scientific notation: e(+/-)2 (*10^2)
)? # 0 or one time
([a-z]+)? # variables extracted from VAR_NAMES
# 0 or one time
(\^)? # exponentiation 0 or one time
(\d+)? # exponent 0 or one time
''', re.VERBOSE)
def get_args():
    """Parse the command line and return the options as a plain dict."""
    arg_parser = argparse.ArgumentParser(
        description='{}\nvalid variables: {}'.format(
            DESCRIPTION, ', '.join(VAR_NAMES)),
        epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        '-b', '--batch', action='store_true',
        help='process equations in batch')
    arg_parser.add_argument(
        '-i', '--input-file', action='store', default='equations.in',
        help='get the equations from this file in batch mode')
    arg_parser.add_argument(
        '-o', '--output-file', action='store', default='equations.out',
        help='write the canonicalized equations to this file in batch mode')
    return vars(arg_parser.parse_args())
def main(argv=None):
    """Entry point: interactive REPL, or batch processing of an input file."""
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    rgs = get_args()
    if not rgs['batch']:
        from six.moves import input
        # Interactive mode: canonicalize equations until interrupted.
        while True:
            equation = input('enter an equation>>> ')
            print(Equation(equation).canonicalize(), '\n')
    else:
        # Batch mode appends to the output file, so start it clean.
        # Narrowed from a bare `except: pass`, which also swallowed
        # KeyboardInterrupt and hid real bugs.
        try:
            os.remove(rgs['output_file'])
        except OSError:
            pass
        with open(rgs['input_file'], 'r') as fh:
            equations = fh.readlines()
        for equation in equations:
            Equation(equation).canonicalize_to_file(rgs['output_file'])
class NoEquationError(Exception):
    '''
    raised when no equation text is supplied to Equation
    '''
    pass
class InvalidEquationError(Exception):
    '''
    raised when the equation contains characters or character combinations
    that POLY_VALID / the structural checks reject
    '''
    pass
class InvalidTermInEquationError(Exception):
    '''
    raised when a polynomial term does not respect the rules
    (e.g. an exponentiation sign with no exponent)
    '''
    pass
class UnexpectedVariableNamesError(Exception):
    '''
    raised when a variable name outside VAR_NAMES is used
    '''
    pass
def process_term(token):
    '''
    prepare a polynomial term for symbolic computation

    the re.match will return 7 groups:

    * group 0 is the coefficient
    * group 4 is the variable or product of variables (x or xy)
    * group 5 is the exponentiation
    * group 6 is the exponent

    returns the term rewritten with explicit `*` multiplication and `**`
    power syntax so sympy can parse it
    '''
    coefficient = ''
    exponentiation = ''
    variables = ''
    term = re.match(POLY_TERM, token)
    # TODO: to detect variable names that we don't know about, we repeat the
    # match but with an all chars pattern in the variables group
    # begs the question: what happens with multichar variable names?
    # this is where implicit multiplication bites us; were it not permitted
    # this would never manifest itself
    no_term = re.match(ALL_VARS_POLY_TERM, token)
    if no_term.groups()[4] and not term.groups()[4]:
        # we have variables but they're unknown, grrrr
        raise UnexpectedVariableNamesError(
            'unexpected variable in term %s. accepted variable names are: %s' % (token, ', '.join(VAR_NAMES)))
    if term.groups()[5] and not term.groups()[6]:
        raise InvalidTermInEquationError(
            'exponentiation with no exponent in term %s' % term)
    if term.groups()[5] and term.groups()[6]:
        # use ** to force Python syntax
        exponentiation = '**{}'.format(term.groups()[6])
    if term.groups()[0] and not term.groups()[4]:
        # a pure numeric term: nothing else to assemble
        coefficient = term.groups()[0]
        return coefficient
    if term.groups()[0] and term.groups()[4]:
        # expand implicit multiplication between coefficient and variable
        coefficient = '{}*'.format(term.groups()[0])
    if term.groups()[4]:
        # single known variable: pass it through unchanged
        if Counter(
                [var_name in term.groups()[4] for
                 var_name in VAR_NAMES])[True] == 1:
            variables = term.groups()[4]
        if Counter(
                [var_name in term.groups()[4] for
                 var_name in VAR_NAMES])[True] > 1:
            # we have a multivariable term here and we need to expand the
            # implicit multiplication between vars
            # Counter returns a dictionary {True: #inlist, False: #notinlist}
            variables = '*'.join(
                [var_name for var_name in VAR_NAMES
                 if var_name in term.groups()[4]])
    return '{}{}{}'.format(coefficient, variables, exponentiation)
class Equation(object):
    '''
    represents a polynomial equation and canonicalizes it to "<poly> = 0"
    '''
    def __init__(self, equation=None):
        '''
        :arg str equation: raw equation text, e.g. "x^2 + 3.5xy = y"

        validates and sanitizes the input, then splits the rebuilt
        equation into its two sides around the '=' sign
        '''
        if equation is None:
            raise NoEquationError('must provide an equation')
        if not isinstance(equation, six.text_type):
            # not a string? coerce it
            # use six.text_type to handle both python 2 and python 3
            equation = str(equation)
        self.equation = equation
        self._validate_equation()
        self._sanitize_equation()
        # NOTE(review): an equation with no '=' at all raises a plain
        # ValueError on this unpack rather than a domain error -- confirm.
        self.left_hand_side, self.right_hand_side = self._process_equation(
            ).split('=')

    def canonicalize(self):
        '''
        returns the canonical form of the equation

        also convert to the required syntax: implicit multiplication and ^ for
        power ops
        '''
        ret = '{} = 0'.format(
            simplify(
                sympify(self.left_hand_side) - sympify(self.right_hand_side)))
        return ret.replace('**', '^').replace('*', '')

    def canonicalize_to_file(self, file_name):
        '''
        append the canonical form of this equation as one line of file_name
        '''
        with open(file_name, 'a') as fh:
            fh.write('{}\n'.format(self.canonicalize()))

    def _process_equation(self):
        '''
        rebuild the equation after validating each term

        this is where we expand implied multiplications and use correct
        Python syntax for exponentiation
        '''
        processed_equation = ''
        for token in re.split(POLY_SPLIT, self.equation):
            if not token:
                continue
            # operators, brackets and '=' pass through untouched
            if token in ['+', '-', '=', '(', ')', '[', ']', '{', '}']:
                processed_equation += token
                continue
            # now it gets interesting
            processed_equation += process_term(token)
        return processed_equation

    def _sanitize_equation(self):
        '''
        replace Python style exponentiation (**) with caret (^); we will
        revert that later

        remove white space
        #TODO: all white space, not just spaces

        remove explicit multiplication, it makes parsing easier; we will
        revert that later as well
        '''
        self.equation = self.equation.replace('**', '^')
        self.equation = self.equation.replace('*', '')
        self.equation = self.equation.replace(' ', '')

    def _validate_equation(self):
        '''
        there are some characters or character combinations that are just not
        allowed
        '''
        if not re.match(POLY_VALID, self.equation):
            raise InvalidEquationError(
                'bad characters in equation %s' % self.equation)
        if self.equation.count('=') > 1:
            raise InvalidEquationError(
                'cannot have more than one = sign in equation %s'
                % self.equation)
        if self.equation.count('++'):
            raise InvalidEquationError(
                'repeated + sign in equation %s' % self.equation)
        if self.equation.count('--'):
            raise InvalidEquationError(
                'repeated - sign in equation %s' % self.equation)
        if self.equation.count('+-') or self.equation.count('-+'):
            raise InvalidEquationError(
                '+- or -+ sign combination in equation %s' % self.equation)
        if self.equation.count('^^'):
            raise InvalidEquationError(
                'unknown operation ^^ in equation %s' % self.equation)
        if self.equation.count('***'):
            raise InvalidEquationError(
                'unknown operation *** in equation %s' % self.equation)
if __name__ == "__main__":
main()
|
984,773 | 9bf81ec9c9012f26f76b817ad008edcf5d91174e | # """
# This is BinaryMatrix's API interface.
# You should not implement it, or speculate about its implementation
# """
#class BinaryMatrix(object):
# def get(self, row: int, col: int) -> int:
# def dimensions(self) -> list[]:
class Solution:
    def leftMostColumnWithOne(self, binaryMatrix: 'BinaryMatrix') -> int:
        """Return the index of the leftmost column containing a 1, or -1.

        Each row is assumed non-decreasing (0s then 1s), so the first 1 in
        a row is found by binary search; the smallest such column over all
        rows is the answer.
        """
        rows, columns = binaryMatrix.dimensions()
        # Sentinel: one past the last column means "no 1 seen yet".
        best = columns
        for r in range(rows):
            lo, hi = 0, columns - 1
            # Binary search for the first non-zero cell in this row.
            while lo < hi:
                mid = (lo + hi) // 2
                if binaryMatrix.get(r, mid) == 0:
                    lo = mid + 1
                else:
                    hi = mid
            if binaryMatrix.get(r, lo) == 1 and lo < best:
                best = lo
        return best if best != columns else -1
|
984,774 | ad84044f97f0ed27b02b9c123de79f1b8563209a | # -*- coding: utf-8 -*-
def main():
    """Count paths from cell 1 to cell n using k jump ranges, mod 998244353.

    dp[i] = number of ways to reach cell i. Each range [l, r] lets a path
    at i jump to any cell in [i+l, i+r]; those interval additions are
    applied lazily through a difference array (`imos`) so every range
    costs O(1) per cell instead of O(range length).
    """
    import sys
    # Competitive-programming idiom: rebinding input() to readline is much
    # faster for large inputs (deliberately shadows the builtin).
    input = sys.stdin.readline
    n, k = map(int, input().split())
    mod = 998244353
    left = [0 for _ in range(k)]
    right = [0 for _ in range(k)]
    dp = [0 for _ in range(n + 10)]
    dp[1] = 1
    imos = [0 for _ in range(n + 10)]
    for i in range(k):
        li, ri = map(int, input().split())
        left[i] = li
        right[i] = ri
    for i in range(1, n + 1):
        # Fold the pending difference-array contribution into dp[i].
        dp[i] += imos[i]
        dp[i] %= mod
        for l, r in zip(left, right):
            # Mark the reachable interval [i+l, i+r] with a +dp[i]/-dp[i]
            # pair; cells beyond n are ignored.
            next_left = i + l
            next_right = i + r + 1
            if next_left > n:
                continue
            imos[next_left] += dp[i]
            imos[next_left] %= mod
            if next_right > n:
                continue
            imos[next_right] -= dp[i]
            imos[next_right] %= mod
        # Running prefix sum turns the difference array into actual values.
        imos[i + 1] += imos[i]
        imos[i + 1] %= mod
    print(dp[n])
|
984,775 | f4f256e8a69283fb567df0fd6abe9c411ded396b | from .elf_int_8_bitmask import ElfInt8BitMask
from .elf_int_16_bitmask import ElfInt16BitMask
from .elf_int_32_bitmask import ElfInt32BitMask
from .elf_int_64_bitmask import ElfInt64BitMask
from .elf_int_n_bitmask import ElfIntNBitMask |
984,776 | 5a2583d08d1262cd9097dacf2582b7d92d8a616e | from src import app, mongo
from flask import render_template, jsonify, json, request
from flask_restplus import Resource, fields
from bson import json_util, errors
from bson.objectid import ObjectId
from .user import namespace
post_fields = namespace.model("Post", {"title": fields.String, "content": fields.String })
@namespace.route('/post')
class Post(Resource):
    """Collection resource: list all non-deleted posts / create a post."""

    @namespace.doc(description='<h3>list all posts</h3>')
    def get(self):
        """Return every post that has not been soft-deleted."""
        try:
            post_collections = mongo.db.posts
            # Round-trip through json_util so BSON types (ObjectId, dates)
            # serialize cleanly.
            return [json.loads(json_util.dumps(doc, default=json_util.default))
                    for doc in post_collections.find({"isDelete": False})]
        except Exception as e:
            return {"error": str(e)}

    @namespace.doc(description='create a new post')
    @namespace.expect(post_fields)
    def post(self):
        """Create a new post; new posts start out not soft-deleted."""
        try:
            post_info = request.get_json()
            post_info["isDelete"] = False
            # Collection.insert() is deprecated (removed in PyMongo 4);
            # insert_one() is the supported equivalent for a single document.
            mongo.db.posts.insert_one(post_info)
            return 'added post'
        except Exception as e:
            return {"error": str(e)}
@namespace.route('/post/<string:id>')
class SinglePost(Resource):
    """Item resource: fetch / update / soft-delete one post by ObjectId string."""

    @namespace.doc(description='get a single post')
    def get(self, id):
        """Return the post with the given id if it is not soft-deleted."""
        try:
            post_collections = mongo.db.posts
            return [json.loads(json_util.dumps(doc, default=json_util.default))
                    for doc in post_collections.find({"_id": ObjectId(id), "isDelete": False})]
        except Exception as e:
            return {"error": str(e)}

    @namespace.doc(description='update a post')
    def patch(self, id):
        """Apply the JSON body as a partial update to the post.

        BUG FIX: the route supplies `id`, but the original method did not
        accept it and its body resolved `id` to the builtin id() function.
        Also uses update_one() instead of the deprecated update().
        """
        try:
            post_info = request.get_json()
            mongo.db.posts.update_one({"_id": ObjectId(id)}, {"$set": post_info})
            return {"msg": 'updated post'}
        except Exception as e:
            return {"error": str(e)}

    @namespace.doc(description='Delete a post')
    def delete(self, id):
        """Soft-delete the post by flagging isDelete.

        BUG FIX: the route only provides `id`; the original signature also
        required `commentId` (a guaranteed TypeError) and soft-deleted the
        wrong document by filtering on commentId.
        """
        try:
            mongo.db.posts.update_one({"_id": ObjectId(id)}, {"$set": {"isDelete": True}})
            return 'post is deleted'
        except Exception as e:
            return {"error": str(e)}
984,777 | f74e1eb645de6b8cb596875bbda3f6c3cbec9766 | # Generated by Django 3.1.4 on 2021-05-01 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Converts submission.speaker from a true ForeignKey into a plain integer
    # column: the database keeps the (constraint-free, indexed) FK column while
    # Django's model state sees only an IntegerField named speaker_id.

    dependencies = [
        ('submissions', '0011_submission_status'),
    ]
    operations = [
        migrations.SeparateDatabaseAndState(
            # What actually runs against the database: drop the FK constraint
            # but keep the column and its index.
            database_operations=[
                migrations.AlterField(
                    model_name='submission',
                    name='speaker',
                    field=models.ForeignKey('users.User', db_constraint=False, db_index=True, null=False, on_delete=models.PROTECT)
                ),
            ],
            # What Django's migration state records: the FK is replaced by a
            # bare integer field (no automatic joins or cascade handling).
            state_operations=[
                migrations.RemoveField(
                    model_name='submission',
                    name='speaker',
                ),
                migrations.AddField(
                    model_name='submission',
                    name='speaker_id',
                    field=models.IntegerField(verbose_name='speaker'),
                ),
            ]
        ),
    ]
|
984,778 | 87517b13da7cf809b6c204c1a5b4ac7a56816ccd | import pylab
import random
def showDiscreteUniform(a, b, numPoints):
    """Sample `numPoints` integers uniformly from [a, b] and show a
    100-bin normalized histogram of the draws.
    """
    points = [random.randint(a, b) for _ in range(numPoints)]
    pylab.figure()
    # BUG FIX: `normed` was deprecated and then removed from matplotlib's
    # hist(); `density=True` is the modern equivalent.
    pylab.hist(points, 100, density=True)
    pylab.title('Discrete Uniform distribution with ' + str(numPoints) + " points")
    pylab.show()


if __name__ == '__main__':
    # Guarded so importing this module doesn't pop up a plot.
    showDiscreteUniform(1, 100, 100000)
|
984,779 | 0ac0ba7019cdf8dfea4b4c571df6ba3005fe4f29 | """
Run additional tasks around dataset anonymization.
A framework for running additional tasks using the datasets that will be
anonymized. Like Unix commands, a pipeline consists of a list of Filters.
A Filter is a single part of the pipeline that has an opportunity to act
1. before any datasets are anonymized,
2. before each dataset is anonymized,
3. after each dataset has been anonymized, and finally
4. after all the datasets have been anonymized
For each "before" stage, the filters will be executed in the order they were
added to the pipeline, and for each "after" stage, the Filters will be executed
in reverse order.
If a Pipeline is created with two Filters
>>> pipeline = Pipeline()
>>> pipeline.add(Filter1())
>>> pipeline.add(Filter2())
And run as an anonymization session on two datasets, the following calls would be made:
* Filter1.before_any()
* Filter2.before_any()
* Filter1.before_each(dataset1)
* Filter2.before_each(dataset1)
* Filter2.after_each(dataset1)
* Filter1.after_each(dataset1)
* Filter1.before_each(dataset2)
* Filter2.before_each(dataset2)
* Filter2.after_each(dataset2)
* Filter1.after_each(dataset2)
* Filter2.after_all()
* Filter1.after_all()
"""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import pydicom
class Filter:
    """Actions to run around dataset anonymization.

    Base class: every hook is a deliberate no-op so subclasses override only
    the stages they care about. See the module docstring for call ordering.
    """
    def before_any(self) -> None:
        """Run before any datasets are anonymized."""
    def before_each(self, dataset: pydicom.dataset.Dataset) -> None:
        """Run on each dataset before it is anonymized."""
    def after_each(self, dataset: pydicom.dataset.Dataset) -> None:
        """Run on each dataset after it has been anonymized."""
    def after_all(self) -> None:
        """Run after all datasets have been anonymized."""
class Pipeline:
    """An ordered collection of Filter hooks run around dataset anonymization."""

    def __init__(self) -> None:
        """Start with no filters registered."""
        self.filters: list[Filter] = []

    def add(self, new_filter: Filter) -> None:
        """
        Register a filter at the end of the pipeline.

        Its "before" hooks run after those of previously-added filters;
        its "after" hooks run before them (LIFO, like nested contexts).
        """
        self.filters.append(new_filter)

    def before_any(self) -> None:
        """Invoke every filter's before_any hook, in registration order."""
        for registered in self.filters:
            registered.before_any()

    def before_each(self, dataset: pydicom.dataset.Dataset) -> None:
        """Invoke every filter's before_each hook on *dataset*, in registration order."""
        for registered in self.filters:
            registered.before_each(dataset)

    def after_each(self, dataset: pydicom.dataset.Dataset) -> None:
        """Invoke every filter's after_each hook on *dataset*, in reverse registration order."""
        for registered in reversed(self.filters):
            registered.after_each(dataset)

    def after_all(self) -> None:
        """Invoke every filter's after_all hook, in reverse registration order."""
        for registered in reversed(self.filters):
            registered.after_all()
|
984,780 | 6a0d5a957e47dc7cfaf12c365a3e39b3285138c8 | """
This module contains some utility functions for the SetAPI.
"""
import os
import re
from installed_clients.DataFileUtilClient import DataFileUtil
def check_reference(ref):
    """
    Returns True if ref looks like an actual object reference: xx/yy/zz or xx/yy
    Returns False otherwise.
    """
    # Raw string: '\d' inside a plain string is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError eventually). '/' needs no escape.
    obj_ref_regex = re.compile(r"^((\d+)|[A-Za-z].*)/((\d+)|[A-Za-z].*)(/\d+)?$")
    return ref is not None and obj_ref_regex.match(ref) is not None
def build_ws_obj_selector(ref, ref_path_to_set):
    """Build a workspace object selector dict.

    When a reference path is supplied, the selector's 'ref' is the
    semicolon-joined path; otherwise it is the bare ref.
    """
    if not ref_path_to_set:
        return {'ref': ref}
    return {'ref': ';'.join(ref_path_to_set)}
def populate_item_object_ref_paths(set_items, obj_selector):
    """
    Called when include_set_item_ref_paths is set.

    Mutates each item in `set_items` in place, adding a 'ref_path' field of
    the form "<selector ref>;<item ref>", and returns the same list.
    """
    prefix = obj_selector['ref']
    for item in set_items:
        item["ref_path"] = f"{prefix};{item['ref']}"
    return set_items
def dfu_get_obj_data(obj_ref):
    """Fetch the `data` payload of a single object via DataFileUtil.

    The callback URL is taken from the SDK_CALLBACK_URL environment variable
    (raises KeyError if unset).
    """
    dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'])
    response = dfu.get_objects({"object_refs": [obj_ref]})
    return response['data'][0]['data']
|
984,781 | bcb9987bae2ef20f825da03711dee64927985015 | import re
# Read the corpus, then list every lowercase word starting with "me".
# Context manager guarantees the handle is closed even if read() fails
# (the original open/read/close leaked the handle on error).
with open("Indonesia.txt", "r") as wiki:
    teks = wiki.read()
print(re.findall(r'me\w+', teks.lower()))
|
984,782 | cdc662d244e082473a6d898d9463efb2b2368075 | # cravings
from setting import *
from food import *
if __name__ == '__main__':
    # run with "python3 cravings.py"
    print('-'*80)
    # Renamed locals: the original bound `set` (shadowing the builtin) and
    # rebound `recipe` (clobbering the imported recipe() function).
    prefs = setting()
    ingr, exclude = find_food(prefs)
    found = recipe(ingr, exclude)
    describe(prefs, found)
    # print(found.get_label())
984,783 | c30953900abc6e490e8d23f2f099fa81f9260e48 | # -*- coding: utf-8 -*-
"""HW4Q2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sK_3UgHBi2zRe1ARdHn2wpy24xKljFhC
"""
!pip install syft
! pip install prettytable
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import syft as sy
import random
from prettytable import PrettyTable
class Arguments():
    # Hyper-parameters and switches for the federated MNIST experiment.
    def __init__(self):
        self.batch_size = 128  # training mini-batch size
        self.test_batch_size = 1000  # evaluation mini-batch size
        self.epochs = 3  # default training epochs per experiment run
        self.lr = 0.01  # SGD learning rate
        self.momentum = 0.5  # SGD momentum (declared but not passed to the optimizer below)
        self.no_cuda = True  # force CPU even when CUDA is available
        self.seed = 200205699  ## TODO change seed to your studentID inside the class Arguments (line 17)
        self.log_interval = 30  # batches between loss retrievals
        self.save_model = False  # persist weights to mnist_cnn.pt when True
class Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel MNIST digits (10 classes)."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # conv -> relu -> 2x2 max-pool, twice; then two fully-connected layers.
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        out = F.relu(self.fc1(out))
        # Log-probabilities, pairing with F.nll_loss during training.
        return F.log_softmax(self.fc2(out), dim=1)
def train(args, model, device, federated_train_loader, optimizer, epoch, participates):
    """One federated training epoch: for each remote batch whose owning worker
    id is in `participates`, ship the model to that worker, take one SGD step
    there, and pull the updated model back.
    """
    model.train() # <-- initial training
    for batch_idx, (data, target) in enumerate(federated_train_loader): # <-- now it is a distributed dataset
        # Only train on batches held by the randomly selected participant nodes.
        if target.location.id in participates:
            model.send(data.location) # <-- NEW: send the model to the right location
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()
            model.get() # <-- NEW: get the model back
            if batch_idx % args.log_interval == 0:
                loss = loss.get() # <-- NEW: get the loss back
                #print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * args.batch_size, len(federated_train_loader) * args.batch_size, 100. * batch_idx / len(federated_train_loader), loss.item()))
def test(args, model, device, test_loader):
    """Evaluate `model` on `test_loader`; return an accuracy summary string
    of the form "correct/total(pct%)". `args` is accepted but unused.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    # Accuracy summary string, e.g. "9514/10000(95.14%)".
    temp = str(correct) + '/' + str(len(test_loader.dataset)) + '(' + str(100. * correct / len(test_loader.dataset)) + '%)'
    '''print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))'''
    return temp
### main function
args = Arguments()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
hook = sy.TorchHook(torch) # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning
## TODO: ---- create 10 node workers ---- ##
# Virtual workers simulate 10 remote federated-learning participants in-process.
node1 = sy.VirtualWorker(hook, id="node1")
node2 = sy.VirtualWorker(hook, id="node2")
node3 = sy.VirtualWorker(hook, id="node3")
node4 = sy.VirtualWorker(hook, id="node4")
node5 = sy.VirtualWorker(hook, id="node5")
node6 = sy.VirtualWorker(hook, id="node6")
node7 = sy.VirtualWorker(hook, id="node7")
node8 = sy.VirtualWorker(hook, id="node8")
node9 = sy.VirtualWorker(hook, id="node9")
node10 = sy.VirtualWorker(hook, id="node10")
##-------------------------------------------
## distribute data across nodes
# MNIST training data is partitioned across the 10 virtual workers.
federated_train_loader = sy.FederatedDataLoader( # <-- this is now a FederatedDataLoader
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ]))
    .federate((node1,node2,node3,node4,node5,node6,node7,node8,node9,node10)), ##TODO: pass the worker nodes you created here to distribute the data
    batch_size=args.batch_size, shuffle=True, **kwargs)
## test dataset is always same at the central server
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)
## training models in a federated approach
#model = Net().to(device)
#optimizer = optim.SGD(model.parameters(), lr=args.lr)
## TODO: select a random set of node ids that will be passed to the training function; these nodes will participate in the federated learning
#create node_list
def createnode_list(k):
    """Return `k` distinct worker ids drawn at random from the 10 node names."""
    all_nodes = ('node1', 'node2', 'node3', 'node4', 'node5',
                 'node6', 'node7', 'node8', 'node9', 'node10')
    return random.sample(all_nodes, k)
##-------------------------------------------
'''
def weight_init(m):
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
        nn.init.zeros_(m.bias)
'''
# Experiment 1: vary the number X of participating nodes with epochs fixed at N=3.
nodenum = [3,5,7,10]
t = PrettyTable(['X', 'Accuracy (when N=3)'])
for num in nodenum:
    # randomly select X nodes to participate in the learning process
    node_ids = createnode_list(num)
    #new untrained model
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    #reinitialize weights
    #model.apply(weight_init)
    accuracy = []
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, federated_train_loader, optimizer, epoch, node_ids) ## TODO: pass the node_id list like ['node1','node2' ...]
        accuracy.append(test(args, model, device, test_loader))
    # Only the final epoch's accuracy is reported per row.
    t.add_row([num,accuracy[-1]])
print(t)
# Experiment 2: vary the number of epochs N with participants fixed at X=5.
epochlist = [args.epochs, args.epochs + 2, args.epochs + 7]
y = PrettyTable(['N', 'Accuracy (when X=5)'])
for epochval in epochlist:
    # randomly select 5 nodes to participate in the learning process
    node_ids = createnode_list(5)
    #new untrained model
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    #reinitialize weights
    #model.apply(weight_init)
    accuracy2 = []
    for epoch in range(1, epochval + 1):
        train(args, model, device, federated_train_loader, optimizer, epoch, node_ids) ## TODO: pass the node_id list like ['node1','node2' ...]
        accuracy2.append(test(args, model, device, test_loader))
    y.add_row([epochval,accuracy2[-1]])
print(y)
# Optionally persist the last trained model's weights.
if (args.save_model):
torch.save(model.state_dict(), "mnist_cnn.pt") |
984,784 | 71daa5c1c0825fcaab48eb6d7d9171664c893c79 | #!/usr/bin/env python
import commands
import sys
def gcd(a, b):
    """Compute the greatest common divisor of a and b (Euclid's algorithm)."""
    while b > 0:
        b, a = a % b, b
    return a
def lcm(a, b):
    """Compute the lowest common multiple of a and b.

    BUG FIX: uses floor division. a*b is always exactly divisible by
    gcd(a, b), and `//` keeps the result an int under Python 3, where
    plain `/` would return a float.
    """
    return a * b // gcd(a, b)
# Abort the whole run (exit status 1) when a shell command reported failure.
# NOTE(review): Python 2 print-statement syntax; the misspelled name is kept
# as-is because the script below calls `faile_check`.
def faile_check(fail):
    if fail:
        print 'Failed\n'
        sys.exit(1)
# Input: Each task utilization, Each task execution time
# Output: Each task periods
def Task_Periods(Each_task_U, Each_task_C):
    """Return [T1, T2], each period being round(C / U) as an int."""
    util1, util2 = float(Each_task_U[0]), float(Each_task_U[1])
    exec1, exec2 = float(Each_task_C[0]), float(Each_task_C[1])
    return [int(round(exec1 / util1)), int(round(exec2 / util2))]
# delete content of a file
def deleteContent(fName):
    """Truncate the file at fName to zero bytes (creating it if absent)."""
    # Opening in 'w' mode truncates on open; nothing needs to be written.
    with open(fName, 'w'):
        pass
# --- Configuration: where TOSSIM logs are written and results collected ---
# Initialize file names that the extracted log data is stored at.
file_name = []
file_path = "/home/sihoon/WCPSv3-master/Sihoon_ex2/Log_file/"
result_file_name = "LinkQuality_Result.txt"
result_file_path = "/home/sihoon/WCPSv3-master/Sihoon_ex2/Result_file/"
average_file_name = "Average.txt"
average_file_path = "/home/sihoon/WCPSv3-master/Sihoon_ex2/Result_file/"
### Task Period Setting ###
# it should be harmonic of all task periods
Each_task_T = [25,25]
Task_ReTx = [1, 0]
### Initilization ###
TOSSIM_simulation_times = 100;
# Index
NODEID = 0
FLOWID = 1
RCV_COUNT = 2
SUPERFRAME_LEN = lcm(Each_task_T[0], Each_task_T[1]) # check slot0 ~ slot19
# receive node id
Task1_destination = 51 # topology: 1->3->4->51
Task2_destination = 52 # topology: 2->3->4->52
# Task id
Hi_task_id = 1
Lo_task_id = 2
### Execute TOSSIM and store the result ###
# Start the result file fresh, then record the period configuration.
deleteContent(result_file_path + result_file_name)
result_f = open(result_file_path + result_file_name, 'a')
result_f.write("Each Task Period:%s\n\n"%(Each_task_T))
result_f.close()
print("Each Task Period:%s"%(Each_task_T))
print("SUPERFRAME_LEN:%s"%(SUPERFRAME_LEN))
# Run the TOSSIM event server once per simulation, redirecting its output
# into TestN.txt in the log folder.
for N in range(TOSSIM_simulation_times):
    exe_file_name = "Test"+str(N)+".txt"
    deleteContent(file_path + exe_file_name)
    print(exe_file_name)
    fail, output = commands.getstatusoutput("python tossim-event-server.py" +" "+str(Each_task_T[0])+" "+str(Task_ReTx[0])+" "+str(Each_task_T[1])+" "+str(Task_ReTx[1]) + " >>"+str(file_path)+str(exe_file_name))
    faile_check(fail)
    #print output
### Gather results from N Test files to a file ###
file_name = []
for file_idx in range(TOSSIM_simulation_times):
    tmp_name = "Test"+str(file_idx)+".txt"
    #print("tmp_name:%s"%(tmp_name))
    file_name.append(tmp_name)
# Extract essential data in a simulation: scan each log and keep the
# Tx/Rx counters seen before the last "---Task_period_End---" marker.
for fname in file_name:
    # Check file existence
    try:
        f = open(file_path + fname,'r')
    except:
        print("No file:%s"%(fname))
        exit(1)
    # total result variable
    tmp_Tx_count = 0
    Tx_count = 0
    tmp_Rx_count = 0
    Rx_count = 0
    lines = f.readlines()
    # Store last line Data in a file
    for line in lines:
        # file each line: Node id, flow id, rcv_count, rcv_count_at_slot1..slot9
        line_list = line.split()
        if line_list:
            # check node id
            if line_list[0] == "Nodeid:":
                tmp_Tx_count = line_list[3]
            elif line_list[0] == "RxCount:":
                tmp_Rx_count = line_list[1]
            elif line_list[0] == "---Task_period_End---":
                Tx_count = tmp_Tx_count
                Rx_count = tmp_Rx_count
    # File Close
    f.close()
    # Store total result in a file
    result_f = open(result_file_path + result_file_name, 'a')
    result_f.write("Tx_count, Rx_count: %s %s\n"%(Tx_count, Rx_count))
    result_f.close()
# Average N simulation results: re-read the result file and aggregate.
result_f = open(result_file_path + result_file_name, 'r')
lines = result_f.readlines()
Total_count = 0
Task2_e2e_delay = [0 for _ in range(SUPERFRAME_LEN)]
Task2_e2e_delay_aver = [0 for _ in range(SUPERFRAME_LEN)]
Task2_e2e_delay_percentage = [0 for _ in range(SUPERFRAME_LEN)]
Task2_e2e_rcv_count = 0
Task2_e2e_rcv_count_aver = 0
Tx_count = 0
Rx_count = 0
Tx_count_aver = 0
Rx_count_aver = 0
PDR = 0
for line in lines:
    line_list = line.split()
    if line_list:
        # check task id
        #print(line_list)
        #print(line_list[0])
        if line_list[0] == "Tx_count,":
            Total_count = Total_count + 1
            Tx_count = Tx_count + int(line_list[2])
            Rx_count = Rx_count + int(line_list[3])
print("Tx_count:%s"%(Tx_count))
print("Rx_count:%s"%(Rx_count))
print("Total_count:%s"%(Total_count))
# Packet delivery ratio = average received / average transmitted.
Tx_count_aver = float(Tx_count)/float(Total_count)
Rx_count_aver = float(Rx_count)/float(Total_count)
PDR = Rx_count_aver/Tx_count_aver
print("Tx_count_aver:%s"%(Tx_count_aver))
print("Rx_count_aver:%s"%(Rx_count_aver))
print("PDR:%s"%(PDR))
result_f.close()
|
984,785 | 9982450d70d9f4804421e937f923ca504cbaf115 | str = str(input("Enter string to be manipulated: "))
output = ''
i = 0
while i < len(str):
if i + 1 < len(str):
output = output + str[i + 1]
output = output + str[i]
i = i + 2
print('Given String: ' + str)
print('Swapped String: ' + output) |
984,786 | 12271b94a8e66e0f4f0aea8388604b6a74632daa | from flask import request, jsonify
from robot import application as app
from robot import motion_control
# Maps client command strings to the motion_control handler that performs them.
actions = {
    'forward': motion_control.forward,
    'stop': motion_control.stop,
    'left': motion_control.left,
    'right': motion_control.right,
    'reverse': motion_control.reverse,
    'forward_steer': motion_control.steer_forward_2,
    'reverse_steer': motion_control.steer_reverse_2
}
@app.route('/command', methods=['POST','PUT'])
def command():
    """Dispatch a JSON movement command to the matching motion_control action."""
    payload = request.get_json()
    # print(payload)
    action = payload['action']
    # Optional throttle parameters default to 1 when absent.
    gas = payload.get('gas', 1)
    multiplier = payload.get('multiplier', 1)
    if action in ('forward_steer', 'reverse_steer'):
        # Steering commands additionally carry an optional angle.
        degree = payload.get('degree', 0)
        actions[action](degree, gas, multiplier)
    elif action in actions:
        actions[action](multiplier)
    return jsonify({'status':'ok'})
984,787 | 2a6a1f1947a6e82ea6d2ffb20b60c4c9bb738d59 | import pygame
# Rendering geometry: W-pixel square blocks drawn inside Wpadded-pixel cells.
W = 25
Wpadded = W + 6  # cell pitch: block size plus a 6-pixel gutter
# Board dimensions in cells.
hor = 15
ver = 15
HEIGHT = hor * Wpadded
WIDTH = ver * Wpadded
finished = False  # NOTE(review): unused in this chunk; presumably the main-loop exit flag
# RGB palette.
WHITE = (255, 255, 255)
GRAY = (51, 51, 51)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
def makeGrid(x, y):
    """Build a y-row by x-column grid (list of rows) of gray Blocks."""
    return [[Block(column, row, GRAY) for column in range(x)] for row in range(y)]
class Block:
    """One grid cell, addressed by grid coordinates (actx, acty) and drawn at
    pixel coordinates (x, y)."""

    def __init__(self, x, y, color):
        self.color = color
        # Reuse updateXY so the grid->pixel conversion lives in one place
        # (the original duplicated the formula in both methods).
        self.updateXY(x, y)

    def updateXY(self, x, y):
        """Move the block to grid cell (x, y), recomputing its pixel position."""
        self.actx = x
        self.acty = y
        # Wpadded is the cell pitch (W + 6); the +3 offset centres the
        # W-pixel block within its padded cell.
        self.x = Wpadded*x + 3
        self.y = Wpadded*y + 3

    def draw(self, screen):
        """Render the block as a W-by-W square on the pygame surface."""
        pygame.draw.rect(screen, self.color, [self.x, self.y, W, W])
class Snake:
    """The player snake: a head Block, a list of tail Blocks, and a direction."""

    def __init__(self, x, y):
        self.head = Block(x, y, WHITE)
        self.tail = []
        self.dir = (0, 0)

    def checkHit(self):
        """Return True when the head occupies the same pixel as any tail part."""
        return any(part.x == self.head.x and part.y == self.head.y
                   for part in self.tail)

    def move(self):
        """Advance one step: each tail segment takes its predecessor's cell,
        then the head moves by `dir` with wraparound at the board edges."""
        dx, dy = self.dir
        if self.tail:
            # Shift back-to-front so no segment overwrites a cell it still needs.
            for i in range(len(self.tail) - 1, 0, -1):
                predecessor = self.tail[i - 1]
                self.tail[i].updateXY(predecessor.actx, predecessor.acty)
            self.tail[0].updateXY(self.head.actx, self.head.acty)
        self.head.updateXY((self.head.actx + dx) % hor, (self.head.acty + dy) % ver)

    def draw(self, screen):
        """Draw the head, then every tail segment."""
        self.head.draw(screen)
        for segment in self.tail:
            segment.draw(screen)
|
984,788 | 44d9f363fba172d9a7af07a21b19f229a561a2b7 | from django.test import TestCase,Client
from .models import Lamp_historique, Lamp
from datetime import datetime
class LampTest(TestCase):
    """Model tests for Lamp and its Lamp_historique entries."""

    def setUp(self):
        lamp = Lamp.objects.create(name='LTY F', station='station1', coord_X_Y='POINT(-95.3385 29.7245)')
        Lamp_historique.objects.create(lamp=lamp, total=20, number_off_lamp_Off=10, number_off_lamp_On=10, created_At=datetime.now(), hasCamera=False, hasWifi=True, comment='hahahah')
        Lamp_historique.objects.create(lamp=lamp, total=20, number_off_lamp_Off=10, number_off_lamp_On=10, created_At=datetime.now(), hasCamera=False, hasWifi=True, comment='hahahah')

    # BUG FIX: the unittest runner only discovers methods whose names start
    # with "test"; the original names (getLampObject, getlatestLamphistorique)
    # were silently never executed.
    def test_get_lamp_object(self):
        """The first history entry must point back at the created lamp."""
        lamp = Lamp.objects.all().first()
        historique = Lamp_historique.objects.all().first()
        self.assertEqual(lamp, historique.lamp)

    def test_get_latest_lamp_historique(self):
        """Descending-order first() must match latest() by created_At."""
        newest = Lamp_historique.objects.all().order_by('-created_At').first()
        last_created = Lamp_historique.objects.all().order_by('created_At').latest().created_At
        self.assertEqual(newest.created_At, last_created)
|
984,789 | 005433db247850157bd0aa860e9b614758dd9756 | '''Write a Python program to get a string made of the first 2 and the last 2 chars from a given a string. If the string length is less than 2, return instead of the empty string.'''
#Print out program purpose
print("This program will ask you to enter a string and then will print out the first 2 characters and the last 2 characters")
#Ask user to enter a string
string = input("Enter whatever you want for this string: ")
#Create a function that will return nothing if the string is less than 2 or return the first 2 and last 2 chars from the string
def stringBothEnds(str):
if len(str) < 2:
return ''
return string[0:2] + string[-2:]
#Print the string using the created function
print(stringBothEnds(string))
|
984,790 | 6456c912cea1159b4a364af161c66a9573ae1236 | import urllib.request
import urllib.parse
import json
import pandas as pd
import requests
import io
import tqdm
from pathlib import Path
import os
import shutil
from hievpy.utils import *
# ----------------------------------------------------------------------------------------------------------------------
# Generic functions
# ----------------------------------------------------------------------------------------------------------------------
def search(api_token, base_url, search_params):
    """ Returns a list of HIEv records matching a set of input search parameters.

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - base_url: Base URL of the HIEv/Diver instance, e.g. 'https://hiev.uws.edu.au'
    - search_params: Object containing metadata key-value pairs for searching

    Returns
    -------
    List of matching hiev search results (with file download url included)
    """
    request_url = f"{base_url}data_files/api_search"
    # BUG FIX: copy before adding the auth token — the original assigned the
    # caller's dict and mutated it, leaking 'auth_token' back into
    # search_params (and into any later reuse of it).
    request_data = dict(search_params)
    request_data['auth_token'] = api_token

    # -- Set up the http request and handle the returned response
    data = urllib.parse.urlencode(request_data, True).encode('ascii')
    req = urllib.request.Request(request_url, data)
    with urllib.request.urlopen(req) as response:
        the_page = response.read()
        encoding = response.info().get_content_charset('utf-8')
        records = json.loads(the_page.decode(encoding))

    return records
def search_download(api_token, base_url, search_params, path=None):
    """ Performs a hievpy search and automatically downloads the matching files.

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - base_url: Base URL of the HIEv/Diver instance, e.g. 'https://hiev.uws.edu.au'
    - search_params: Object containing metadata key-value pairs for searching

    Optional
    - path: Full path of download directory (if path not provided, file will be downloaded to current directory)
    """
    records = search(api_token, base_url, search_params)

    # Download all files returned by the search to the specified folder path (if given)
    for record in tqdm.tqdm(records):
        download_url = f"{record['url']}?auth_token={api_token}"
        if path:
            download_path = Path(path) / record['filename']
        else:
            # BUG FIX: wrap in Path — the original left a plain str here, so
            # the .is_file() call below raised AttributeError whenever no
            # download directory was supplied.
            download_path = Path(record['filename'])

        # check if file exists, if not download it
        if not download_path.is_file():
            urllib.request.urlretrieve(download_url, download_path)
def upload(api_token, base_url, upload_file, metadata):
    """ Uploads a file to HIEv with associated metadata

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - base_url: Base URL of the HIEv/Diver instance, e.g. 'https://hiev.uws.edu.au'
    - upload_file: Full path to the file to be uploaded
    - metadata: Object containing metadata key-value pairs
    """
    upload_url = f"{base_url}data_files/api_create?auth_token={api_token}"
    # BUG FIX: use a context manager — the original opened the file inline and
    # never closed it, leaking the handle (especially when the POST raised).
    with open(upload_file, 'rb') as file_handle:
        response = requests.post(upload_url, files={'file': file_handle}, data=metadata)

    # Print out the outcome of the upload
    if response.status_code == 200:
        print(f'File {upload_file} successfully uploaded to HIEv')
    else:
        print(
            f'ERROR - There was a problem uploading file {upload_file} to HIEv')
def update_metadata(api_token, base_url, records, updates):
    """ Updates metadata on a list of records returned by hievpy search

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - base_url: Base URL of the HIEv/Diver instance, e.g. 'https://hiev.uws.edu.au'
    - records: A list of records as returned by the hievpy search function
    - updates: Object containing updated metadata key-value pairs
    """
    update_url = f"{base_url}data_files/api_update?auth_token={api_token}"
    success_count = 0
    for record in tqdm.tqdm(records):
        # Target each search record by copying its id into the update payload.
        updates['file_id'] = record['file_id']
        if requests.post(update_url, data=updates).status_code == 200:
            success_count += 1
    print(f"{success_count} records of {len(records)} successfully updated")
# ---------------------------------------------------------------------------------------------------------------------
# TOA5 functions
# ----------------------------------------------------------------------------------------------------------------------
def toa5_summary(api_token, record):
    """ Returns toa5 summary information (variable names, units and measurement types) for a given
    individual search-returned record.

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - record: individual record object from the results of the hievpy search function

    Returns
    -------
    TOA5 summary information printed to the console
    """
    # Guard clause: only TOA5 records carry the three-row header we print.
    if not is_toa5(record):
        print('Error: This is not a TOA5 record')
        return

    download_url = f"{record['url']}?auth_token={api_token}"
    payload = urllib.request.urlopen(download_url).read()
    # Skip the logger-info row; rows 0-2 of the remainder are
    # name / unit / measurement type for each column.
    frame = pd.read_csv(io.StringIO(payload.decode('utf-8')),
                        skiprows=1, header=None)
    for column in frame:
        print(" ".join(str(x) for x in frame[column][0:3].values))
def search_load_toa5df(api_token, base_url, search_params, biggish_data=False,
                       keep_files=False, multiple_delim=False,
                       dst_folder='./raw_data'):
    """ Performs a hievpy search and loads results into a pandas dataframe given the file records

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - base_url: Base URL of the HIEv/Diver instance, e.g. 'https://hiev.uws.edu.au/'
    - search_params: Object containing metadata key-value pairs for searching

    Optional:
    - biggish_data: boolean
        If True files will be downloaded and datatypes optimized for memory
        usage. Handy for large time series and/or memory-constrained machines.
    - keep_files: boolean
        If True will keep files after importing into dataframe.
    - multiple_delim: boolean
        If True parse with a tab/comma/semicolon regex separator and strip
        stray quote characters from headers and timestamps.
    - dst_folder: string
        Path to folder files will be downloaded to.

    Returns
    -------
    Sorted pandas dataframe of TOA5 data with index equal to TIMESTAMP and TOA5 variable names as column headers

    * Notice
    The top row of the original TOA5 file (logger info etc) and the units and measurement type rows are discarded
    during dataframe creation. This information can alternatively be found via the toa5_summary function.
    """
    # search records
    records = search(api_token, base_url, search_params)
    # use 'biggish data' mode: download to disk first, then read back with
    # per-column dtype downcasting to minimize memory.
    if biggish_data:
        # set and create download folder if it does not exist
        dst_folder = Path(dst_folder)
        if not dst_folder.is_dir():
            os.makedirs(dst_folder)
        # display number of files being downloaded
        print(f'Downloading {len(records)} files:')
        # build download url for each file
        for record in tqdm.tqdm(records):
            download_url = f"{record['url']}?auth_token={api_token}"
            # check if file exists, if not downloads
            file_path = dst_folder / record['filename']
            if not file_path.is_file():
                urllib.request.urlretrieve(download_url, file_path)
        # create empty dataframe to store final data
        df_all = pd.DataFrame()
        # loop through all downloaded files
        for i in list(dst_folder.glob('*.dat')):
            # read data into dataframe discarding undesired header columns
            if multiple_delim:
                df = pd.read_csv(i, skiprows=[0, 2, 3], na_values='NAN',
                                 sep='\\t|,|;', engine='python')
                df.columns = [i.replace('"', "") for i in df.columns]
                df['TIMESTAMP'] = df['TIMESTAMP'].str.replace('"', '')
            else:
                df = pd.read_csv(i, skiprows=[0, 2, 3], na_values='NAN')
            # generate datetimeindex
            df = df.set_index('TIMESTAMP')
            df.index = pd.to_datetime(df.index)
            # optimize memory usage
            # first get names of float, integer and object columns
            float_cols = df.select_dtypes(include=['float64']).columns
            integer_cols = df.select_dtypes(include=['int64']).columns
            object_cols = df.select_dtypes(include=['object']).columns
            # then assign the dtype that uses least memory for each column
            df[integer_cols] = df[integer_cols].apply(
                pd.to_numeric, downcast='integer')
            df[float_cols] = df[float_cols].apply(
                pd.to_numeric, downcast='float')
            # converting objects to category is only more memory efficient if
            # less than 50% of values are unique
            for col in object_cols:
                num_unique_values = len(df[col].unique())
                num_total_values = len(df[col])
                if num_unique_values / num_total_values < 0.5:
                    df[col] = df[col].astype('category')
            # append data
            df_all = pd.concat([df_all, df], sort=False)
        # delete dst_folder if wanted
        if not keep_files:
            shutil.rmtree(dst_folder)
    else:
        # streaming mode: parse each file straight from the HTTP response
        # without writing to disk.
        # print number of records found
        print(f'Loading {len(records)} files:')
        # create empty dataframe to save data in
        df_all = pd.DataFrame()
        # loop through all records and generate progressbar
        for record in tqdm.tqdm(records):
            # build download url for each file
            download_url = f"{record['url']}?auth_token={api_token}"
            # get data
            req = urllib.request.urlopen(download_url)
            data = req.read()
            # read data into dataframe discarding undesired header columns
            if multiple_delim:
                df = pd.read_csv(io.StringIO(data.decode('utf-8')),
                                 skiprows=[0, 2, 3], na_values='NAN',
                                 sep='\\t|,|;', engine='python')
                df.columns = [i.replace('"', "") for i in df.columns]
                df['TIMESTAMP'] = df['TIMESTAMP'].str.replace('"', '')
            else:
                df = pd.read_csv(io.StringIO(data.decode('utf-8')),
                                 skiprows=[0, 2, 3], na_values='NAN')
            # generate datetimeindex
            df = df.set_index('TIMESTAMP')
            df.index = pd.to_datetime(df.index)
            # infer data types of all other columns
            df = df.infer_objects()
            # append data
            df_all = pd.concat([df_all, df], sort=False)
    # if from_date provided sort and trim data
    if 'from_date' in search_params:
        df_all = df_all.sort_index()[search_params['from_date']:]
    # if to_date provided sort and trim data
    if 'to_date' in search_params:
        df_all = df_all.sort_index()[:search_params['to_date']]
    return df_all.drop_duplicates()
def logger_info(api_token, records):
    """
    Summarise the logger metadata held in the first row of Campbell Sci
    TOA5 files.

    Input
    -----
    Required
    - api_token: HIEv API token/key
    - records: record object from the results of the hievpy search function

    Returns
    -------
    pandas dataframe, indexed by filename, with one row of logger
    information per TOA5 file
    """
    header_fields = ['file_type', 'station_name', 'logger_model',
                     'serial_no', 'os_version', 'logger_program',
                     'Dld_sig', 'table_name']
    df_out = pd.DataFrame(columns=header_fields)
    for record in tqdm.tqdm(records):
        if not is_toa5(record):
            print('Error: This is not a TOA5 record')
            continue
        # The first line of a TOA5 file holds the logger/environment header.
        download_url = f"{record['url']}?auth_token={api_token}"
        raw = urllib.request.urlopen(download_url).read()
        header = pd.read_csv(io.StringIO(raw.decode('utf-8')),
                             skiprows=0, header=None, nrows=1)
        header = header.dropna(axis=1)
        header.columns = header_fields
        df_out.loc[record['filename']] = header.iloc[0]
    return df_out.sort_index()
|
984,791 | 3c861967cc443a9881c688bc26923d150f265d5c | # Generated by Django 3.1.2 on 2020-11-01 19:33
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the recipes app: Ingredient, IngredientAmount,
    Tag and Recipe (auto-generated by Django makemigrations)."""

    initial = True

    dependencies = [
        # Recipe.author references the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('dimension', models.CharField(max_length=20)),
            ],
            options={
                # An ingredient is identified by its title plus its unit.
                'unique_together': {('title', 'dimension')},
            },
        ),
        # Through-model linking a Recipe to an Ingredient with a quantity.
        migrations.CreateModel(
            name='IngredientAmount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.IntegerField()),
                ('ingredient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.ingredient')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('image', models.ImageField(upload_to='recipes/')),
                ('description', models.TextField()),
                ('time', models.PositiveSmallIntegerField()),
                ('pub_date', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_recipes', to=settings.AUTH_USER_MODEL)),
                ('ingredients', models.ManyToManyField(related_name='recipes', through='recipes.IngredientAmount', to='recipes.Ingredient')),
                ('tags', models.ManyToManyField(related_name='recipes', to='recipes.Tag')),
            ],
        ),
        # Added after Recipe exists to break the circular dependency.
        migrations.AddField(
            model_name='ingredientamount',
            name='recipe',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.recipe'),
        ),
    ]
|
984,792 | a25c70b086e30d5453a6b2028947b60a2489d0ec | # -*- coding: utf-8 -*-
import os
import time
import speech
class Study:
    """Interactive study-session timer that persists the accumulated
    study time (in hours) to ./time_data_study.txt and enforces a daily
    8-hour limit."""

    def __init__(self):
        # Ask for a session length until a valid choice ("1" or "2") is given.
        while True:
            print("请您选择,提示:请输入序号1或者2")
            print("1. 学习30分钟")
            print("2. 学习60分钟")
            self.choice = input("您的决定: ")
            print("")
            if self.choice == "1":
                self.total_time = 30 * 60  # seconds
                break
            elif self.choice == "2":
                self.total_time = 60 * 60  # seconds
                break
            else:
                print("您的输入值有误,请重新输入!提示:输入数字1或者2")
                continue
        self.start_time = time.time()
        self.flag = True  # False disables the session (daily limit reached)
        # Load the accumulated study hours from disk (0 on first run).
        if not os.path.exists("./time_data_study.txt"):
            self.time_total_study = 0
        else:
            with open("./time_data_study.txt", "r") as f:
                time_data = f.readline()
            self.time_total_study = float(time_data)
        # judge whether the total time reaches 8 hours
        if self.time_total_study >= 8:
            print("今天学习时间太久了,请做点儿别的事情吧!")
            print("")
            self.flag = False
        # Only 30 minutes remain today, so a 60-minute session must be
        # refused.  ">=" instead of the original "== 7.5" avoids relying
        # on exact float equality after the file round-trip.
        if self.choice == "2" and self.time_total_study >= 7.5:
            print("今日剩余学习时间30分钟,请重新选择")
            print("")
            self.flag = False

    def main_program(self):
        """Run the session (if allowed) and persist the updated total."""
        if self.flag:
            self.start_learning()
            self.update_data()

    def start_learning(self):
        """Block until the chosen session length has elapsed, then add the
        session to the running total."""
        print("开始学习!")
        speech.say("los geht's")
        # Sleep instead of busy-waiting: the original spun the CPU and
        # compared elapsed time with "!=", which could skip the exact
        # second and then loop forever.  "<" plus sleep(1) is robust.
        while time.time() - self.start_time < self.total_time:
            # Voice interaction could be added here.
            time.sleep(1)
        speech.say("fertig!")
        print("学习完成!")
        if self.choice == "1":
            self.time_total_study += 0.5
        if self.choice == "2":
            self.time_total_study += 1

    def update_data(self):
        """Write the accumulated study hours back to the data file."""
        with open("./time_data_study.txt", "w+") as f:
            f.write(str(self.time_total_study) + '\n')
if __name__ == "__main__":
    # ML: My Life
    # Run one interactive study session and persist the updated total.
    s = Study()
    s.main_program()
984,793 | fd79ffb783b7f41d68c9c894e49c50a7cd6fa9b9 | '''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
import numpy as np
import random
def sample2D(a, n, temperature=1.0):
    """Sample a (root, quality) index pair from a joint output vector.

    The first ``n`` entries of ``a`` are root probabilities; the entries
    from ``n`` up to (but excluding) the last one are quality
    probabilities.  Each part is renormalised before sampling.
    """
    roots = a[:n]
    quals = a[n:-1]
    root_idx = sample(roots / np.sum(roots), temperature)
    qual_idx = sample(quals / np.sum(quals), temperature)
    return (root_idx, qual_idx)
def sample(a, temperature=1.0):
    """Draw an index from probability array *a*, sharpened by *temperature*.

    Lower temperatures concentrate the distribution on its most likely
    entries; higher temperatures flatten it.
    """
    logits = np.log(a) / temperature
    probs = np.exp(logits)
    probs = probs / np.sum(probs)
    # One multinomial draw yields a one-hot row; argmax recovers the index.
    return np.argmax(np.random.multinomial(1, probs, 1))
def prettify(seq):
txt = []
nb = 0
for c in seq:
if c == '|':
nb += 1
txt.append(c)
if nb>1 and (nb-1) % 4 == 0:
txt.append('\n|')
else:
txt.append('%7s' % (c[0]+c[1]))
#txt.append('|')
return ' '.join(txt)
# load data: one song per line, chords separated by ';'
pieces = []
with open('chord_progressions.txt', 'r') as fp:
    for line in fp.readlines():
        pieces.append(line.strip().split(';'))
# separate chord in root and quality (chords are written "root:quality")
chords = [c.split(':') for s in pieces for c in s]
c_root = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
c_qual = np.unique([c[1] for c in chords])
# lookup tables between chord names and one-hot indices (both directions)
root2idx = dict((c, i) for i, c in enumerate(c_root))
qual2idx = dict((c, i) for i, c in enumerate(c_qual))
idx2root = dict((i, c) for i, c in enumerate(c_root))
idx2qual = dict((i, c) for i, c in enumerate(c_qual))
# create slices for training the RNN
# feature vector = one-hot root + one-hot quality + one bar-marker slot
num_dims = len(c_root) + len(c_qual) + 1
maxlen = 20
sequences = []
for song in pieces:
    c_prog = []
    for i, c in enumerate(song):
        # insert a bar marker '|' before every group of four chords
        if i % 4 == 0:
            c_prog.append('|')
        c_prog.append(c.split(':'))
    sequences.append(c_prog)
print('nb sequences:', len(sequences))
# build the model: 2 stacked LSTM
Nn = 256
dout = 0.25
print('Build model...')
model = Sequential()
model.add(LSTM(Nn, return_sequences=True, input_shape=(maxlen, num_dims)))
model.add(Dropout(dout))
model.add(LSTM(Nn, return_sequences=True))
model.add(Dropout(dout))
model.add(LSTM(Nn, return_sequences=False))
model.add(Dropout(dout))
model.add(Dense(num_dims))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# load pre-trained weights instead of training from scratch
model.load_weights(filepath="lstm_weights_n256.hdf5")
print('Generating sequences ...')
# NOTE(review): this script is Python 2 ('xrange' below, and str written to
# a file opened in 'wb') -- under Python 3 it needs range() and text mode.
with open('gen_sequences.txt', 'wb') as fout:
    for j in range(2):
        # choose initial chord seq
        # NOTE(review): random.randint's upper bound is inclusive, so this
        # can index one past the end of sequences -- verify.
        sample_index = random.randint(0, len(sequences))
        song = sequences[sample_index]
        # seed window: one-hot encode the first maxlen tokens of the song
        X = np.zeros((1, maxlen, num_dims), dtype=np.bool)
        for t, chord in enumerate(song[:maxlen]):
            if chord == '|':
                X[0, t, -1] = 1
            else:
                X[0, t, root2idx[chord[0]]] = 1
                X[0, t, qual2idx[chord[1]]+len(root2idx)] = 1
        res = {'root':c_root, 'qual':c_qual, 'sample_idx':sample_index, 'samples':{}}
        for diversity in [0.5, 1.0, 1.2]:
            print()
            print('----- diversity:', diversity)
            gen_seq = [s for s in song[:maxlen]]
            x = X.copy()
            for i in xrange(48):
                # every fifth token is a bar marker; shift the window and
                # feed the marker to the model as well
                if len(gen_seq) % 5 == 0:
                    gen_seq.append('|')
                    x[0,:-1,:] = x[0,1:,:]
                    x[0,-1,:] = 0
                    x[0,-1,-1] = 1
                preds = model.predict(x, verbose=0)[0]
                nxt_chord = sample2D(preds, len(root2idx), diversity)
                gen_seq.append([idx2root[nxt_chord[0]], idx2qual[nxt_chord[1]]])
                # shift the window and append the sampled chord's one-hot
                x[0,:-1,:] = x[0,1:,:]
                x[0,-1,:] = 0
                x[0,-1,nxt_chord[0]] = 1
                x[0,-1,nxt_chord[1]+len(root2idx)] = 1
            print(prettify(gen_seq))
            res['samples'][diversity] = gen_seq
        # write to file
        fout.write('\n%2i: sequence %i\n' % (j, sample_index))
        for d in sorted(res['samples'].keys()):
            seq = res['samples'][d]
            fout.write('Diversity: %.1f\n' % d)
            fout.write(prettify(seq))
            fout.write('\n---\n')
# with open('tmp/sample_%i.pkl' % sample_index, 'wb') as fp:
#     pickle.dump(res, fp, -1)
984,794 | cf48837b63b8858deb874c5ae27f37b9e0fdfd9c | import numpy as np
import pickle, os
import sklearn
from sklearn.linear_model import SGDRegressor
from sklearn.kernel_approximation import RBFSampler
import sklearn.pipeline
import virl
class RbfFunctionApproximator():
    """
    Q(s,a) function approximator.

    It uses a specific form for Q(s,a) where separate functions are fitted
    for each action (i.e. individual Q_a(s) functions, one per action).
    We could have concatenated the feature maps with the action TODO TASK?
    """

    def __init__(self, env, eta0=0.01, learning_rate="constant"):
        """Build per-action SGD regressors on top of a fixed RBF feature map.

        Args:
            env: environment providing observation_space / action_space.
            eta0: learning rate (initial), default 0.01.
            learning_rate: rule used to control the learning rate; see
                https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
                for details.
        """
        self.eta0 = eta0
        self.learning_rate = learning_rate
        # Fit the scaler and the RBF feature map on a sample of observations.
        observation_examples = np.array(
            [env.observation_space.sample() for x in range(10000)])
        self.scaler = sklearn.preprocessing.StandardScaler().fit(observation_examples)
        self.feature_transformer = sklearn.pipeline.FeatureUnion([
            ("rbf1", RBFSampler(gamma=5.0, n_components=100)),
            ("rbf2", RBFSampler(gamma=2.0, n_components=100)),
            ("rbf3", RBFSampler(gamma=1.0, n_components=100)),
            ("rbf4", RBFSampler(gamma=0.5, n_components=100))
        ]).fit(observation_examples)
        # One regressor per action: Q(s, a) = models[a](phi(s)).
        self.models = []
        for _ in range(env.action_space.n):
            # max_iter must be an int; the original passed 1e5 (a float),
            # which recent scikit-learn versions reject.
            model = SGDRegressor(learning_rate=learning_rate, tol=1e-5,
                                 max_iter=100000, eta0=eta0)
            # We need to call partial_fit once to initialize the model
            # or we get a NotFittedError when trying to make a prediction.
            # This is quite hacky.
            model.partial_fit([self.featurize_state(env.reset())], [0])
            self.models.append(model)

    def featurize_state(self, state):
        """
        Returns the featurized (scaled + RBF-mapped) representation for a state.
        """
        s_scaled = self.scaler.transform([state])
        s_transformed = self.feature_transformer.transform(s_scaled)
        return s_transformed[0]

    def predict(self, s, a=None):
        """
        Makes Q(s,a) function predictions.

        Args:
            s: state to make a prediction for
            a: (Optional) action to make a prediction for

        Returns
            If an action a is given this returns a single number as the prediction.
            If no action is given this returns a vector of predictions for all
            actions in the environment where pred[i] is the prediction for action i.
        """
        features = self.featurize_state(s)
        # "is None" rather than "== None": identity is the idiomatic None
        # test and avoids elementwise comparison surprises.
        if a is None:
            return np.array([m.predict([features])[0] for m in self.models])
        else:
            return self.models[a].predict([features])[0]

    def update(self, s, a, td_target):
        """
        Updates the approximator's parameters (i.e. the weights) for a given
        state and action towards the target y (which is the TD target).
        """
        features = self.featurize_state(s)
        # recall that we have a seperate function (model) for each action a
        self.models[a].partial_fit([features], [td_target])
from utils import (
q_learning,
exec_policy,
get_fig,
plt
)
if __name__ == '__main__':
    env = virl.Epidemic(stochastic=False, noisy=False)
    rbf_file = './rbf.pkl'
    # Reuse a previously trained approximator if one was pickled.
    if os.path.exists(rbf_file):
        with open(rbf_file, 'rb') as f:
            rbf_func = pickle.load(f)
        print('form file load RBF success.')
    else:
        rbf_func = RbfFunctionApproximator(env)
        # training
        states = q_learning(env, rbf_func, 1500, epsilon=0.05)
        # save the approximate function
        with open(rbf_file, 'wb')as f:
            pickle.dump(rbf_func, f)
    # make dir -- makedirs also creates './results' when it is missing;
    # the original os.mkdir('./results/RBF') raises FileNotFoundError if
    # the parent directory does not exist yet.
    os.makedirs('./results/RBF', exist_ok=True)
    # Evaluate the learned policy on every problem id, noisy and clean.
    for i in range(10):
        id = i
        for tf in range(2):
            env = virl.Epidemic(problem_id=id, noisy=tf)
            states, rewards, actions = exec_policy(env, rbf_func, verbose=False)
            fig = get_fig(states, rewards)
            if tf:
                tf = 'True'
            else:
                tf = 'False'
            plt.savefig(dpi=300, fname='./results/RBF/problem_id={}_noisy={}.jpg'.format(id, tf))
            print("\tproblem_id={} noisy={} Total rewards:{:.4f}".format(id, tf, sum(rewards)))
            plt.close()
    # Plot 10 stochastic roll-outs of the infectious population.
    fig, ax = plt.subplots(figsize=(8, 6))
    for i in range(10):
        env = virl.Epidemic(stochastic=True)
        states, rewards, actions = exec_policy(env, rbf_func, verbose=False)
        ax.plot(np.array(states)[:, 1], label=f'draw {i}')
    ax.set_xlabel('weeks since start of epidemic')
    ax.set_ylabel('Number of Infectious persons')
    ax.set_title('Simulation of 10 stochastic episodes with RBF policy')
    ax.legend()
    plt.savefig(dpi=300, fname='./results/RBF/stochastic.png')
    plt.close()
984,795 | aefb65304169846f353a32edc7ac7f8548f9bae5 | #Python program to print Highest Common Factor (HCF) of two numbers
n1,n2 = 12,8 #4
n1,n2 = 9,21 #3
n1,n2 = 7,5 #1
n1=int(input('first num: '))
n2=int(input('second num: '))
if(n1>n2):
l,h=n1,n2
else:
l,h=n2,n1
gcd = 1
for i in range(1,h):
if(h%i==0 and l%i==0):
gcd=i
print("gcd is {}".format(gcd)) |
984,796 | 0193c5bc8814e3738a19753d09b45545954a6d8d | import cvxpy as cp
import numpy as np
class System:
    """Coordinator for a simulated smart home.

    Each tick, devices/controllers submit action requests; process()
    merges them, resolves duplicates and conflicts, and solves a small
    integer linear program to pick the action subset that maximises the
    weighted resource objective, subject to mandatory / dependency /
    exclusivity and optional power-limit constraints.
    """

    # Resource dimensions of every action's weight vector, and the global
    # objective weights (power cost is penalised, comfort/security rewarded).
    resources = ['power_cost', 'comfort', 'security']
    resource_weights = [-1, 5, 10]

    # Target temperatures consulted by temperature-related devices.
    target_temperature_present = 20
    target_temperature_absent = 20

    def __init__(self, env):
        self.env = env
        # Mutable per-instance state.  In the original these were class
        # attributes, so every System instance would have shared the same
        # device registry and request queue -- a classic Python pitfall.
        self.devices = {}       # name -> device object (implements Device)
        self.requests = []      # requests submitted since the last tick
        self.time = 0           # ticks since start
        self.rounded_time = 0   # time of day in seconds
        self.power_limited = False
        self.power_limit = 0

    # Name = string with device name
    # Obj = object representing interface to device, implements Device class
    def register_device(self, obj):
        """Register a device object under its own name."""
        self.devices[obj.name] = obj

    # Print current action set
    def show_current_state(self):
        """Print the simulated wall-clock time and every device's state."""
        print("At time %d.%d.%d :" % (
            self.rounded_time / (60 * 60), (self.rounded_time / 60) % 60, (self.rounded_time % 60)))
        print("\tDevices:")
        for n, o in self.devices.items():
            print("\t\t%s = %s" % (n, o.current_state))

    @staticmethod
    def action_is_duplicate(a0, a1):
        """Return True when the two action dicts have identical items."""
        seen = set()
        new_l = []
        l = [a0, a1]
        for d in l:
            t = tuple(d.items())
            if t not in seen:
                seen.add(t)
                new_l.append(d)
        return len(new_l) == 1

    def set_max_power_limit(self, limit):
        """Tighten the power budget used by the next process() call."""
        if self.power_limited:
            self.power_limit = min(self.power_limit, limit)
        else:
            self.power_limited = True
            self.power_limit = limit

    def submit_request(self, req):
        """Queue a request to be resolved on the next tick."""
        self.requests.append(req)

    # Update action set
    def process(self):
        """Resolve this tick's requests via an ILP, execute the granted
        actions, update every device and advance the clock."""
        requested_actions = []
        weights = []
        man_actions = []
        con_action_pairs = []
        dep_action_pairs = []
        alt_actions = []
        for req in self.requests:
            requested_actions_, weights_, mandatory_actions_, \
                contradicting_action_pairs_, dependent_action_pairs_, alternative_actions_ = req.read()
            # Merge action sets, shifting this request's local indices.
            base_idx = len(requested_actions)
            requested_actions += requested_actions_
            weights += weights_
            # alt_actions += alternative_actions_
            man_actions += [x + base_idx for x in mandatory_actions_]
            con_action_pairs += [{x[0] + base_idx, x[1] + base_idx}
                                 for x in contradicting_action_pairs_]
            dep_action_pairs += [{x[0] + base_idx, x[1] + base_idx}
                                 for x in dependent_action_pairs_]
            for x in alternative_actions_:
                s = []
                for y in x:
                    s.append(y + base_idx)
                alt_actions.append(set(s))
        # Find duplicate actions
        removed_actions = []
        for i0 in range(len(requested_actions)):
            a0 = requested_actions[i0]
            dups = []
            for i1 in range(i0 + 1, len(requested_actions)):
                a1 = requested_actions[i1]
                if System.action_is_duplicate(a0, a1):
                    dups.append(i1)
            # Merge weights of duplicates: keep the highest per resource.
            weights_vec = weights[i0]
            for d in dups:
                for i in range(len(self.resources)):
                    weights_vec[i] = max(weights_vec[i], weights[d][i])
            # Update weight list
            weights[i0] = weights_vec
            for d in dups:
                weights[d] = weights_vec
                removed_actions.append(d)  # Mark duplicated actions as removed
            # Redirect conflict indices of the duplicates to the keeper i0.
            man_actions = [i0 if x in dups else x for x in man_actions]
            con_action_pairs = [set([i0 if y in dups else y for y in x])
                                for x in con_action_pairs]
            dep_action_pairs = [set([i0 if y in dups else y for y in x])
                                for x in dep_action_pairs]
            alt_actions = [set([i0 if y in dups else y for y in x])
                           for x in alt_actions]
        # Remove duplicated actions
        for d in removed_actions:
            requested_actions[d] = None
        # Different actions executing on the same device are exclusive conflicts
        for i0 in range(len(requested_actions)):
            a0 = requested_actions[i0]
            if a0 is not None:
                for i1 in range(i0 + 1, len(requested_actions)):
                    a1 = requested_actions[i1]
                    if a1 is not None and a0["device"] == a1["device"]:
                        con_action_pairs.append({i0, i1})
        # Remove duplicate conflicts
        man_actions = list(set(man_actions))
        # Convert into ILP problem
        mu = cp.Variable(len(requested_actions), integer=True,
                         boolean=True)  # whether or not the action is to be performed
        # Define constraints
        constraints = []
        for m in man_actions:
            constraints.append(mu[m] == 1)
        for e in dep_action_pairs:
            e_l = list(e)
            constraints.append(mu[e_l[0]] - mu[e_l[1]] == 0)
        for e in con_action_pairs:
            e_l = list(e)
            constraints.append(mu[e_l[0]] + mu[e_l[1]] <= 1)
        for e in alt_actions:
            e_l = list(e)
            c = mu[e_l[0]] + mu[e_l[1]]
            for idx in range(2, len(e_l)):
                c += mu[e_l[idx]]
            constraints.append(c <= 1)
        # Create power limit constraint
        if self.power_limited:
            c = None
            c_i = False
            act_idx = 0
            # NOTE(review): weights is indexed with the compacted act_idx
            # while the full list is indexed by original position; after
            # duplicates are removed these can diverge -- verify.
            for i in range(len(requested_actions)):
                if requested_actions[i] is not None:
                    if not c_i:
                        c = mu[act_idx] * weights[act_idx][0]
                        c_i = True
                    else:
                        c += mu[act_idx] * weights[act_idx][0]
                    act_idx += 1
            constraints.append(c <= self.power_limit)
            print("Power limited to %f W" % (self.power_limit))
            self.power_limited = False
            #self.power_limit = 0
        # Define cost function
        cost = None
        cost_i = False
        for j in range(len(self.resource_weights)):
            c = None
            c_i = False
            act_idx = 0
            for i in range(len(requested_actions)):
                if requested_actions[i] is not None:
                    if not c_i:
                        c = mu[act_idx] * weights[act_idx][j]
                        c_i = True
                    else:
                        c += mu[act_idx] * weights[act_idx][j]
                    act_idx += 1
            if not cost_i:
                cost = c * self.resource_weights[j]
                cost_i = True
            else:
                cost += c * self.resource_weights[j]
        # Run ILP, try the ECOS_BB solver first, if it fails, use GLPK_MI
        problem = cp.Problem(cp.Maximize(cost), constraints)
        try:
            problem.solve(solver=cp.ECOS_BB)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit are not swallowed.
            problem.solve(solver=cp.GLPK_MI)
        running_actions = np.round(mu.value)
        # Execute actions
        for act_idx in range(len(running_actions)):
            if requested_actions[act_idx] is not None:
                print('\033[94m[%s] %s requested.\033[0m' % (
                    self.devices[requested_actions[act_idx]["device"]].name, requested_actions[act_idx]["target"]))
        for act_idx in range(len(running_actions)):
            if requested_actions[act_idx] is not None:
                if running_actions[act_idx] == 1:
                    print('\033[92m[%s] %s granted.\033[0m' % (
                        self.devices[requested_actions[act_idx]["device"]].name, requested_actions[act_idx]["target"]))
                    self.devices[requested_actions[act_idx]["device"]].transition_state(
                        requested_actions[act_idx]["target"])  # submit action
        # Update all devices
        for dev in self.devices.values():
            dev.update(self, self.env)
        self.requests = []  # Clear the requests list for the next tick
        self.time += 1
        self.rounded_time = self.time % (24 * 60 * 60)
|
984,797 | ef3184ea862e86c135515a367099b0d034ba99a9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import smbus # use I2C
import math
from time import sleep # time module
### define #############################################################
DEV_ADDR = 0x68 # device address
PWR_MGMT_1 = 0x6b # Power Management 1
ACCEL_XOUT = 0x3b # Axel X-axis
ACCEL_YOUT = 0x3d # Axel Y-axis
ACCEL_ZOUT = 0x3f # Axel Z-axis
TEMP_OUT = 0x41 # Temperature
GYRO_XOUT = 0x43 # Gyro X-axis
GYRO_YOUT = 0x45 # Gyro Y-axis
GYRO_ZOUT = 0x47 # Gyro Z-axis
# 1byte read
def read_byte( addr ):
    """Read one byte from MPU-6050 register *addr* over I2C."""
    return bus.read_byte_data( DEV_ADDR, addr )
# 2byte read
def read_word( addr ):
    """Read a big-endian 16-bit value starting at register *addr*."""
    msb = read_byte(addr)
    lsb = read_byte(addr + 1)
    return (msb << 8) | lsb
# Sensor data read
def read_word_sensor( addr ):
    """Read a 16-bit register and interpret it as a signed value."""
    raw = read_word(addr)
    # Values >= 0x8000 are negative in two's complement representation.
    return raw if raw < 0x8000 else raw - 65536
# Get Temperature
def get_temp():
    """Return the die temperature in degrees Celsius."""
    raw = read_word_sensor(TEMP_OUT)
    # Datasheet: offset = -521 LSB at 35 degC, sensitivity 340 LSB/degC.
    return (raw + 521) / 340.0 + 35.0
# Get Gyro data (raw value)
def get_gyro_data_lsb():
    """Return the raw gyro readings [x, y, z] in LSB counts."""
    x = read_word_sensor( GYRO_XOUT )
    y = read_word_sensor( GYRO_YOUT )
    z = read_word_sensor( GYRO_ZOUT )
    return [ x, y, z ]
# Get Gyro data (deg/s)
def get_gyro_data_deg():
    """Return the gyro rates [x, y, z] in degrees per second."""
    # Sensitivity = 131 LSB/(deg/s), @cf datasheet
    return [v / 131.0 for v in get_gyro_data_lsb()]
# Get Axel data (raw value)
def get_accel_data_lsb():
    """Return the raw accelerometer readings [x, y, z] in LSB counts."""
    x = read_word_sensor( ACCEL_XOUT )
    y = read_word_sensor( ACCEL_YOUT )
    z = read_word_sensor( ACCEL_ZOUT )
    return [ x, y, z ]
# Get Axel data (G)
def get_accel_data_g():
    """Return the acceleration [x, y, z] in units of g."""
    # Sensitivity = 16384 LSB/G, @cf datasheet
    return [v / 16384.0 for v in get_accel_data_lsb()]
### Main function ######################################################
bus = smbus.SMBus( 1 )
# Clear PWR_MGMT_1 to wake the MPU-6050 out of sleep mode.
bus.write_byte_data( DEV_ADDR, PWR_MGMT_1, 0 )
# Poll and print temperature, gyro and accel once per second (Python 2).
while 1:
    temp = get_temp()
    print 't= %.2f' % temp, '\t',
    gyro_x,gyro_y,gyro_z = get_gyro_data_deg()
    print 'Gx= %.3f' % gyro_x, '\t',
    print 'Gy= %.3f' % gyro_y, '\t',
    print 'Gz= %.3f' % gyro_z, '\t',
    accel_x,accel_y,accel_z = get_accel_data_g()
    print 'Ax= %.3f' % accel_x, '\t',
    print 'Ay= %.3f' % accel_y, '\t',
    print 'Az= %.3f' % accel_z, '\t',
    print # newline
    sleep( 1 )
984,798 | eab51efc4c5c003d31d69c3b8a769b76cbe0abc7 | #coding=utf-8
import sys,pathlib # *.py /qgb /[gsqp]
gsqp=pathlib.Path(__file__).absolute().parent.parent.absolute().__str__()
if gsqp not in sys.path:sys.path.append(gsqp)#py3 works
from qgb import py
U,T,N,F=py.importUTNF()
import numpy # as np
#True False array。
def test():
    # NOTE(review): `a` is read before it is ever assigned, so calling this
    # raises NameError -- these look like scratch notes, not a usable test.
    a = (a < 255).astype(numpy.int_)  # values < 255 become 1, 255 and above become 0
    a[:,6]  # select column 6
def plot(x, *ys, dys=None, markersize=1, font_size=8):
    """Scatter-plot one or more series against *x* and show the figure.

    Series may be passed positionally (*ys*) or via *dys*; *dys* is only
    used when no positional series were given.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(8, 8))
    fig.subplots_adjust(
        top=1.0,
        bottom=0.034,
        left=0.033,
        right=1.0,
        hspace=0.2,
        wspace=0.2
    )
    plt.rc('font', size=font_size)
    if not ys and dys:
        ys = dys
    # U.iter_kv yields (key, series) pairs; the key becomes the legend label.
    for label, series in U.iter_kv(ys):
        plt.plot(x, series, 'o', label=py.str(label), markersize=markersize)
    plt.legend()
    plt.show()
def two_point_line_function(*points, plot=True):
    '''Least-squares fit of a straight line through the given (x, y) points.

    Prints the fitted equation and optionally plots the data and the line.
    Note kept from the original author: Decimal inputs raise
    UFuncTypeError ("Cannot cast ufunc 'lstsq_n' input 0 from dtype('O')
    to dtype('float64')") in lstsq -- convert to float() first.
    '''
    import numpy as np
    import numpy.linalg as LA
    t = U.col(points, 0)
    y = U.col(points, 1)
    # Design matrix [x, 1] so lstsq yields slope and intercept.
    A = np.c_[t, np.ones_like(t)]
    a, b = LA.lstsq(A, y, rcond=None)[0]
    #####
    sop = '' if b < 0 else '+'
    print(f'y = {a} x {sop} {b}')
    sf = f'y={py.round(a,3)}*x{sop}{py.round(b,3)}'
    print(sf)
    if plot:
        import matplotlib.pyplot as plt
        plt.rc('font', size=16)
        plt.plot(t, y, 'o', label='Original data', markersize=5)
        plt.plot(t, A.dot([a, b]), 'r', label=sf)
        plt.legend()
        plt.show()
def counts(a, return_dict=True, one_value=False):
    """Count occurrences of each unique value in *a*.

    Returns a {value: count} dict by default, or a list of [value, count]
    pairs when return_dict is False.  With one_value=True and exactly one
    distinct value present, returns that value alone.
    """
    values, freqs = numpy.unique(a, return_counts=True)
    pairs = numpy.asarray((values, freqs)).T.tolist()
    if one_value and py.len(pairs) == 1:
        return pairs[0][0]
    return py.dict(pairs) if return_dict else pairs
def reverse_enumerate(a):
    """Yield (index, value) pairs of *a*, walking from the last element back
    to the first (indices refer to positions in the original array)."""
    last = a.shape[0] - 1
    for offset, value in py.enumerate(numpy.flip(a)):
        yield last - offset, value
def enumerate(a, reverse=False):
    '''Enumerate *a* forwards, or backwards when reverse=True.

    forward: 0,v0 ... 9,v9
    reverse: 9,v9 ... 0,v0
    '''
    return reverse_enumerate(a) if reverse else py.enumerate(a)
def select_2d_columns(a, condition):
    ''' condition: a<11
    Return the columns of 2-D array *a* where *condition* holds for every
    row of that column.
    '''
    # Bug fix: this module imports "numpy" (not "np"), so the original
    # call to np.where raised NameError at runtime.
    idx = (..., *numpy.where((condition).all(axis=0)))
    return a[idx]
select_2d_cols = select_2d_columns
def select_2d_rows(a, condition):
    ''' condition: a<11
    Return the rows of 2-D array *a* where *condition* holds for every
    column of that row.
    '''
    # Bug fix: use the module-level "numpy" import; "np" was never defined.
    idx = (*numpy.where((condition).all(axis=1)), ...)
    return a[idx]
def expand_2d_array(a, top=0, bottom=0, left=0, right=0, mode='constant', constant_values=0):
    ''' Pad a 2-D array with the given number of rows/columns on each side.
    only support 2d array
    '''
    widths = [(top, bottom), (left, right)]
    return numpy.pad(a, widths, mode, constant_values=constant_values)
pad=pad2d=expand_array=expand_2d_array
def pad_array(a, pad_width, mode='constant', constant_values=0):
    ''' Pad *a* along every dimension.
    pad_width: [(d1_head,d1_tail),(d2_head,d2_tail), ...]
    '''
    return numpy.pad(a, pad_width, mode, constant_values=constant_values)
def 一维变对角矩阵(a):
    """Build a diagonal matrix from a 1-D array (1-D -> diagonal matrix)."""
    return numpy.diag(a)
diag=dj=djjz=一维变对角矩阵
def 二维变对角矩阵(a):
    """Flatten *a* and place its elements on the diagonal of a new matrix
    (2-D -> diagonal matrix)."""
    return numpy.diagflat(a)
def slice_2d_array(a, x, y):
    '''Return a[x, y] -- slice a 2-D array with explicit index objects.

    Slice literals such as 0:5 cannot be written in a normal call
    (Y.slice_2d_array(d, 0:5, 0:5) is a SyntaxError), so pass
    slice(0, 5) (or plain indices / index arrays) instead, e.g.
    slice_2d_array(d, slice(0, 5), slice(0, 4)) has shape (5, 4).
    '''
    return a[x, y]
|
984,799 | 08ca229f3141a342a94e75013dc9efb42420848f | # Copyright 2022 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_number_hypot]
# Left input is x and right input is y, representing point (x,y).
# hypot(x, y) evaluates sqrt(x^2 + y^2): the distance from the origin,
# so the sign of either coordinate does not affect the result.

# 0
print('Length from origin to point (0,0):', ee.Number(0).hypot(0).getInfo())

# 3
print('Length from origin to point (3,0):', ee.Number(3).hypot(0).getInfo())

# 5
print('Length from origin to point (3,4):', ee.Number(3).hypot(4).getInfo())

# 5
print('Length from origin to point (-3,4):', ee.Number(-3).hypot(4).getInfo())

# 5
print('Length from origin to point (-3,-4):', ee.Number(-3).hypot(-4).getInfo())
# [END earthengine__apidocs__ee_number_hypot]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.