text stringlengths 38 1.54M |
|---|
# Read an integer and print its largest decimal digit.
integer = int(input('Введите целое положительное число: '))
print('Число: ', integer)
# Renamed from `max` to avoid shadowing the builtin max().
max_digit = 0
if integer > 0:
    # For a positive integer, `integer % 10 or integer // 10` is truthy
    # exactly while integer != 0, so this simpler condition is equivalent.
    # The original duplicated `integer = integer // 10` in both branches.
    while integer > 0:
        if max_digit < integer % 10:
            max_digit = integer % 10
        integer //= 10
else:
    print('Число отрицательное')
print('Максимальное: ', max_digit)
# coding: utf-8
"""
OneLogin API
OpenAPI Specification for OneLogin # noqa: E501
The version of the OpenAPI document: 3.1.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import BaseModel, Field, StrictStr, conlist
class ActionObj(BaseModel):
    """
    ActionObj
    """
    # Generated by OpenAPI Generator — prefer regenerating over hand-editing.
    action: Optional[StrictStr] = Field(None, description="The action to apply")
    value: Optional[conlist(StrictStr)] = Field(None, description="Only applicable to provisioned and set_* actions. Items in the array will be a plain text string or valid value for the selected action.")
    # Serializable field names, in declaration order (name-mangled to
    # _ActionObj__properties; used by the generator's serialization helpers).
    __properties = ["action", "value"]

    class Config:
        """Pydantic configuration"""
        # Allow population both by field name and by alias.
        allow_population_by_field_name = True
        # Re-run validators when attributes are assigned after construction.
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> ActionObj:
        """Create an instance of ActionObj from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias"""
        # exclude is empty; exclude_none drops unset optional fields.
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ActionObj:
        """Create an instance of ActionObj from a dict"""
        if obj is None:
            return None
        # Non-dict input is handed to pydantic's parser directly.
        if not isinstance(obj, dict):
            return ActionObj.parse_obj(obj)
        _obj = ActionObj.parse_obj({
            "action": obj.get("action"),
            "value": obj.get("value")
        })
        return _obj
|
"""
Exercício Python 105: Faça um programa que tenha uma função notas() que pode receber várias notas de alunos e vai
retornar um dicionário com as seguintes informações:
– Quantidade de notas
– A maior nota
– A menor nota
– A média da turma
– A situação (opcional)
"""
def notas(*args, sit=False):
    """Summarize a set of grades for a class.

    :param args: the grades (numbers); at least one is required, otherwise
        max()/min() raise ValueError just as in the original.
    :param sit: when True, also include the class situation
        ('EXCELENTE' for mean >= 7, 'PREOCUPANTE' for 5 < mean < 7,
        'CRÍTICA' otherwise).
    :return: dict with grade count, highest, lowest, mean and optional
        situation.  The original docstring promised a return value but the
        function only printed; it now returns the dict as well, which is
        backward-compatible (the print is kept).
    """
    turma = dict()
    turma['qtd_notas'] = len(args)
    turma['maior_nota'] = max(args)
    turma['menor_nota'] = min(args)
    turma['media_turma'] = sum(args) / len(args)
    if sit:
        if turma['media_turma'] >= 7:
            turma['situação'] = 'EXCELENTE'
        elif 5 < turma['media_turma'] < 7:
            turma['situação'] = 'PREOCUPANTE'
        else:
            # NOTE: a mean of exactly 5 lands here (original behavior kept).
            turma['situação'] = 'CRÍTICA'
    print(turma)
    return turma


notas(7.6, 5.6, 10, 9.8, 8, 6.5, 10, 1.3, 10, 10, 9.4, sit=True)
|
from Node import Node
class UnorderedList:
    """Singly linked list built from project `Node` objects; new items are
    inserted at the head.  Positions in insert()/pop() are 1-based."""

    def __init__(self):
        self.head = None

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        return self.head is None

    def add(self, item):
        """Insert item at the head — O(1)."""
        temp = Node(item)
        temp.setNext(self.head)
        self.head = temp

    def size(self):
        """Return the number of nodes by full traversal — O(n)."""
        current = self.head
        count = 0
        while current is not None:
            count += 1
            current = current.getNext()
        return count

    def search(self, key):
        """Return True when some node holds `key`."""
        current = self.head
        while current is not None:
            if current.getData() == key:
                return True
            current = current.getNext()
        return False

    def remove(self, item):
        """Remove the first node holding `item`; print a message when absent.

        Fixed: the original looped `while not found` and dereferenced
        `current` unconditionally, so a missing item (or an empty list)
        raised AttributeError instead of reaching the not-found message.
        """
        previous = None
        current = self.head
        while current is not None and current.getData() != item:
            previous = current
            current = current.getNext()
        if current is None:
            print("Element not found")
            return
        if previous is None:
            self.head = current.getNext()  # removing the head node
        else:
            previous.setNext(current.getNext())

    def append(self, item):
        """Append item at the tail — O(n).

        Fixed: the original crashed on an empty list (`current` was None
        when calling getNext()).
        """
        temp = Node(item)
        temp.setNext(None)
        if self.head is None:
            self.head = temp
            return
        current = self.head
        while current.getNext() is not None:
            current = current.getNext()
        current.setNext(temp)

    def insert(self, position, item):
        """Insert item at 1-based `position` (size()+1 appends at the tail)."""
        temp = Node(item)
        temp.setNext(None)
        previous = None
        current = self.head
        pos_count = 0
        if position <= 0 or position > self.size() + 1:
            print("Invalid position")
            return
        while pos_count != position - 1:
            previous = current
            current = current.getNext()
            pos_count += 1
        if previous is None:
            self.add(item)  # position 1 == head insertion
        else:
            previous.setNext(temp)
            temp.setNext(current)

    def index(self, item):
        """Return the 0-based index of `item`, or -1 when absent."""
        current = self.head
        index = 0
        while current is not None:
            if current.getData() == item:
                return index
            current = current.getNext()
            index += 1
        return -1

    def pop(self, pos=-1):
        """Remove and return the item at 1-based `pos`; default pops the tail.

        Returns None for an empty list or an out-of-range position.
        Fixed: the original crashed on an empty or single-node list, and its
        bound check (`pos > size()+1`) let pos == size()+1 walk past the tail.
        """
        if self.head is None:
            return None
        current = self.head
        previous = None
        if pos != -1:
            if pos > self.size() or pos <= 0:
                # print("Invalid position")
                return
            cur_pos = 0
            while cur_pos != pos - 1:
                previous = current
                current = current.getNext()
                cur_pos += 1
            popped_item = current.getData()
            if previous is not None:
                previous.setNext(current.getNext())
            else:
                self.head = current.getNext()
            return popped_item
        else:
            while current.getNext() is not None:
                previous = current
                current = current.getNext()
            if previous is None:
                self.head = None  # single-node list: tail is the head
            else:
                previous.setNext(None)
            return current.getData()

    def __str__(self):
        """Render as '[a, b, c]' using each node's data repr via str()."""
        result = "["
        current = self.head
        while current is not None:
            result += str(current.getData())
            current = current.getNext()
            if current:
                result += ", "
        result += "]"
        return result
# mylist = UnorderedList()
#
# mylist.add(31)
# mylist.add(77)
# mylist.add(17)
# mylist.add(93)
# mylist.add(26)
# mylist.add(54)
#
# print(mylist.size())
# print(mylist)
# print(mylist.pop(0))
# print(mylist)
# print(mylist.pop(1))
# print(mylist)
# print(mylist.pop(5))
# print(mylist)
# print(mylist)
# print(mylist.search(100))
#
# mylist.add(100)
# print(mylist.search(100))
# print(mylist.size())
#
# mylist.remove(54)
# print(mylist.size())
# mylist.remove(93)
# print(mylist.size())
# mylist.remove(31)
# print(mylist.size())
# print(mylist.search(93)) |
import json, os, threading
from watson_developer_cloud import ConversationV1
from watson_developer_cloud import ToneAnalyzerV3
import numpy as np
import scipy.io.wavfile as wv
import matplotlib.pyplot as plt
from PIL import Image
import speech_recognition as sr
# Shared speech_recognition recognizer used by the transcription thread below.
r = sr.Recognizer()
# Latest tone scores as percentages, keyed by emotion name; updated in place.
tones = {'Anger':0.0, 'Disgust':0.0, 'Fear':0.0, 'Joy':0.0, 'Sadness':0.0}
# SECURITY NOTE(review): Watson credentials are hard-coded here — rotate them
# and load from environment variables instead.
tone_analyzer = ToneAnalyzerV3(
    username='1d3684c8-af39-4908-be61-fc45f1f579d9',
    password='t5ZBkiLZaz8S',
    version='2016-02-11')
workspace_id = os.environ.get('WORKSPACE_ID') or 'YOUR WORKSPACE ID'
maintainToneHistoryInContext = True
# Text sent to the tone analyzer by invokeToneConversation().
payload = "who authorised the unlimited expense account"
# Flag other threads can poll to see whether analysis has completed.
done = False
def invokeToneConversation():
    """Send the module-level `payload` to the Watson Tone Analyzer and update
    the module-level `tones` dict with percentage scores, then print them.

    Blocking network call; sets the module-level `done` flag when finished.
    """
    global done
    done = False
    tone = tone_analyzer.tone(text=payload)
    # print(tone["document_tone"]["tone_categories"])
    # NOTE(review): indices 0-4 assume the first tone category lists tones in
    # the order Anger, Disgust, Fear, Joy, Sadness — confirm against the
    # 2016-02-11 API response shape.
    tones["Anger"] = tone["document_tone"]["tone_categories"][0]["tones"][0]["score"] *100
    tones["Disgust"] = tone["document_tone"]["tone_categories"][0]["tones"][1]["score"] *100
    tones["Fear"] = tone["document_tone"]["tone_categories"][0]["tones"][2]["score"] *100
    tones["Joy"] = tone["document_tone"]["tone_categories"][0]["tones"][3]["score"] *100
    tones["Sadness"] = tone["document_tone"]["tone_categories"][0]["tones"][4]["score"] *100
    print(payload)
    done = True
    for k,v in tones.items():
        print(k, " : ", round(v, 2), "%")
audioforSpeechToText = None
class GoogleTextToSpeech(threading.Thread):
    """Background thread that transcribes the module-level
    `audioforSpeechToText` via Google Speech Recognition and writes the
    result to Transcript.txt.  Sets the module-level `converted` flag."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        """Transcribe the shared audio; best-effort — failures are printed."""
        global converted
        converted = False
        print("Inside")
        print(audioforSpeechToText)
        try:
            # Network call to Google's recognizer.
            text = r.recognize_google(audioforSpeechToText)
            print("Transcription: " + text)
            # Fixed: use a context manager so the file is always closed;
            # narrowed the original bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt) to Exception.
            with open("Transcript.txt", "w") as transcript:
                transcript.write(text)
        except Exception:
            print("Could not understand audio")
# -*- coding: utf-8 -*-
import codecs
import os
import re

import scrapy
from bs4 import BeautifulSoup
from scrapy import Request
from scrapy.selector import Selector

from baike_scrapy.items import *
class HudongSpider(scrapy.Spider):
    """Crawl Hudong Baike entry pages starting from one seed article, saving
    each page's info-box / description / tags to text files under root_path.

    Note: `scrapy.Spider` requires `import scrapy` at the top of the file
    (the original only imported `Request` from scrapy).
    """
    name = 'hudong_spider'
    allowed_domains = ['baike.com']
    start_urls = ['http://www.baike.com/wiki/%E5%8C%97%E4%BA%AC%E8%88%AA%E7%A9%BA%E8%88%AA%E5%A4%A9%E5%A4%A7%E5%AD%A6']
    root_path = './data/'
    visited_urls = set()  # URLs already scheduled; crawl stops at 100
    url_pattern = 'http://baike.com{}'

    def start_requests(self):
        """Seed the crawl with the configured start URLs."""
        for url in self.start_urls:
            yield Request(url, callback=self.hudong_parse)

    def hudong_parse(self, response):
        """Persist the parsed page, then follow /wiki/ links (max 100 pages)."""
        # self.save_page_content(response.body)  # save the raw entry page
        self.parse_page_content(response.body)  # save the parsed page result
        soup = BeautifulSoup(response.body, "lxml")
        links = soup.find_all("a", href=re.compile('/wiki/*'))
        for link in links:
            if 'www.baike.com' not in link['href']:
                link['href'] = self.url_pattern.format(link['href'])
            # Fixed: the original tested the bs4 Tag object (`link`) against a
            # set of URL strings, so membership never matched and already
            # visited pages could be scheduled again.
            if link['href'] not in self.visited_urls and len(self.visited_urls) < 100:
                yield Request(link["href"], callback=self.hudong_parse)
                self.visited_urls.add(link["href"])
                print('index: %d, visit %s' % (len(self.visited_urls), link["href"]))
        # yield self.get_hudong_info(response.body)  # feed items to the neo4j pipeline

    def get_hudong_info(self, content):
        """Parse the info box, description and tags of an entry page into a
        GraphNode item.

        :param content: raw page bytes/str
        :return: GraphNode with name and a props dict
        """
        selector = Selector(text=content)
        title = ''.join(selector.xpath('//h1/text()').extract()).replace('/', '')
        td_cons = selector.xpath("//div[@id='datamodule']//table//tr//td").extract()
        item = GraphNode()
        item['name'] = title
        item['props'] = {}
        for td_con in td_cons:
            temp = Selector(text=td_con).xpath('//strong/text()').extract()
            name = ''.join(temp).replace('\n', '')
            name = name[:-1].strip()  # drop the trailing colon character
            temp = Selector(text=td_con).xpath('//span/text()').extract()
            value = ''.join(temp).replace('\n', '')
            if value is not None:
                item['props'][name] = value
        # Entry description, with footnote markers like [1] stripped.
        desc = selector.xpath("//div[@id='unifyprompt']//p//text()").extract()
        description = re.sub('\[[0-9]+\]', '', ''.join(desc).replace('\n', ''))
        item['props']['词条描述'] = description
        # Entry tags, comma-joined.
        labels = selector.xpath("//div[@class='place']//p//a//text()").extract()
        label = ','.join(labels).replace('\n', '').replace(' ', '')
        item['props']['词条标签'] = label
        return item

    def parse_page_content(self, content):
        """Parse the info box, description and tags of an entry page and save
        them as '<name>$$<value>' lines in a per-entry txt file.

        :param content: raw page bytes/str
        """
        # Info-box rows.
        selector = Selector(text=content)
        td_cons = selector.xpath("//div[@id='datamodule']//table//tr//td").extract()
        lines = ''
        for td_con in td_cons:
            temp = Selector(text=td_con).xpath('//strong/text()').extract()
            name = ''.join(temp).replace('\n', '')
            name = name[:-1].strip().replace(' ', '')
            temp = Selector(text=td_con).xpath('//span/text()|//span/a/text()').extract()
            value = ''.join(temp).replace('\n', '')
            if name != '' and value != '':
                lines += name + '$$' + value + '\n'
        # Entry description, with footnote markers like [1] stripped.
        desc = selector.xpath("//div[@id='unifyprompt']//p//text()").extract()
        description = re.sub('\[[0-9]+\]', '', ''.join(desc).replace('\n', ''))
        lines += '词条描述' + '$$' + description + '\n'
        # Entry tags.
        labels = selector.xpath("//div[@class='place']//p//a//text()").extract()
        label = ','.join(labels).replace('\n', '').replace(' ', '')
        lines += '词条标签' + '$$' + label + '\n'
        # Write everything under root_path/hudong_infos/<title>.txt.
        path = os.path.join(self.root_path, 'hudong_infos')
        if not os.path.exists(path):
            os.mkdir(path)
        title = ''.join(selector.xpath('//h1/text()').extract()).replace('/', '')
        with codecs.open(os.path.join(path, title + '.txt'), 'w', encoding='utf-8') as f:
            f.write(lines)

    def save_page_content(self, content):
        """Save the raw page HTML under root_path/hudong_pages/<title>.html.

        :param content: response.body
        """
        selector = Selector(text=content)
        title = selector.xpath('//title/text()').extract()[0].strip()
        path = os.path.join(self.root_path, 'hudong_pages')
        if not os.path.exists(path):
            os.mkdir(path)
        with codecs.open(os.path.join(path, title + '.html'), 'w', encoding='utf-8') as f:
            f.write(content.decode('utf-8', errors='ignore'))
|
import time
from models import Model
class Topic(Model):
    """Forum topic persisted through the project's Model base class."""

    @classmethod
    def get(cls, id):
        """Fetch a topic by id.  Side effect: increments and saves the view
        counter on every call."""
        m = cls.find_by(id=id)
        m.views += 1
        m.save()
        return m

    def __init__(self, form):
        # id is None until the persistence layer assigns one.
        self.id = None
        self.views = 0
        self.title = form.get('title', '')
        self.content = form.get('content', '')
        self.ct = int(time.time())  # created-at, Unix seconds
        self.ut = self.ct           # updated-at, starts equal to created-at
        self.user_id = form.get('user_id', '')
        self.board_id = int(form.get('board_id', -1))  # -1 == no board

    def replies(self):
        """Return all Reply rows attached to this topic."""
        from .reply import Reply
        ms = Reply.find_all(topic_id=self.id)
        return ms

    def time(self):
        """Return created-at formatted 'YYYY/MM/DD HH:MM:SS' in local time."""
        time_format = '%Y/%m/%d %H:%M:%S'
        localtime = time.localtime(self.ct)
        formatted = time.strftime(time_format, localtime)
        return formatted

    def user(self):
        """Return the topic author's username."""
        from .user import User
        user = User.find_by(id=self.user_id)
        return user.username

    def days(self):
        """Return the topic's age as '今天' (<1 day) or 'N天前' (N days ago)."""
        now = int(time.time())
        t_ia = now-self.ct
        day = t_ia // 86400  # seconds per day
        if day == 0:
            return '今天'
        else:
            return '{}天前'.format(day)

    def borad(self):
        """Return the Board this topic belongs to.

        NOTE(review): name looks like a typo for 'board' — kept because
        templates/callers may reference it; also uses Board.find while
        user() uses find_by — confirm both exist on Model.
        """
        from .board import Board
        m = Board.find(self.board_id)
        return m

    def board_title(self):
        """Return the board title, or '未分类' (uncategorized) when unset."""
        from .board import Board
        if self.board_id == -1:
            return '未分类'
        else:
            b = Board.find(self.board_id)
            return b.title

    def user_image(self):
        """Return the topic author's avatar image path."""
        from .user import User
        u = User.find(self.user_id)
        return u.user_image
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import nltk
import re
#########################################################################
# create a dictionary of {[entity_name, URI_de_entity_name]}
#########################################################################
# Load the whitelist of known entity URIs (one per line) from entity_list.txt.
with open('entity_list.txt') as f:
    content = f.readlines()
lines = []
for each_line in content:
    lines.append(each_line.strip('\n'))
# Filled by traverse(): maps entity-name variant -> Wikipedia URI.
dict_entity_name_URI = {}
# Input text file comes from argv[1]; output goes to '<basename>_2.txt'.
file = str(sys.argv[1])
newFile = file.split('/')[-1].split('.')[0] + "_2.txt"
f = open(file, 'r')
sample = f.readlines()
f.close()
# Chunk grammar: an NP is det/adj + noun, or a run of proper/common nouns.
patterns = """
NP: {<DT|PP\$>?<JJ>*<NN>}
{<NNP>+}
{<NN>+}
"""
NPChunker = nltk.RegexpParser(patterns) # create a chunk parser
# Re-read the whole file as one string for the final substitution pass.
f = open(file, 'r')
filedata = f.read()
f.close()
# a tree traversal function for extracting NP chunks in the parsed tree
def traverse(t):
each_entity_name = []
try:
t.label
except AttributeError:
return
else:
if t.label() == 'NP':
# print("t : ", t) # (NP Vanessa/NNP Paradis/NNP)
# child : ('Vanessa', 'NNP')
# child : ('Paradis','NNP')
nnp_child_name = ""
for child in t:
if child[1] == 'NNP':
nnp_child_name = nnp_child_name + child[0] + " "
nnp_child_name = nnp_child_name.replace('\n', '')
nnp_child_name = nnp_child_name.replace('\t', '')
nnp_child_name = nnp_child_name.strip('\n')
nnp_child_name = nnp_child_name.strip('_')
if nnp_child_name != "":
nnp_child_name = nnp_child_name.replace(' ', '_').strip('_').replace('_', ' ')
each_entity_name.append(nnp_child_name)
nnp_child_name_URI = 'http://en.wikipedia.org/wiki/' + nnp_child_name.replace(' ', '_').strip('_')
for each_URI in lines:
if each_URI == nnp_child_name_URI:
nnp_child_name_lists = []
nnp_child_name_lists.append(nnp_child_name)
nnp_child_name_lists += nnp_child_name.split(" ")
for l in nnp_child_name_lists:
dict_entity_name_URI[l] = nnp_child_name_URI
else:
for child in t:
traverse(child)
# Tag and chunk every line, collecting entity names/URIs via traverse().
for line in sample:
    tokenized_words = nltk.word_tokenize(line)
    tagged_words = nltk.pos_tag(tokenized_words)
    result = NPChunker.parse(tagged_words)
    traverse(result)
keys = dict_entity_name_URI.keys()
# define desired replacements here: each entity name is wrapped in an
# <entity name="URI">...</entity> tag.
rep = {}
for key in keys:
    key_URI = "<entity name=\"" + dict_entity_name_URI[key] + "\">" + key + "</entity>"
    rep[key] = key_URI
# Single-pass replacement: one alternation regex over all entity names.
rep = dict((re.escape(k), rep[k]) for k in rep.keys())
pattern = re.compile("|".join(rep.keys()))
filedata = pattern.sub(lambda m: rep[re.escape(m.group(0))], filedata)
# Fixed: the original never closed (and therefore possibly never flushed)
# the output file; a context manager guarantees both.
with open(newFile, 'w') as result_file:
    result_file.write(filedata)
|
from django.shortcuts import render
# Create your views here.
def homepage(request, *args, **kwargs):
    """Render the main page, or the login page when there is no session key."""
    template = "loginPage.html" if not request.session.session_key else "main.html"
    return render(request, template)
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from datasets import MultiMNIST
from train import load_checkpoint
from utils import char_tensor, charlist_tensor
from utils import tensor_to_string
def fetch_multimnist_image(label):
    """Return a random image from the MultiMNIST dataset with label.

    @param label: string
        a string of up to 4 digits
    @return: torch.autograd.Variable
        MultiMNIST image (1 x H x W), volatile (inference only)
    """
    dataset = MultiMNIST('./data', train=False, download=True,
                         transform=transforms.ToTensor(),
                         target_transform=charlist_tensor)
    # Fixed: the original rebound `images = []` after `images = dataset.test_data`,
    # so `images[i]` always raised IndexError; source data and the matches
    # accumulator are now separate variables.
    all_images = dataset.test_data
    labels = dataset.test_labels
    n_rows = len(all_images)
    matches = []
    for i in xrange(n_rows):
        image = all_images[i]
        text = labels[i]
        if tensor_to_string(text.squeeze(0)) == label:
            matches.append(image)
    if len(matches) == 0:
        sys.exit('No images with label (%s) found.' % label)
    matches = torch.cat(matches).cpu().numpy()
    # Pick one matching image uniformly at random.
    ix = np.random.choice(np.arange(matches.shape[0]))
    image = matches[ix]
    image = torch.from_numpy(image).float()
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)
def fetch_multimnist_text(label):
    """Encode a digit-string label as a character tensor.

    @param label: string
        a string of up to 4 digits
    @return: torch.autograd.Variable
        Variable wrapping the encoded characters, volatile (inference only)
    """
    encoded = char_tensor(label).unsqueeze(0)
    return Variable(encoded, volatile=True)
if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular image
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='If True, generate text conditioned on an image.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='If True, generate images conditioned on a text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    # mode 1: unconditional generation — sample from the standard normal prior
    if not args.condition_on_image and not args.condition_on_text:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image and not args.condition_on_text:
        image = fetch_multimnist_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.infer(1, image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text and not args.condition_on_image:
        text = fetch_multimnist_text(args.condition_on_text)
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(1, text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    elif args.condition_on_text and args.condition_on_image:
        image = fetch_multimnist_image(args.condition_on_image)
        text = fetch_multimnist_text(args.condition_on_text)
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(1, image=image, text=text)
        std = logvar.mul(0.5).exp_()

    # sample from uniform gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data
    txt_recon = torch.max(txt_recon, dim=2)[1]  # argmax over the char axis

    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 50, 50),
               './sample_image.png')
    # save text samples to filesystem
    # Fixed: the original referenced undefined `text_recon`; the decoded
    # text tensor is named `txt_recon`.
    with open('./sample_text.txt', 'w') as fp:
        for i in xrange(txt_recon.size(0)):
            txt_recon_str = tensor_to_string(txt_recon[i])
            fp.write('Text (%d): %s\n' % (i, txt_recon_str))
|
from pyramid.view import view_config, view_defaults
from formencode import validators
from formencode.api import Invalid
from pyramid.httpexceptions import HTTPBadRequest
from ..models import DBSession, Manufacturer
from ..schemas.add_part import AddPartSchema
from ..utils.dbhelpers import get_or_404
from .base import BaseView
class ManufacturersEditView(BaseView):
    """AJAX endpoint backing inline (x-editable style) edits of a Manufacturer."""

    @view_config(
        route_name='manufacturers_edit',
        renderer='json',
        request_method='POST')
    def manufacturers_edit(self):
        """Update one field of the POSTed manufacturer.

        POST params: pk (manufacturer id), name (field to edit: 'name' or
        'url'), value (new value).
        Returns {'value': ...} on success; sets HTTP 400 and returns
        {'message': ...} when the URL fails validation.
        NOTE(review): a POST whose 'name' is neither 'name' nor 'url' falls
        through and returns None — confirm that is intended.
        """
        manufacturer = get_or_404(Manufacturer, self.request.POST.get('pk'))
        if self.request.POST.get('name') == 'name':
            manufacturer.name = self.request.POST.get('value')
            return {'value': manufacturer.name}
        if self.request.POST.get('name') == 'url':
            raw_url = self.request.POST.get('value')
            # add_http=True prepends http:// when the scheme is missing.
            val = validators.URL(add_http=True)
            try:
                manufacturer.url = val.to_python(raw_url)
                return {'value': manufacturer.url}
            except Invalid as e:
                self.request.response.status = 400
                return {'message': str(e)}
|
import os
import numpy as np
from torchvision import models, transforms
import torch
import torch.nn as nn
from PIL import Image
from torch.nn import functional as F
import numpy as np
from models.modeling import VisionTransformer, CONFIGS
with open('./2021VRDL_HW1_datasets/testing_img_order.txt') as f:
    test_images = [x.strip() for x in f.readlines()]  # all the testing images
with open('./2021VRDL_HW1_datasets/classes.txt') as f:
    class_ind = [x.strip() for x in f.readlines()]

# Fixed: the original rebuilt the device/transforms and reloaded the full
# ViT checkpoint from disk on EVERY loop iteration; all of that is
# loop-invariant, so it is now done once up front.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop((224, 224), scale=(0.05, 1.0)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ]),
    'validation':
    transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
}
config = CONFIGS["ViT-B_16"]
model = VisionTransformer(config, 224, zero_head=True, num_classes=200)
model.load_state_dict(torch.load('output/bird_checkpoint.bin'))
model.to(device)
model.eval()

i = 0
submission = []
for img in test_images:  # image order is important to your result
    print(i)
    i += 1
    # predict the class of one test image
    test_image = Image.open('2021VRDL_HW1_datasets/testing_images/'+img)
    with torch.no_grad():
        validation = torch.stack(
            [image_transforms['validation'](test_image).to(device)])
        pred = model(validation)[0]
        preds = torch.argmax(pred, dim=-1)
    preds = preds.cpu().numpy()[0]
    predicted_class = class_ind[preds]
    print(predicted_class)
    submission.append([img, predicted_class])
# One line per image: "<filename> <predicted class>".
np.savetxt('answer.txt', submission, fmt='%s')
|
"""
When ever a wallet is created, two keys are generated,
the public key and the private key. The private key belongs
to the user along and we can not hold or save the private key.
the public key - we can store and it would be used when sending zuri coin
to the user...
"""
import binascii
from uuid import uuid4
import Crypto.Random
import requests
from Crypto import Signature
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from hashlib import sha256
from datetime import datetime
def SHA256(text):
    """Return the hex SHA-256 digest of `text` (ASCII-encoded).

    NOTE(review): this function shadows the `SHA256` module imported from
    Crypto.Hash above — confirm the commented-out signing code that needed
    it is really dead before relying on that import.
    """
    digest = sha256(text.encode('ascii'))
    return digest.hexdigest()
class Wallet:
    """Creates, loads and holds private and public keys. Manages transaction
    signing and verification.

    Keys are held as hex-encoded DER strings once generated; None before.
    """

    def __init__(self):
        self.private_key = None
        self.public_key = None
        self.access_token = None  # uuid4 issued when the wallet is saved

    def create_keys(self):
        """Create a new pair of private and public keys."""
        private_key, public_key = self.generate_keys()
        self.private_key = private_key
        self.public_key = public_key

    def load_keys(self):
        """Loads the keys from the wallet.txt file into memory.

        Expects the public key on the first line and the private key on the
        second, each as the last space-separated token.  Returns True on
        success, False (with a message) on failure.
        """
        try:
            with open('wallet.txt', mode='r') as f:
                keys = f.readlines()
            public_key = keys[0][:-1].split(" ")[-1]
            private_key = keys[1].split(" ")[-1]
            self.public_key = public_key
            self.private_key = private_key
            return True
        except (IOError, IndexError):
            print('Loading wallet failed...')
            return False

    def print_keys(self):
        """Print wallet.txt line by line.

        Fixed: the original opened the file and never closed it; a context
        manager now guarantees the handle is released.
        """
        with open("wallet.txt", "r+", encoding='utf-8-sig') as txt:
            for lines in txt:
                line = lines.strip("\n")
                print(line)

    def save_keys(self):
        """Register the key pair with the remote wallet service and append it
        (with a fresh access token) to wallet.txt on success.

        Blocking network call.  SECURITY NOTE(review): the public key, the
        SHA-256 of the private key and the access token all travel in the
        URL query string — confirm this endpoint/transport is intentional.
        """
        if self.public_key and self.private_key:
            self.access_token = uuid4()
            url = "https://a1in1.com/Zuri Coin/Waziri_Coin/waziri_d_enter_walletor.php" \
                + "?pub={}&private={}&access={}".format(
                    self.public_key,
                    SHA256(self.private_key),
                    str(self.access_token)
                )
            response = requests.get(url)
            print(self.public_key)
            print(SHA256(self.private_key))
            print(self.access_token)
            print(response.text)
            data = dict(response.json())
            if response.status_code == 200 and data.get("status") == "true":
                print('Saving wallet successful ')
                today = str(datetime.today().isoformat())
                with open('wallet.txt', mode='a') as f:
                    f.write(str(today + "\n" + "Public Key: " + self.public_key + "\n" + \
                        "Private Key: " + self.private_key + "\n" + \
                        "Access Token: " + str(self.access_token) + "\n"
                    ))
                print("Saving wallet Locally was Successful \n")
            else:
                print('Saving wallet failed...')

    def generate_keys(self):
        """Generate a new pair of private and public key.

        :return: (private_key, public_key) as hex-encoded DER strings.
        NOTE(review): 1024-bit RSA is below current recommendations (2048+).
        """
        private_key = RSA.generate(1024, Crypto.Random.new().read)
        public_key = private_key.publickey()
        return (
            binascii
            .hexlify(private_key.exportKey(format='DER'))
            .decode('ascii'),
            binascii
            .hexlify(public_key.exportKey(format='DER'))
            .decode('ascii')
        )
# def sign_transaction(self, sender, recipient, amount):
# """Sign a transaction and return the signature.
# Arguments:
# :sender: The sender of the transaction.
# :recipient: The recipient of the transaction.
# :amount: The amount of the transaction.
# """
# signer = PKCS1_v1_5.new(RSA.importKey(
# binascii.unhexlify(self.private_key)))
# h = SHA256.new((str(sender) + str(recipient) +
# str(amount)).encode('utf8'))
# signature = signer.sign(h)
# return binascii.hexlify(signature).decode('ascii')
# @staticmethod
# def verify_transaction(transaction):
# """Verify the signature of a transaction.
# Arguments:
# :transaction: The transaction that should be verified.
# """
# public_key = RSA.importKey(binascii.unhexlify(transaction.sender))
# verifier = PKCS1_v1_5.new(public_key)
# h = SHA256.new((str(transaction.sender) +
# str(transaction.recipient) +
# str(transaction.amount)).encode('utf8'))
# return verifier.verify(h, binascii.unhexlify(transaction.signature))
""" if __name__ == '__main__':
wallet = Wallet()
wallet.create_keys()
wallet.save_keys()
#wallet.load_keys() """
|
# -*- coding: utf-8 -*-
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# class delivery_carrier(osv.osv):
# _inherit = "delivery.carrier"
# _columns = {
# 'use_webservice_pricelist': fields.boolean('Advanced Pricing by WebService', help="Check this box if ..."),
# 'webservice_id': fields.many2one('delivery.webservice', 'Service', 'WebService'),
# 'webservice_type': fields.char('Service Type', size=32),
# }
class DeliveryGrid(osv.osv):
    """Extend delivery.grid with webservice identification and credentials
    (presumably for rate lookups against an external carrier API —
    confirm against the rest of the module)."""
    _inherit = "delivery.grid"
    # _description = "Delivery Webservice"
    _columns = {
        'service': fields.char('Service Name', size=32),
        'service_type': fields.char('Name', size=32),
        'login': fields.char('Login:', size=32),
        'password': fields.char('Password:',size=32)
    }
|
import vector
import math
import game
import action
import hero
import direction
import time
import heapq
def manhattanDistance(p,q):
    """Return the L1 (taxicab) distance between two project Vectors.

    Raises ValueError when either argument is not a vector.Vector.
    """
    for point in (p, q):
        if not isinstance(point, vector.Vector):
            raise ValueError("Variable is not a Vector.")
    return math.fabs(p.x - q.x) + math.fabs(p.y - q.y)
def solveGameGradientDescending(game_to_solve,max_steps = 1000):
    """Greedy descent: at each step take the action whose successor state has
    the smallest Manhattan distance to the goal.

    :param game_to_solve: game.Game instance
    :param max_steps: iteration cap (greedy search can cycle without it)
    :return: list of actions taken; may stop at the cap without reaching
        the goal
    """
    if not isinstance(game_to_solve,game.Game):
        raise ValueError("Variable is not a Game.")

    def functionToMinimize(game_to_solve,hero_state):
        # The objective being descended: L1 distance from hero to goal.
        goal_position = game_to_solve.getGoalPosition()
        return manhattanDistance(hero_state.getPosition(),goal_position)

    goal_position = game_to_solve.getGoalPosition()
    hero_state = game_to_solve.getHero().copy()
    actions_to_take = []
    steps = 0
    while True:
        steps += 1
        min_dist = float("inf")
        best_action = None
        # Evaluate every legal action one step ahead; keep the best.
        for act in action.ACTIONS:
            next_hero_state = game_to_solve.transitionModel(hero_state,act)
            dist = functionToMinimize(game_to_solve,next_hero_state)
            if dist < min_dist:
                min_dist = dist
                best_action = act
        hero_state = game_to_solve.transitionModel(hero_state,best_action)
        actions_to_take.append(best_action)
        # Debug output (Python 2 print statements — this module is Python 2).
        print "--"
        print goal_position
        print hero_state.getPosition()
        if goal_position == hero_state.getPosition() or steps > max_steps:
            break
    return actions_to_take
def solveAStar(game_to_solve):
    """A* search over hero states using Manhattan distance as the heuristic.

    :param game_to_solve: game.Game instance
    :return: list of actions from the start state to the goal, or [] when
        the frontier is exhausted
    """
    if not isinstance(game_to_solve,game.Game):
        raise ValueError("Variable is not a Game.")

    def heuristic(game_to_solve,hero_state):
        # Admissible estimate: L1 distance from hero to goal.
        goal_position = game_to_solve.getGoalPosition()
        return manhattanDistance(hero_state.getPosition(),goal_position)

    goal_position = game_to_solve.getGoalPosition()
    hero_start_state = game_to_solve.getHero().copy()
    # Frontier entries: (f-score, state, action-that-reached-it, parent-state).
    heap = [(heuristic(game_to_solve,hero_start_state),hero_start_state,None,None)]
    HEAP_HEURISTIC = 0
    HEAP_STATE = 1
    HEAP_ACTION_TAKEN_THERE = 2
    HEAP_FATHER_STATE = 3
    # NOTE(review): `visited` is a list, so membership tests are O(n) per pop.
    visited = []
    # Maps state -> (action taken to reach it, parent state) for path recovery.
    recover_map = {}
    RECOVER_ACTION = 0
    RECOVER_FATHER = 1
    ACTIONS_COST = 1
    while True:
        heap_element = heapq.heappop(heap)
        # Skip stale frontier entries for already-expanded states.
        while heap_element[HEAP_STATE] in visited:
            if len(heap) == 0:
                return []
            heap_element = heapq.heappop(heap)
        visited.append(heap_element[HEAP_STATE])
        recover_map[heap_element[HEAP_STATE]] = (heap_element[HEAP_ACTION_TAKEN_THERE], heap_element[HEAP_FATHER_STATE])
        if goal_position == heap_element[HEAP_STATE].getPosition():
            # Goal reached: walk parent links back to the start, then reverse.
            recover_map[hero_start_state] = None
            actions = []
            state_tuple = heap_element[HEAP_STATE]
            recover = recover_map[state_tuple]
            while recover != None:
                actions.append(recover[RECOVER_ACTION])
                new_state = recover[RECOVER_FATHER]
                state_tuple = new_state
                recover = recover_map[state_tuple]
            actions.reverse()
            return actions
        # Expand: push each successor with f = g + cost + h(next) - h(current)
        # (the parent's h is subtracted back out of its stored f-score).
        actions_possible = action.ACTIONS
        for act in actions_possible:
            next_state = game_to_solve.transitionModel(heap_element[HEAP_STATE], act)
            heapq.heappush(heap, (heap_element[0] + game_to_solve.costAction(heap_element[HEAP_STATE],act) + heuristic(game_to_solve,next_state) - heuristic(game_to_solve,heap_element[HEAP_STATE]), next_state, act, heap_element[HEAP_STATE]))
|
import itertools


def get_permutations(l):
    """Yield every integer formed by permuting 1..len(l) of the digits in l."""
    for length in range(1, len(l) + 1):
        for perm in itertools.permutations(l, length):
            number = 0
            for digit in perm:
                number = number * 10 + digit
            yield number


def solution(l):
    """Return the largest multiple of 3 formable from a subset of the digits
    in l (in any order), or 0 when no such number exists."""
    best = 0
    for candidate in get_permutations(l):
        if candidate % 3 == 0 and candidate > best:
            best = candidate
    return best
# generic metadata generator for all urls stored in text file
# reading urls from text file and generating metadata to data.json file
from newsplease import NewsPlease
import json

# Fetch article metadata for every URL listed (one per line) in y.txt and
# dump the collected records to data.json.
data = []
# Fixed: the original opened y.txt and never closed it.
with open("y.txt", "r") as f:
    c = f.readlines()
for post in c:
    print(post)
    x = NewsPlease.from_url(post)  # network fetch + article extraction
    data_json = {
        "URL": x.url,
        "Domain": x.source_domain,
        "title": x.title,
        "author": str(x.authors),
        "text": str(x.text),
        "date_published": str(x.date_publish)}
    data.append(data_json)
with open('data.json', 'w+') as outfile:
    json.dump(data, outfile)
|
# Simple GUI with:
#   Label
#   Scrolledtext
#   Button
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext

# Fixed-size 300x300 window with a custom title and icon.
root = tk.Tk()
root.title("My Personal Title")
root.geometry("300x300")
root.resizable(0, 0)
root.wm_iconbitmap("images/ok.ico")

# Label to guide user:
# Scrolledtext
# Button to continue. It does not work... yet!
next_button = ttk.Button(root, text="Next")
next_button.pack(pady=50)

root.mainloop()
|
import time
import datetime
print(time.time())  # seconds elapsed since the Unix epoch (1970-01-01)
print(time.localtime())  # current time as a struct_time: tm_wday is 0-6 with 0 = Monday; tm_yday counts days since Jan 1; tm_isdst flags daylight-saving time (-1, 0 or 1)
print(time.strftime(r'%Y-%m-%d %H:%M:%S'))  # current time formatted as a string
print(datetime.datetime.now())  # current date and time
# Date arithmetic: adding and subtracting offsets.
oneday = datetime.datetime(2008,2,21)  # 2008-02-21 as a datetime object
day = datetime.timedelta(days=20,hours=3,minutes=20,seconds=48)  # offset of 20 days, 3 hours, 20 minutes and 48 seconds
newday = oneday + day
print(newday)  # the shifted date, printed in datetime format
print(datetime.datetime.now() - newday)  # time difference between now and newday
aday = datetime.datetime.strptime('20190801',r'%Y%m%d')  # parse a date string into a datetime object
print(type(aday))
|
from django.db import models
# Create your models here.
"""
1 先写普通字段
2 再写外键字段
"""
from django.contrib.auth.models import AbstractUser
class UserInfo(AbstractUser):
    """Site user: extends Django's AbstractUser with profile fields."""
    # null=True: the DB column may be NULL; blank=True: admin forms may leave it empty.
    phone = models.BigIntegerField(verbose_name='手机号', null=True, blank=True)
    """
    null=True 数据库该字段可以为空
    blank=True admin后台管理该字段可以为空
    """
    # Uploaded files are stored under the 'avatar' directory; the DB only keeps
    # the relative path (default avatar/default.png).
    avatar = models.FileField(upload_to='avatar', default='avatar/default.png', verbose_name='头像')
    """
    给avatar传文字对象,文件会自动存储到avatar文件夹下
    默认只保存avatar/default.png
    """
    create_time = models.DateField(auto_now_add=True)
    # Each user owns at most one blog; deleting the blog cascades here.
    blog = models.OneToOneField(to='Blog', null=True, on_delete=models.CASCADE)
    class Meta:
        verbose_name_plural = '用户表'  # overrides the default table name shown in the Django admin
        # verbose_name = '用户表'  # an 's' would still be appended automatically
    def __str__(self):
        return self.username
class Blog(models.Model):
    """Per-user site/blog configuration."""
    site_name = models.CharField(verbose_name='站点名称', max_length=32)
    site_title = models.CharField(verbose_name='站点标题', max_length=32)
    site_theme = models.CharField(verbose_name='站点样式', max_length=64)  # stores the css/js file path of the theme
    def __str__(self):
        return self.site_name
class Category(models.Model):
    """Article category; each category belongs to one blog."""
    name = models.CharField(verbose_name='文章分类', max_length=32)
    blog = models.ForeignKey(to='Blog', null=True, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Tag(models.Model):
    """Article tag; each tag belongs to one blog."""
    name = models.CharField(verbose_name='文章标签', max_length=32)
    blog = models.ForeignKey(to='Blog', null=True, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Article(models.Model):
    """Blog article with denormalized vote/comment counters."""
    title = models.CharField(verbose_name='文章标题', max_length=64)
    desc = models.CharField(verbose_name='文章简介', max_length=255)
    # Article bodies can be long, so TextField instead of CharField.
    content = models.TextField(verbose_name='文章内容')
    create_time = models.DateField(auto_now_add=True)
    # Denormalized counters so list pages avoid per-row aggregate queries.
    up_num = models.BigIntegerField(verbose_name='点赞数', default=0)
    down_num = models.BigIntegerField(verbose_name='点踩数', default=0)
    comment_num = models.BigIntegerField(verbose_name='评论数', default=0)
    # Foreign keys
    blog = models.ForeignKey(to='Blog', null=True, on_delete=models.CASCADE)
    category = models.ForeignKey(to='Category', null=True, on_delete=models.DO_NOTHING)
    # Semi-automatic through table: keeps the ORM m2m API while allowing extra columns.
    # tag = models.ManyToManyField(to='Tag', null=True)
    # FIX: dropped null=True — it has no effect on ManyToManyField and triggers
    # Django system-check warning fields.W340.
    tags = models.ManyToManyField(to='Tag',
                                  through='Article2Tag',
                                  through_fields=('article', 'tag'))
    def __str__(self):
        return self.title
class Article2Tag(models.Model):
    """Hand-built through (junction) table linking articles and tags."""
    article = models.ForeignKey(to='Article', on_delete=models.CASCADE)
    tag = models.ForeignKey(to='Tag', on_delete=models.CASCADE)
class UpAndDown(models.Model):
    """Records one user's up/down vote on one article."""
    user = models.ForeignKey(to='UserInfo', on_delete=models.CASCADE)
    article = models.ForeignKey(to='Article', on_delete=models.CASCADE)
    is_up = models.BooleanField()  # takes a boolean, stored as 0/1
class Comment(models.Model):
    """User comment on an article; the self-referencing parent enables replies."""
    user = models.ForeignKey(to='UserInfo', on_delete=models.CASCADE)
    article = models.ForeignKey(to='Article', on_delete=models.CASCADE)
    content = models.CharField(verbose_name='评论内容', max_length=255)
    comment_time = models.DateTimeField(verbose_name='评论时间', auto_now_add=True)
    # Self-referential FK: a reply points at its parent comment (NULL for root comments).
    # parent = models.ForeignKey(to="Comment", null=True)
    parent = models.ForeignKey(to="self", null=True, on_delete=models.CASCADE)
|
# ----------------------------------
# CCF1d.py (SPARTA UNICOR class)
# ----------------------------------
# This file defines the "CCF1d" class. An object of this class stores
# a Spectrum object, saved in the self.spec field and a Template object
# stored in the self.template field.
#
# ---------------------------------------------
# A CCF1d class stores the following methods:
# ---------------------------------------------
# 1) CrossCorrelateSpec - Multi-order cross correlation.
# 2) CombineCCFs - Sums the CCFs of a multi-order spectrum.
# 3) extract_RV - Get the radial velocity and its uncertainty from maximum likelihood.
# 4) calcBIS - Determine full-with-half-maximum of a peaked set of points, x and y.
# 5) subpixel_CCF - Using a second order approximation to estimate the ccf at a given velocity.
# 6) correlate1d - A wrapper of the jitted one-dimensional correlation
# 7) plotCCFs - Produces plots of the calculated CCFs
#
#
# Dependencies: scipy, numpy, astropy, numba, matplotlib and copy.
# Last update: Avraham Binnenfeld, 20210510.
from scipy import interpolate
from scipy.signal import correlate
import numpy as np
from astropy import constants as consts, units as u
from numba import njit
import matplotlib.pyplot as plt
from copy import deepcopy
class CCF1d:
    """Multi-order cross-correlation of a spectrum against a template.

    Results are stored on the instance: ``self.Corr`` holds the per-order
    CCFs and ``self.CorrCombined`` the order-combined CCF.
    """
    # =============================================================================
    # =============================================================================
    def __init__(self):
        '''
        No input required.
        Some defaults are set...
        '''
        self.c = consts.c.to('km/s')  # The speed of light in km/sec
        self.default_dv = 0.1 * u.kilometer / u.second

    # =============================================================================
    # =============================================================================
    def CrossCorrelateSpec(self, spec_in, template_in,
                           dv=None, VelBound=100, err_per_ord=False, fastccf=False):
        '''
        All input is optional, and needs to be called along
        with its keyword. Below appears a list of the possible input
        variables.
        :param: template - the template for the CCF (see template class)
        :param: dv - scalar. If the spectrum is not logarithmically
                     evenly-spaced it is resampled, with a stepsize
                     determined according to dv (in km/s). Default 0.1
        :param: VelBound - scalar. The velocity bounds for the CCF,
                     determined according to [-VelBound, VelBound].
                     Default is 100 km/s
        :param: err_per_ord - Boolean. Indicates if the error should be
                     calculated (from maximum likelihood) for each order.
        :return: self.Corr - a dictionary with the following fields:
                     'vel' - velocity vector at which correlation
                             was calculated.
                     'corr'- correlation matrix,
                             (# orders X length of velocity vector)
                     'RV' - Derived radial velocity
                     'eRV' - Corresponding uncertainty.
                     'peakCorr' - Corresponding CCF peak.
        '''
        # Initialize:
        # ----------
        template = deepcopy(template_in)
        spec = deepcopy(spec_in)

        if dv is None:
            try:
                # NOTE(review): CCF1d instances never define self.Info, so this
                # lookup normally fails and the default is used. It may have
                # been intended to read spec.Info — confirm before changing.
                dv = self.Info['GridDelta']
            except (AttributeError, KeyError):  # was a bare except
                dv = self.default_dv
        elif type(dv) is not u.quantity.Quantity:
            dv = float(dv) << u.kilometer / u.second

        if type(VelBound) is not u.quantity.Quantity:
            VelBound = np.array(VelBound) << dv.unit
        if VelBound.size == 2:
            Vi, Vrange = np.min(VelBound), np.abs(np.diff(VelBound))
        elif VelBound.size == 1:
            Vi, Vrange = -np.abs(VelBound), np.abs(2*VelBound)
        else:
            # Previously fell through silently and crashed later with NameError.
            raise ValueError("VelBound must contain one or two elements.")

        # In case that the spectrum is not logarithmically spaced,
        # it must be interpolated to a logarithmically evenly-spaced
        # grid. The parameter of the grid is dv [km/s].
        if ('GridType' not in spec.Info) or ('GridDelta' not in spec.Info) or spec.Info['GridType'] == 'linear':
            spec.InterpolateSpectrum(delta=dv, InterpMethod='log')
        # If the data is already logarithmically spaced, then read the dv
        # of the wavelength grid (used to set the velocity axis of the CCF)
        elif spec.Info['GridType'] == 'log':
            dv = spec.Info['GridDelta']
        else:
            if spec.Info['GridType'] == 'linear':
                spec.InterpolateSpectrum(dv, InterpMethod='log')
                spec.Info['GridDelta'] = dv
                spec.Info['GridUnits'] = 'velocity'
            elif spec.Info['GridDelta'] != dv:
                spec.InterpolateSpectrum(dv, InterpMethod='log')
                spec.Info['GridDelta'] = dv
                spec.Info['GridUnits'] = 'velocity'

        # The cross correlation is performed on a velocity range defined by
        # the user. The range is converted to a number of CCF lags, using
        # the velocity spacing dv. Default is 100 km/s.
        Nlags = np.floor((Vrange/dv).decompose().value)
        Nord = len(spec.wv)

        # Calculate the velocity from the lags
        V = Vi + dv * np.arange(Nlags+1)

        # Initialize arrays (np.NaN was removed in NumPy 2.0 — use np.nan)
        corr = np.full((len(spec.wv), len(V)), np.nan)
        RV = np.full((len(spec.wv), 1), np.nan)
        eRV = np.full((len(spec.wv), 1), np.nan)
        SpecCorr = np.full((len(spec.wv), 1), np.nan)

        for I, w in enumerate(spec.wv):
            # In order for the CCF to be normalized to the [-1,1] range
            # the signals must be divided by their standard deviation.
            s = spec.sp[I]

            # Interpolate the template to the wavelength scale of the
            # observations. We assume here that the template is broadened
            # to match the width of the observed line profiles.
            interpX = (np.asarray(template.model.wv[I][:]) *
                       (1+((Vi/self.c).decompose()).value))
            interpY = np.asarray(template.model.sp[I][:])
            InterpF = interpolate.interp1d(interpX,
                                           interpY,
                                           kind='quadratic')
            GridChoice = np.logical_and(w > np.min(interpX),
                                        w < np.max(interpX))
            wGrid = np.extract(GridChoice, w)
            spT = InterpF(wGrid)

            # The data and the template are cross-correlated.
            # We assume that the wavelengths are logarithmically-evenly spaced
            C = self.correlate1d(spT, s, Nlags+1, fastccf=fastccf)

            # Find the radial velocity by fitting a parabola to the CCF peaks
            try:
                if err_per_ord:
                    # Calculate the uncertainty of each order (if required)
                    vPeak, vPeakErr, ccfPeak, vUnit = self.extract_RV(V, C, n_ord=1)
                else:
                    vPeak, ccfPeak = self.subpixel_CCF(V, C)
                    vPeakErr = np.nan
            except Exception:  # narrow from a bare except; peak fit may fail near array edges
                vPeak, vPeakErr, ccfPeak = np.nan, np.nan, np.nan

            corr[I, :] = C
            RV[I] = vPeak
            eRV[I] = vPeakErr
            SpecCorr[I] = ccfPeak

        self.Corr = {
            'vel': V,
            'corr': corr,
            'RV': RV,
            'eRV': eRV,
            'units': V.unit,
            'peakCorr': SpecCorr}
        self.n_ord = Nord
        return self

    # =============================================================================
    # =============================================================================
    def CombineCCFs(self):
        '''
        This routine takes a matrix of CCF values (# orders X length of velocity vector)
        that were calculated by the CrossCorrelateSpec routine, combines the CCFs into
        a single one, based on a maximum-likelihood approach (Zucker, 2003, MNRAS).
        The RV is derived from the peak of the cross-correlation, and the uncertainty
        is calculated as well.
        :param: none.
        :return: A 'CorrCombined' dictionary, with derived combined correlation,
                 derived velocity and uncertainty. The structure is similar
                 to the 'Corr' dictionary.
        NOTE: the number of lags is assumed to be identical for all orders
        '''
        # Arrange the correlation matrix
        CorrMat = self.Corr['corr']
        velocities = self.Corr['vel']

        # Read the number of orders in the spectrum
        Nord = self.n_ord

        # Combine the CCFs according to Zucker (2003, MNRAS), section 3.1
        CombinedCorr = np.sqrt(1-(np.prod(1-CorrMat**2, axis=0))**(1/Nord))
        try:
            V, eRV, CorrPeak, vUnit = self.extract_RV(velocities, CombinedCorr)
            # Return the corresponding velocity grid.
            self.CorrCombined = {
                'vel': velocities,
                'corr': CombinedCorr,
                'RV': V,
                'eRV': eRV,
                'units': vUnit,
                'peakCorr': CorrPeak}
        except Exception:  # narrow from a bare except; keep the CCF even if peak extraction fails
            self.CorrCombined = {
                'vel': velocities,
                'corr': CombinedCorr,
                'RV': np.nan,
                'eRV': np.nan,
                'units': '',
                'peakCorr': np.nan}
        return self

    # =============================================================================
    # =============================================================================
    def extract_RV(self, x, y, vel=None, n_ord=None):
        """
        Get the radial velocity and its uncertainty from maximum likelihood.
        If velocity is given, the uncertainty at this specific point is calculated.
        :param x: velocity grid (astropy Quantity or array).
        :param y: CCF values on that grid.
        :param vel: optional velocity at which to evaluate the uncertainty.
        :param n_ord: number of orders; defaults to self.n_ord.
        :return: (RV, eRV, ccf_value, unit of x)
        """
        if vel is None:
            RV, ccf_value = self.subpixel_CCF(x, y)
        else:
            RV = self.subpixel_CCF(x, y, v=vel)
            ccf_value = np.nan
        if n_ord is None:
            Nord = self.n_ord
        else:
            Nord = n_ord
        Nvels = len(x)

        # Generate the second derivative from spline
        spl = interpolate.UnivariateSpline(x.value, y, k=4, s=0)
        spl_vv = spl.derivative(n=2)

        # Calculate the uncertainty (Zucker 2003 maximum-likelihood estimate):
        ml_factor = spl_vv(RV) * spl(RV) / (1 - spl(RV)**2)
        eRV = np.sqrt(-(ml_factor*Nord*Nvels)**(-1))

        if type(x) is u.quantity.Quantity:
            xUnit = x.unit
        else:
            xUnit = None
        return RV, eRV, ccf_value, xUnit

    # =============================================================================
    # =============================================================================
    def calcBIS(self, x, y, bisect_val=(0.35, 0.95), n_ord=None):
        """
        Determine the bisector inverse slope of a peaked set of points, x and y.
        Assumes that there is only one peak present in the dataset.
        The function uses a spline interpolation of order k.
        (bisect_val default changed from a mutable list to an equivalent tuple.)
        """
        y_low = np.max(y)*bisect_val[0]
        y_high = np.max(y)*bisect_val[1]
        s_low = interpolate.splrep(x.value, y - y_low)
        s_high = interpolate.splrep(x.value, y - y_high)
        roots_low = interpolate.sproot(s_low, mest=2)
        roots_high = interpolate.sproot(s_high, mest=2)
        if (len(roots_low) == 2) and (len(roots_high) == 2):
            low = [self.subpixel_CCF(x, y, v=roots_low[0]),
                   self.subpixel_CCF(x, y, v=roots_low[1])]
            high = [self.subpixel_CCF(x, y, v=roots_high[0]),
                    self.subpixel_CCF(x, y, v=roots_high[1])]
            _, err, _, _ = self.extract_RV(x, y, n_ord=n_ord)
            BIS = (low[1]+low[0])/2 - (high[1]+high[0])/2
            eBIS = np.sqrt(2)*err
            return BIS, eBIS, x.unit
        else:
            return np.nan, np.nan, None

    def subpixel_CCF(self, vels, ccf, v=None, Npts=5):
        """
        This function is using a second order approximation to estimate the ccf
        at a given velocity, v. If no velocity was provided, the CCF's peak velocity
        is returned.
        :param vels: velocity array for a CCF at a given order.
        :param ccf: CCF values that correspond with the velocities in vels.
        :param v: optional velocity at which to evaluate the CCF.
        :param Npts: odd number of points to fit around the peak.
        :return: (x_max, y_max) when v is None, else the interpolated CCF value.
        """
        if type(vels) is u.quantity.Quantity:
            vels = vels.value
        if v is None:
            assert Npts >= 3, "Must have at least 3 points."
            assert Npts % 2 == 1, "Provide an odd number of points to fit around the peak."
            x_n = np.argmax(ccf)
            indlist = [int(x_n - Npts//2 + k) for k in np.arange(Npts)]
            x = np.array(vels[indlist])
            y = np.array(ccf[indlist])
            # Define the design matrix at the given phases.
            DesignMatrix = np.array(
                [[1, x, x**2]
                 for x in x])
            # Solve to obtain the parameters and uncertainties
            C = (np.linalg.inv(
                np.dot(DesignMatrix.transpose(), DesignMatrix)))
            # Derive the parameters:
            pars = C.dot(np.dot(DesignMatrix.transpose(), y))
            y_max = pars[0] - pars[1] * pars[1] / (4 * pars[2])  # maximal CCF result value
            x_max = -pars[1] / (2 * pars[2])  # velocity in maximal value
            return x_max, y_max
        else:
            # Quadratic (Lagrange) interpolation through the 3 points nearest v.
            vels_diff = [i - v for i in vels]
            x_n = np.argmin(np.abs(vels_diff))
            indlist = [int(x_n - 1), int(x_n), int(x_n + 1)]
            x = np.array(vels[indlist])
            y = np.array(ccf[indlist])
            y_interp = (y[0]*(v-x[1])*(v-x[2])/(x[0]-x[1])/(x[0]-x[2]) +
                        y[1]*(v-x[0])*(v-x[2])/(x[1]-x[0])/(x[1]-x[2]) +
                        y[2]*(v-x[0])*(v-x[1])/(x[2]-x[0])/(x[2]-x[1])
                        )
            return y_interp

    # =============================================================================
    # =============================================================================
    def correlate1d(self, template, signal, maxlag=None, fastccf=False):
        '''
        This is a wrapper of the jitted one-dimensional correlation
        :param template: template (model) that will be compared to the signal.
        :param signal: measured signal.
        :param maxlag: maximum number of lags to calculate.
        :param fastccf: use the FFT-based implementation when True.
        :return: the correlation values.
        '''
        if maxlag is None:
            maxlag = 1
        # np.int was removed in NumPy >= 1.24; the builtin int is equivalent here.
        maxlag = int(np.minimum(signal.shape[0], maxlag))
        if not fastccf:
            C = __correlate1d__(template, signal, maxlag)
        else:
            C = __correlate1d_fast__(template, signal, maxlag)
        return C

    # =============================================================================
    # =============================================================================
    def plotCCFs(self, PlotCombined=True, PlotSingleOrds=True, ords=None, alpha=0.125, **kwargs):
        '''
        Produce plots of the calculated CCFs.
        :param PlotCombined: Boolean. Plot the combined CCF (if exists)
        :param PlotSingleOrds: Boolean. Plot the ccf of each order required.
        :param ords: a specific list of order numbers to plot.
        :param kwargs: maybe will contain some plot spec.
        :return: fig object
        '''
        if ords is None:
            ords = np.arange(self.n_ord)
        fig = plt.figure(figsize=(13, 4), dpi=80, facecolor='w', edgecolor='k')
        if PlotSingleOrds:
            for o in ords:
                plt.plot(self.Corr['vel'].value,
                         self.Corr['corr'][o], 'k', alpha=alpha, linewidth=0.75)
        if PlotCombined:
            try:
                plt.plot(self.CorrCombined['vel'],
                         self.CorrCombined['corr'], 'k', linewidth=2.5)
                plt.axvspan(self.CorrCombined['RV']-self.CorrCombined['eRV'],
                            self.CorrCombined['RV']+self.CorrCombined['eRV'],
                            color='red', alpha=0.35)
            except AttributeError:
                # CombineCCFs was never called; nothing to overlay.
                pass
        plt.xlabel(r'Velocity ' + '[' + str(self.Corr['vel'].unit) + ']')
        plt.ylabel(r'CCF')
        plt.grid()
        return fig
@njit
def __correlate1d__(template, signal, maxlag):
    """
    Compute correlation of two signals defined at uniformly-spaced points.
    The correlation is defined only for positive lags. The zero shift is represented as 1 lag.
    The input arrays represent signals sampled at evenly-spaced points.
    Arguments:
    :param template: the template (model) that is compared to the observed signal
    :param signal: the observed signal
    :param maxlag: maximum number of lags to calculate.
    :return: an array with Pearsons correlation for each lag.
    """
    # Initialize an empty array
    C = np.full(maxlag, np.nan)
    # Calculate the cross-correlation
    for lag in range(C.size):
        # Clamp the overlap so the shifted template never reads past either array.
        template_max = np.minimum(signal.size - lag, template.size)
        signal_max = np.minimum(signal.size, template.size + lag)
        C[lag] = np.sum(template[:template_max] * signal[lag:signal_max])
    # Calculate the normalization factor
    # NOTE(review): a single global norm is used for every lag, even though the
    # overlap shrinks with lag — confirm this is intended.
    normFac = np.sqrt((template**2).sum() * (signal**2).sum())
    return C/normFac
def __correlate1d_fast__(template, signal, maxlag):
    """
    FFT-based counterpart of __correlate1d__.
    :param template: the template (model) that is compared to the observed signal
    :param signal: the observed signal
    :param maxlag: maximum number of lags to calculate
    :return: an array of Pearsons correlation for each lag
    """
    full_corr = correlate(signal, template, mode='full', method='fft')
    # In the 'full' output, the zero-lag term sits at index len(template) - 1.
    zero_lag = len(template) - 1
    lags = full_corr[zero_lag:zero_lag + maxlag]
    norm = np.sqrt(np.sum(template**2) * np.sum(signal**2))
    return lags / norm
'''
时刻要记得:数组是可变类型。
'''
list_01 = ['a']
list_02 = list_01 * 4
print(list_02)
list_01.append('b')
print(list_02)
'''
上面这个例子中,两次输出结果都是 ['a', 'a', 'a', 'a']
没有什么疑问,有意思的是下面这个操作
'''
list_03 = [[]]
list_04 = list_03 * 4
print(list_04)
list_03[0].append('a')
print(list_04)
'''
这个例子中,第一次输出的是[[], [], [], []]
而第二次输出的则是[['a'], ['a'], ['a'], ['a']]
原因在于:上个例子中,外层list包含的内容是不可变变量,而这个例子中,外层包含的是可变变量list
在进行*4操作的时候,仅仅是把这个id复制了4份,内部的list指向的仍然是list_03[0],所以,当list_03[0]发生变化的时候
变化会反应到list_04上
所以,进行这种操作的时候,需要使用深拷贝,或者使用下面的方法
'''
list_05 = [[] for __ in range(4)]
'''
这样生成的list就不会有上面那个例子的问题
小技巧:一般我们采用 单下划线来表示值被弃用。但是单下划线是交互模式中表示上一个值的变量,而且有可能与某些常用别名冲突
所以在这里,我们使用了双下划线表示弃用值。(虽然sonarlint会警告)
'''
|
import numpy as np
from scipy.spatial import cKDTree as KDTree
from pypolycontain.lib.zonotope import zonotope
from collections import deque
from pypolycontain.lib.AH_polytope import AH_polytope,to_AH_polytope
from pypolycontain.lib.operations import distance_point_polytope
from pypolycontain.lib.polytope import polytope
from pypolycontain.lib.containment_encodings import subset_generic,constraints_AB_eq_CD,add_Var_matrix
from pypolycontain.utils.random_polytope_generator import get_k_random_edge_points_in_zonotope
from gurobipy import Model, GRB, QuadExpr
import itertools
from multiprocessing import Pool
from timeit import default_timer
def set_polytope_pair_distance(arguments):
    """Pool worker: l2 distance from one key point to one candidate polytope.

    `arguments` is a single tuple so the function can be used with Pool.map:
    (key_points, key_point_to_polytope_map, polytope_index, key_point_index).
    """
    all_key_points, kp_to_polytopes, poly_idx, kp_idx = arguments
    point = all_key_points[kp_idx]
    candidate = kp_to_polytopes[str(point)]['polytopes'][poly_idx]
    return distance_point_polytope(to_AH_polytope(candidate), point, ball='l2')[0]
class VoronoiClosestPolytope:
    """Accelerated closest-polytope queries via key points and a KD-tree.

    Precomputes, for every key point, the distances to all polytopes (sorted),
    so that a query only needs to test a triangle-inequality-bounded subset.
    """
    def __init__(self, polytopes, key_vertices_count=0, process_count=8, max_number_key_points = None):
        '''
        Compute the closest polytope using Voronoi cells
        :param polytopes: iterable of AH_polytope or zonotope objects (all the same type)
        :param key_vertices_count: if > 0, also sample 2**key_vertices_count edge
            points per zonotope as extra key points (zonotopes only)
        :param process_count: worker processes for the distance precomputation
        :param max_number_key_points: if set, randomly subsample the key points
        '''
        self.init_start_time = default_timer()
        self.section_start_time = self.init_start_time
        self.polytopes = np.asarray(polytopes, dtype='object')
        self.type = self.polytopes[0].type
        self.process_count = process_count
        self.key_vertices_count = key_vertices_count
        # Ambient dimension comes from the translation/center vector of the first polytope.
        if self.type == 'AH_polytope':
            self.dim = self.polytopes[0].t.shape[0]
        elif self.type == 'zonotope':
            self.dim = self.polytopes[0].x.shape[0]
        else:
            raise NotImplementedError
        # One key point per polytope, plus optional edge samples for zonotopes.
        if self.key_vertices_count > 0:
            self.key_points = np.zeros([len(self.polytopes) * (1 + 2 ** self.key_vertices_count), self.dim])
        else:
            self.key_points = np.zeros([len(self.polytopes), self.dim])
        for i, z in enumerate(polytopes):
            if self.type == 'AH_polytope':
                if self.key_vertices_count > 0:
                    raise NotImplementedError
                else:
                    self.key_points[i, :] = self.polytopes[i].t[:, 0]
            elif self.type == 'zonotope':
                if self.key_vertices_count > 0:
                    # Center first, then the sampled edge points for this zonotope.
                    self.key_points[i * (2 ** self.key_vertices_count + 1), :] = self.polytopes[i].x[:, 0]
                    self.key_points[i*(2 ** self.key_vertices_count + 1)+1:(i + 1) * (2 ** self.key_vertices_count + 1), :] = get_k_random_edge_points_in_zonotope(self.polytopes[i], self.key_vertices_count)
                else:
                    self.key_points[i, :] = self.polytopes[i].x[:, 0]
            else:
                raise NotImplementedError
        if max_number_key_points:
            # sample the key points
            n = self.key_points.shape[0]
            chosen_key_points = np.random.choice(n, size=min(n, max_number_key_points), replace=False)
            self.key_points = self.key_points[chosen_key_points, :]
            # print(self.key_points.shape)
        # NOTE(review): dict is keyed on str(key_point) — fragile if numpy's
        # array printing ever truncates or reformats; confirm acceptable.
        self.key_point_to_polytope_map = dict()  # stores the potential closest polytopes associated with each Voronoi (centroid)
        for key_point in self.key_points:
            ds = np.zeros(self.polytopes.shape[0])
            self.key_point_to_polytope_map[str(key_point)] = np.rec.fromarrays([self.polytopes, ds], names=('polytopes', 'distances'))
        self.build_cell_polytope_map_default()
        # build kd-tree for centroids
        self.key_point_tree = KDTree(self.key_points)
        print(('Completed precomputation in %f seconds' % (default_timer() - self.init_start_time)))

    def build_cell_polytope_map_default(self):
        """Fill each key point's record array with its distance to every polytope, then sort."""
        # Cartesian product of (polytope index, key point index) pairs.
        polytope_key_point_indices = np.array(np.meshgrid(np.arange(self.polytopes.shape[0]), np.arange(self.key_points.shape[0]))).T.reshape(-1, 2)
        arguments = []
        for i in polytope_key_point_indices:
            arguments.append((self.key_points, self.key_point_to_polytope_map, i[0], i[1]))
        # Distances are computed in parallel worker processes.
        p = Pool(self.process_count)
        pca = p.map(set_polytope_pair_distance, arguments)
        polytope_key_point_arrays = np.asarray(pca).reshape((self.polytopes.shape[0]), self.key_points.shape[0])
        # print(polytope_centroid_arrays)
        # compute pairwise distances of the centroids and the polytopes
        # fixme
        for key_point_index, key_point in enumerate(self.key_points):
            key_point_string = str(key_point)
            for polytope_index, polytope in enumerate(self.key_point_to_polytope_map[key_point_string]['polytopes']):
                self.key_point_to_polytope_map[str(key_point)].distances[polytope_index] = polytope_key_point_arrays[polytope_index, key_point_index]
                # print(polytope_key_point_arrays[polytope_index, key_point_index])
            # Sorting by distance enables the searchsorted cutoff at query time.
            self.key_point_to_polytope_map[key_point_string].sort(order='distances')
            # print(self.centroid_to_polytope_map[centroid_string])

    def find_closest_polytope(self, query_point, return_intermediate_info = False):
        """Return the polytope closest (l2) to query_point, pruning candidates
        with the triangle inequality around the nearest key point."""
        # find the closest centroid
        d, i = self.key_point_tree.query(query_point)
        closest_key_point = self.key_point_tree.data[i]
        # print('closest key point', closest_key_point)
        closest_key_point_polytope = self.key_point_to_polytope_map[str(closest_key_point)]['polytopes'][0]
        # print('closest polytope centroid' + str(closest_key_point_polytope.x))
        dist_query_centroid_polytope = distance_point_polytope(closest_key_point_polytope, query_point, ball='l2')[0]
        dist_query_key_point = np.linalg.norm(query_point-closest_key_point)
        # print(dist_query_key_point, dist_query_centroid_polytope)
        # Any polytope farther from the key point than this bound cannot win.
        cutoff_index = np.searchsorted(self.key_point_to_polytope_map[str(closest_key_point)].distances, dist_query_key_point + dist_query_centroid_polytope)
        # print(cutoff_index)
        closest_polytope_candidates = self.key_point_to_polytope_map[str(closest_key_point)].polytopes[0:cutoff_index]
        # print(closest_polytope_candidates)
        best_polytope = None
        best_distance = np.inf
        for polytope in closest_polytope_candidates:
            # Early exit: distance ~0 means the query point is inside this polytope.
            if best_distance < 1e-9:
                break
            dist = distance_point_polytope(polytope, query_point, ball='l2')[0]
            if best_distance > dist:
                best_distance = dist
                best_polytope = polytope
        # print('best distance', best_distance)
        if return_intermediate_info:
            return best_polytope, best_distance, closest_polytope_candidates
        return best_polytope
"""
300. Longest Increasing Subsequence
Given an unsorted array of integers, find the length of longest increasing subsequence.
For example,
Given [10, 9, 2, 5, 3, 7, 101, 18],
The longest increasing subsequence is [2, 3, 7, 101], therefore the length is 4.
Note that there may be more than one LIS combination, it is only necessary for you to return the length.
Your algorithm should run in O(n2) complexity.
Follow up: Could you improve it to O(n log n) time complexity?
History:
2018.06.09
2019.07.10
"""
import bisect
class Solution:
    def lengthOfLIS_dp(self, nums):
        """
        O(n^2) dynamic programming: dp[i] is the length of the longest
        increasing subsequence ending at nums[i].
        (Removed the original's `tail` array — it was written once and never read.)
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        dp = [1] * len(nums)
        best = 1
        for i in range(1, len(nums)):
            for j in range(i):
                if nums[i] > nums[j] and dp[j] + 1 > dp[i]:
                    dp[i] = dp[j] + 1
            best = max(best, dp[i])
        return best

    def lengthOfLIS(self, nums):
        """
        O(n log n): seq[k] holds the smallest possible tail value of an
        increasing subsequence of length k+1; its length is the answer.
        (Replaced the original's pop-then-insert with direct item assignment.)
        :type nums: List[int]
        :rtype: int
        """
        seq = []
        for x in nums:
            k = bisect.bisect_left(seq, x)
            if k == len(seq):
                seq.append(x)  # x extends the longest subsequence found so far
            else:
                seq[k] = x  # x is a smaller tail for a length-(k+1) subsequence
        return len(seq)
if __name__ == "__main__":
    # Tiny ad-hoc test harness: each case is (callable, args tuple, expected).
    sol = Solution()
    method = sol.lengthOfLIS
    cases = [
        (method, ([10, 9, 2, 5, 3, 7, 101, 18],), 4),
    ]
    for case_no, (func, args, expected) in enumerate(cases, start=1):
        got = func(*args)
        if got == expected:
            print("Case {:d} Passed".format(case_no))
        else:
            print("Case {:d} Failed; Expected {:s} != {:s}".format(case_no, str(expected), str(got)))
|
# 4.5 Validate BST
# Implement a function to check if a binary tree is a binary search tree.
class BinarySearchTree:
    """Binary search tree node: left subtree < value, right subtree >= value."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    # Insert the given value into the tree
    def insert(self, value):
        # if value is less than current value, go left
        if value < self.value:
            if not self.left:
                self.left = BinarySearchTree(value)
            else:
                self.left.insert(value)
        # if value is greater than/equal to current value, go right
        elif value >= self.value:
            if not self.right:
                self.right = BinarySearchTree(value)
            else:
                self.right.insert(value)

    def validate_bst(self, root, lower=None, upper=None):
        """Return True iff the tree rooted at `root` is a valid BST.

        Fixes the original, which compared node objects directly (TypeError in
        Python 3), called `validate_bst` as a bare name (NameError), ignored the
        recursive results, and only compared children to their parent instead of
        to the full ancestor bounds. `lower`/`upper` carry the allowed value
        window through the recursion; they default to None (unbounded), so the
        original call signature still works.
        """
        if root is None:
            return True
        # Right descendants may equal an ancestor (insert sends duplicates right),
        # so the lower bound is inclusive; the upper bound is strict.
        if lower is not None and root.value < lower:
            return False
        if upper is not None and root.value >= upper:
            return False
        return (self.validate_bst(root.left, lower, root.value) and
                self.validate_bst(root.right, root.value, upper))
|
def gcd(a, b):
    """Return the greatest common divisor of a and b.

    Uses Euclid's algorithm: O(log min(a, b)) instead of the original
    O(min(a, b)) trial division. Also returns the mathematically correct
    result when one argument is 0 (gcd(0, b) == b; the old loop returned 1).
    """
    while b:
        a, b = b, a % b
    return abs(a)
if __name__ == '__main__':
    # Read two whitespace-separated integers from stdin and print their GCD.
    x, y = map(int, input().split())
    print(gcd(x, y))
|
from django.shortcuts import render
# Create your views here.
def index(request):
    # Landing page; reuses the home template.
    return render(request,"home.html")
def home(request):
    # Home page.
    return render(request,"home.html")
def bollywood(request):
    # Bollywood section.
    return render(request,"bollywood.html")
def hollywood(request):
    # Hollywood section.
    return render(request,"hollywood.html")
def pcgames(request):
    # PC games section.
    return render(request,"pcgames.html")
def music(request):
    # Music section.
    return render(request,"music.html")
def search(request):
    """Handle the search query.

    Fixes two defects: the original indexed request.GET["search"] (KeyError
    when the parameter is missing) and had its return statement commented
    out, so the view returned None — an error in Django.
    """
    val = request.GET.get("search", "")
    print(val)
    return render(request, "home.html")
from typing import Tuple, Union, Iterable, List, Callable, Dict, Optional
from nnuncert.models._network import MakeNet
from nnuncert.models.dnnc import DNNCModel, DNNCRidge, DNNCHorseshoe, DNNCPred
from nnuncert.models.mc_dropout import DropoutTF, MCDropout, MCDropoutPred
from nnuncert.models.ensemble import Ensemble, PNNEnsemble, NLMEnsemble, EnsPredGauss
from nnuncert.models.gp import GPModel, GPPred
from nnuncert.models.nlm import NLM, NLMPred
from nnuncert.models.pbp import PBPModel, PBPPred
from nnuncert.models.pnn import PNN, PNNPred
# Map user-facing model names to model classes. Several aliases point at the
# same class.
STR2TYPE = {
    "DNNC-R" : DNNCRidge,
    "DNNC-HS" : DNNCHorseshoe,
    "MCDropout" : MCDropout,
    "MC Dropout" : MCDropout,
    "MC dropout" : MCDropout,
    "PNN" : PNN,
    "Deep emsemble" : PNNEnsemble,  # misspelled alias kept for backward compatibility
    "Deep ensemble" : PNNEnsemble,  # correctly spelled alias (new)
    "GP" : GPModel,
    "GP-ReLU" : GPModel,
    "PNN-E" : PNNEnsemble,
    "NLM" : NLM,
    "NLM-E" : NLMEnsemble,
    "PBP" : PBPModel,
    }
def make_network(model_type: Union[type, str],
                 input_shape: Tuple,
                 architecture: List[Tuple[int, str, float]],
                 *args, **kwargs) -> MakeNet:
    """Generate network with 'architecture' for given 'model_type'.

    Parameters
    ----------
    model_type : Union[type, str]
        Model to generate network for.
    input_shape : Tuple
        Shape of inputs for neural network.
    architecture : List[Tuple[int, str, float]]
        Network architecture, per hidden layer:
        [Number of hidden units, activation function in layer, dropout rate]

    Returns
    -------
    MakeNet
        Network to be used as input for model initialization.

    Raises
    ------
    KeyError
        If 'model_type' is not a known model name or class.
    """
    if isinstance(model_type, str):
        model_type = STR2TYPE[model_type]
    # The original built a dict that eagerly constructed a network for EVERY
    # model type and then indexed one; construct only the requested network.
    if model_type is MCDropout:
        return MakeNet.joint(input_shape, architecture, dropout_type=DropoutTF, *args, **kwargs)
    if model_type in (PNN, PNNEnsemble, NLM, NLMEnsemble, PBPModel):
        return MakeNet.joint(input_shape, architecture, *args, **kwargs)
    if model_type in (DNNCModel, DNNCRidge, DNNCHorseshoe, GPModel):
        return MakeNet.mean_only(input_shape, architecture, *args, **kwargs)
    # Same failure mode as the original dict lookup.
    raise KeyError(model_type)
def make_model(model_type: Union[type, str],
               input_shape: Tuple,
               architecture: List[Tuple[int, str, float]],
               net_kwargs: Optional[Dict] = None,
               *args, **kwargs):
    """Initialize model with given architecture.

    Parameters
    ----------
    model_type : Union[type, str]
        Model to generate network for.
    input_shape : Tuple
        Shape of inputs for neural network.
    architecture : List[Tuple[int, str, float]]
        Network architecture, per hidden layer:
        [Number of hidden units, activation function in layer, dropout rate]
    net_kwargs : Optional[Dict]
        Arguments to be passed to MakeNet creator function.
        Default changed from a shared mutable `{}` (a classic Python pitfall)
        to None; None is treated as an empty dict.
    """
    if net_kwargs is None:
        net_kwargs = {}
    if isinstance(model_type, str):
        model_type = STR2TYPE[model_type]
    # generate network
    net = make_network(model_type, input_shape, architecture, **net_kwargs)
    # init model
    model = model_type(net, *args, **kwargs)
    return model
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to automatically determine the moment magnitudes of a larger number of
events.
The script will write one output file containing all events with one additional
magnitude.
Some configuration is required. Please edit the uppercase variables right after
all the imports to suit your needs.
All events need to be stored in ONE QuakeML file. Every event has to a larger
number of picks. Furthermore waveform data for all picks is necessary and
station information as (dataless)SEED files for every station.
The script could use some heavy refactoring but its program flow is quite
linear and it works well enough.
Requirements:
* numpy
* scipy
* matplotlib
* ObsPy
* colorama
* progressbar
* mtspec (https://github.com/krischer/mtspec)
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2012
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
import colorama
import glob
import matplotlib.pylab as plt
import mtspec
import numpy as np
from obspy import read, Stream
from obspy.core.event import readEvents, Comment, Magnitude, Catalog
from obspy.xseed import Parser
import progressbar
import scipy
import scipy.optimize
import warnings
# Rock density in kg/m^3 (typical crustal value; previous comment said
# "km/m^3", which is not a density unit).
DENSITY = 2700.0
# Velocities in m/s.
V_P = 4800.0
# S velocity derived from a standard Vp/Vs ratio of ~1.73 (sqrt(3)).
V_S = V_P / 1.73
# How many seconds before and after the pick to choose for calculating the
# spectra.
TIME_BEFORE_PICK = 0.2
TIME_AFTER_PICK = 0.8
# Seconds of waveform requested around a pick time when reading data
# (see get_corresponding_stream in the __main__ block).
PADDING = 20
# Water level used for the deconvolution in the instrument correction.
WATERLEVEL = 10.0
# Fixed quality factor. Very unstable inversion for it. Has almost no influence
# on the final seismic moment estimations but has some influence on the corner
# frequency estimation and therefore on the source radius estimation.
QUALITY_FACTOR = 1000
# Specify where to find the files. One large event file contains all events and
# an arbitrary number of waveform and station information files.
EVENT_FILES = glob.glob("events/*")
STATION_FILES = glob.glob("stations/*")
WAVEFORM_FILES = glob.glob("waveforms/*")
# Where to write the output file to.
OUTPUT_FILE = "events_with_moment_magnitudes.xml"
def fit_spectrum(spectrum, frequencies, traveltime, initial_omega_0,
        initial_f_c):
    """
    Fit a theoretical source spectrum to a measured source spectrum.

    Uses a Levenberg-Marquardt algorithm (scipy.optimize.curve_fit).

    :param spectrum: The measured source spectrum.
    :param frequencies: The corresponding frequencies.
    :param traveltime: Event traveltime in [s].
    :param initial_omega_0: Initial guess for Omega_0.
    :param initial_f_c: Initial guess for the corner frequency.
    :returns: Best fits and standard deviations.
        (Omega_0, f_c, Omega_0_std, f_c_std)
        Returns None, if the fit failed.
    """
    def f(frequencies, omega_0, f_c):
        # Model with Q and traveltime held fixed; only omega_0 and f_c are
        # free parameters of the fit.
        return calculate_source_spectrum(frequencies, omega_0, f_c,
                QUALITY_FACTOR, traveltime)
    popt, pcov = scipy.optimize.curve_fit(f, frequencies, spectrum, \
        p0=list([initial_omega_0, initial_f_c]), maxfev=100000)
    if popt is None:
        return None
    # NOTE(review): the diagonal entries of pcov are *variances*, not
    # standard deviations, despite the docstring's "_std" naming — confirm
    # downstream usage.
    return popt[0], popt[1], pcov[0, 0], pcov[1, 1]
def calculate_source_spectrum(frequencies, omega_0, corner_frequency, Q,
        traveltime):
    """
    Theoretical displacement source spectrum after Abercrombie (1995) and
    Boatwright (1980):

        Omega(f) = Omega_0 * e^(-pi * f * T / Q) / (1 + (f / f_c)^4)^0.5

    Abercrombie, R. E. (1995). Earthquake locations using single-station deep
    borehole recordings: Implications for microseismicity on the San Andreas
    fault in southern California. Journal of Geophysical Research, 100,
    24003-24013.

    Boatwright, J. (1980). A spectral theory for circular seismic sources,
    simple estimates of source dimension, dynamic stress drop, and radiated
    energy. Bulletin of the Seismological Society of America, 70(1).

    :param frequencies: Input array to perform the calculation on.
    :param omega_0: Low frequency amplitude in [meter x second].
    :param corner_frequency: Corner frequency in [Hz].
    :param Q: Quality factor.
    :param traveltime: Traveltime in [s].
    """
    # Anelastic attenuation term (path effect).
    attenuation = np.exp(-np.pi * frequencies * traveltime / Q)
    # High-frequency roll-off around the corner frequency (Boatwright n=4).
    rolloff = np.sqrt(1.0 + (frequencies / corner_frequency) ** 4)
    return omega_0 * attenuation / rolloff
def calculate_moment_magnitudes(cat, output_file):
    """
    Compute a moment magnitude (Mw) per event from P/S pick spectra and
    append it to each event's magnitude list, then write the catalog.

    For every usable pick, the three-component stream is fetched, a
    multitaper spectrum is computed around the pick, and the Boatwright
    model is fitted to obtain Omega_0 and the corner frequency.  Seismic
    moment, source radius and stress drop follow from standard relations.

    :param cat: obspy.core.event.Catalog object.
    :param output_file: Path the updated QuakeML catalog is written to.

    NOTE: Python 2 code (print statements); relies on module globals
    (V_P, V_S, DENSITY, PADDING, ...) and the ``get_corresponding_stream``
    closure defined in the __main__ block.
    """
    Mws = []
    Mls = []
    Mws_std = []
    for event in cat:
        # Events without an origin or magnitude cannot be processed.
        if not event.origins:
            print "No origin for event %s" % event.resource_id
            continue
        if not event.magnitudes:
            print "No magnitude for event %s" % event.resource_id
            continue
        origin_time = event.origins[0].time
        local_magnitude = event.magnitudes[0].mag
        #if local_magnitude < 1.0:
            #continue
        moments = []
        source_radii = []
        corner_frequencies = []
        for pick in event.picks:
            # Only p phase picks.
            # Radiation pattern coefficients and k after standard values
            # for P and S waves; other phases are skipped.
            if pick.phase_hint.lower() == "p":
                radiation_pattern = 0.52
                velocity = V_P
                k = 0.32
            elif pick.phase_hint.lower() == "s":
                radiation_pattern = 0.63
                velocity = V_S
                k = 0.21
            else:
                continue
            # Hypocentral distance estimated from traveltime * velocity.
            distance = (pick.time - origin_time) * velocity
            if distance <= 0.0:
                continue
            stream = get_corresponding_stream(pick.waveform_id, pick.time,
                PADDING)
            if stream is None or len(stream) != 3:
                continue
            omegas = []
            corner_freqs = []
            for trace in stream:
                # Get the index of the pick.
                pick_index = int(round((pick.time - trace.stats.starttime) / \
                    trace.stats.delta))
                # Choose date window 0.5 seconds before and 1 second after pick.
                data_window = trace.data[pick_index - \
                    int(TIME_BEFORE_PICK * trace.stats.sampling_rate): \
                    pick_index + int(TIME_AFTER_PICK * trace.stats.sampling_rate)]
                # Calculate the spectrum.
                spec, freq = mtspec.mtspec(data_window, trace.stats.delta, 2)
                try:
                    fit = fit_spectrum(spec, freq, pick.time - origin_time,
                        spec.max(), 10.0)
                except:
                    continue
                if fit is None:
                    continue
                Omega_0, f_c, err, _ = fit
                # fit_spectrum is applied to a power spectrum, hence sqrt.
                Omega_0 = np.sqrt(Omega_0)
                omegas.append(Omega_0)
                corner_freqs.append(f_c)
            # NOTE(review): this assumes all three traces yielded a fit; if
            # any component was skipped above, omegas has fewer than three
            # entries and indexing [2] raises IndexError — confirm intent.
            M_0 = 4.0 * np.pi * DENSITY * velocity ** 3 * distance * \
                np.sqrt(omegas[0] ** 2 + omegas[1] ** 2 + omegas[2] ** 2) / \
                radiation_pattern
            # Source radius from the mean corner frequency of the three
            # components (3/sum == 1/mean).
            r = 3 * k * V_S / sum(corner_freqs)
            moments.append(M_0)
            source_radii.append(r)
            corner_frequencies.extend(corner_freqs)
        if not len(moments):
            print "No moments could be calculated for event %s" % \
                event.resource_id.resource_id
            continue
        # Calculate the seismic moment via basic statistics.
        moments = np.array(moments)
        moment = moments.mean()
        moment_std = moments.std()
        corner_frequencies = np.array(corner_frequencies)
        corner_frequency = corner_frequencies.mean()
        corner_frequency_std = corner_frequencies.std()
        # Calculate the source radius.
        source_radii = np.array(source_radii)
        source_radius = source_radii.mean()
        source_radius_std = source_radii.std()
        # Calculate the stress drop of the event based on the average moment and
        # source radii.
        stress_drop = (7 * moment) / (16 * source_radius ** 3)
        stress_drop_std = np.sqrt((stress_drop ** 2) * \
            (((moment_std ** 2) / (moment ** 2)) + \
            (9 * source_radius * source_radius_std ** 2)))
        if source_radius > 0 and source_radius_std < source_radius:
            print "Source radius:", source_radius, " Std:", source_radius_std
            print "Stress drop:", stress_drop / 1E5, " Std:", stress_drop_std / 1E5
        # Moment magnitude after the standard Mw definition (Hanks & Kanamori
        # style, moment in Nm).
        Mw = 2.0 / 3.0 * (np.log10(moment) - 9.1)
        Mw_std = 2.0 / 3.0 * moment_std / (moment * np.log(10))
        Mws_std.append(Mw_std)
        Mws.append(Mw)
        Mls.append(local_magnitude)
        calc_diff = abs(Mw - local_magnitude)
        # NOTE(review): Mw is rebound to a formatted *string* here and later
        # assigned to mag.mag below — mag.mag receives a string, not the
        # float. Looks like a bug; confirm and assign the float instead.
        Mw = ("%.3f" % Mw).rjust(7)
        Ml = ("%.3f" % local_magnitude).rjust(7)
        diff = ("%.3e" % calc_diff).rjust(7)
        ret_string = colorama.Fore.GREEN + \
            "For event %s: Ml=%s | Mw=%s | " % (event.resource_id.resource_id,
            Ml, Mw)
        if calc_diff >= 1.0:
            ret_string += colorama.Fore.RED
        ret_string += "Diff=%s" % diff
        ret_string += colorama.Fore.GREEN
        ret_string += " | Determined at %i stations" % len(moments)
        ret_string += colorama.Style.RESET_ALL
        print ret_string
        mag = Magnitude()
        mag.mag = Mw
        mag.mag_errors.uncertainty = Mw_std
        mag.magnitude_type = "Mw"
        mag.origin_id = event.origins[0].resource_id
        mag.method_id = "smi:com.github/krischer/moment_magnitude_calculator/automatic/1"
        mag.station_count = len(moments)
        mag.evaluation_mode = "automatic"
        mag.evaluation_status = "preliminary"
        mag.comments.append(Comment( \
            "Seismic Moment=%e Nm; standard deviation=%e" % (moment,
            moment_std)))
        mag.comments.append(Comment("Custom fit to Boatwright spectrum"))
        if source_radius > 0 and source_radius_std < source_radius:
            mag.comments.append(Comment( \
                "Source radius=%.2fm; standard deviation=%.2f" % (source_radius,
                source_radius_std)))
        event.magnitudes.append(mag)
    print "Writing output file..."
    cat.write(output_file, format="quakeml")
def fit_moment_magnitude_relation_curve(Mls, Mws, Mw_stds):
    """
    Fit the quadratic relation  Mw = a + b * Ml + c * Ml ** 2  to the data
    with a weighted least-squares fit.

    Invalid samples (NaN/inf magnitudes, non-positive or NaN standard
    deviations) are excluded before fitting.

    :returns: The best fitting (a, b, c).
    """
    def quadratic(x, a, b, c):
        return a + b * x + c * x ** 2
    masked_ml = np.ma.masked_invalid(Mls)
    masked_mw = np.ma.masked_invalid(Mws)
    # Keep only points where both magnitudes are finite and the standard
    # deviation is a usable positive weight.
    unusable = masked_ml.mask | masked_mw.mask | np.isnan(Mw_stds) | \
        (Mw_stds <= 0)
    valid = ~unusable
    # Start from a straight line (a=0, b=1, c=0).
    popt, _ = scipy.optimize.curve_fit(
        quadratic, masked_ml[valid], masked_mw[valid],
        p0=[0.0, 1.0, 0.0], sigma=Mw_stds[valid], maxfev=100000)
    a, b, c = popt
    return a, b, c
def plot_ml_vs_mw(catalog):
    """
    Scatter-plot local magnitude (Ml) vs. moment magnitude (Mw) for every
    event in the catalog, overlay published Ml-Mw relations and a quadratic
    fit to the data, and save the figure to 'moment_mag_automatic.pdf'.
    """
    moment_magnitudes = []
    moment_magnitudes_std = []
    local_magnitudes = []
    local_magnitudes_std = []
    for event in catalog:
        # Take the *first* Mw and the first Ml found per event; missing
        # values stay None and become NaN in the float arrays below.
        Mw = None
        Mw_std = None
        Ml = None
        Ml_std = None
        for mag in event.magnitudes:
            if Mw is not None and Ml is not None:
                break
            mag_type = mag.magnitude_type.lower()
            if mag_type == "mw":
                if Mw is not None:
                    continue
                Mw = mag.mag
                Mw_std = mag.mag_errors.uncertainty
            elif mag_type == "ml":
                if Ml is not None:
                    continue
                Ml = mag.mag
                Ml_std = mag.mag_errors.uncertainty
        moment_magnitudes.append(Mw)
        moment_magnitudes_std.append(Mw_std)
        local_magnitudes.append(Ml)
        local_magnitudes_std.append(Ml_std)
    # None entries become NaN here and are masked out by the curve fit.
    moment_magnitudes = np.array(moment_magnitudes, dtype="float64")
    moment_magnitudes_std = np.array(moment_magnitudes_std, dtype="float64")
    local_magnitudes = np.array(local_magnitudes, dtype="float64")
    local_magnitudes_std = np.array(local_magnitudes_std, dtype="float64")
    # Fit a curve through the data.
    a, b, c = fit_moment_magnitude_relation_curve(local_magnitudes,
        moment_magnitudes, moment_magnitudes_std)
    x_values = np.linspace(-2.0, 4.0, 10000)
    fit_curve = a + b * x_values + c * x_values ** 2
    plt.figure(figsize=(10, 8))
    # Show the data values as dots.
    plt.scatter(local_magnitudes, moment_magnitudes, color="blue",
        edgecolor="black")
    # Plot the Ml=Mw line.
    plt.plot(x_values, x_values, label="$Mw=Ml$", color="k", alpha=0.8)
    # Published Ml-Mw relations for comparison.
    plt.plot(x_values, 0.67 + 0.56 * x_values + 0.046 * x_values ** 2,
        label="$Mw=0.67 + 0.56Ml + 0.046Ml^2 (gruenthal etal 2003)$", color="green", ls="--")
    plt.plot(x_values, 0.53 + 0.646 * x_values + 0.0376 * x_values ** 2,
        label="$Mw=0.53 + 0.646Ml + 0.0376Ml^2 (gruenthal etal 2009)$", color="green")
    plt.plot(x_values, 0.594 * x_values + 0.985,
        label="$Mw=0.985 + 0.594Ml (goertz-allmann etal 2011)$", color="orange")
    plt.plot(x_values, (x_values + 1.21) / 1.58,
        label="$Mw=(Ml + 1.21) / 1.58 (bethmann etal 2011)$", color="red")
    plt.plot(x_values, fit_curve, color="blue",
        label="$Data$ $fit$ $with$ $Mw=%.2f + %.2fMl + %.3fMl^2$" % (a, b, c))
    # Set limits and labels.
    plt.xlim(-2, 4)
    plt.ylim(-2, 4)
    plt.xlabel("Ml", fontsize="x-large")
    plt.ylabel("Mw", fontsize="x-large")
    # Show grid and legend.
    plt.grid()
    plt.legend(loc="lower right")
    plt.savefig("moment_mag_automatic.pdf")
def plot_source_radius(cat):
    """
    Plot Mw vs. source radius with error bars by parsing the source-radius
    information back out of the magnitude comment strings, and save the
    figure.

    NOTE(review): output path is hardcoded to a personal desktop
    ('/Users/lion/Desktop/SourceRadius.pdf') — should be parameterized.
    NOTE(review): assumes the computed Mw is event.magnitudes[1] and that
    the source-radius comment is comments[1] of that magnitude with exactly
    two comments present; calculate_moment_magnitudes above appends up to
    *three* comments (moment, "Custom fit...", source radius) — confirm
    this consistency before trusting the plot.
    """
    mw = []
    mw_std = []
    source_radius = []
    source_radius_std = []
    plt.figure(figsize=(10, 4.5))
    # Read the source radius.
    for event in cat:
        mag = event.magnitudes[1]
        if len(mag.comments) != 2:
            continue
        mw.append(mag.mag)
        mw_std.append(mag.mag_errors.uncertainty)
        # Comment format: "Source radius=<r>m; standard deviation=<std>".
        sr, std = mag.comments[1].text.split(";")
        _, sr = sr.split("=")
        _, std = std.split("=")
        # Strip the trailing 'm' unit before converting.
        sr = float(sr[:-1])
        std = float(std)
        source_radius.append(sr)
        source_radius_std.append(std)
    plt.errorbar(mw, source_radius, yerr=source_radius_std,
        fmt="o", linestyle="None")
    plt.xlabel("Mw", fontsize="x-large")
    plt.ylabel("Source Radius [m]", fontsize="x-large")
    plt.grid()
    plt.savefig("/Users/lion/Desktop/SourceRadius.pdf")
if __name__ == "__main__":
    # NOTE: Python 2 script (print statements, legacy obspy readEvents API).
    # Read all instrument responses.
    widgets = ['Parsing instrument responses...', progressbar.Percentage(),
        ' ', progressbar.Bar()]
    pbar = progressbar.ProgressBar(widgets=widgets,
        maxval=len(STATION_FILES)).start()
    # Maps channel id -> Parser holding its response information.
    parsers = {}
    # Read all waveform files.
    for _i, xseed in enumerate(STATION_FILES):
        pbar.update(_i)
        parser = Parser(xseed)
        channels = [c['channel_id'] for c in parser.getInventory()['channels']]
        parsers_ = dict.fromkeys(channels, parser)
        # Warn on duplicate channel definitions; last file read wins.
        if any([k in parsers for k in parsers_.keys()]):
            msg = "Channel(s) defined in more than one metadata file."
            warnings.warn(msg)
        parsers.update(parsers_)
    pbar.finish()
    # Parse all waveform files.
    widgets = ['Indexing waveform files... ', progressbar.Percentage(),
        ' ', progressbar.Bar()]
    pbar = progressbar.ProgressBar(widgets=widgets,
        maxval=len(WAVEFORM_FILES)).start()
    # Maps trace id -> list of {filename, starttime, endtime} records so
    # picks can be matched to files without re-reading everything.
    waveform_index = {}
    # Read all waveform files.
    for _i, waveform in enumerate(WAVEFORM_FILES):
        pbar.update(_i)
        st = read(waveform)
        for trace in st:
            if not trace.id in waveform_index:
                waveform_index[trace.id] = []
            waveform_index[trace.id].append( \
                {"filename": waveform,
                "starttime": trace.stats.starttime,
                "endtime": trace.stats.endtime})
    pbar.finish()
    # Define it inplace to create a closure for the waveform_index dictionary
    # because I am too lazy to fix the global variable issue right now...
    def get_corresponding_stream(waveform_id, pick_time, padding=1.0):
        """
        Helper function to find a requested waveform in the previously created
        waveform_index file.
        Also performs the instrument correction.
        Returns None if the file could not be found.
        """
        # Request all three components (Z, N, E) for this SEED id.
        trace_ids = [waveform_id.getSEEDString()[:-1] + comp for comp in "ZNE"]
        st = Stream()
        start = pick_time - padding
        end = pick_time + padding
        for trace_id in trace_ids:
            for waveform in waveform_index.get(trace_id, []):
                # Only use files fully covering [start, end].
                if waveform["starttime"] > start:
                    continue
                if waveform["endtime"] < end:
                    continue
                st += read(waveform["filename"]).select(id=trace_id)
        for trace in st:
            paz = parsers[trace.id].getPAZ(trace.id, start)
            # PAZ in SEED correct to m/s. Add a zero to correct to m.
            paz["zeros"].append(0 + 0j)
            trace.detrend()
            trace.simulate(paz_remove=paz, water_level=WATERLEVEL)
        return st
    print "Reading all events."
    cat = Catalog()
    for filename in EVENT_FILES:
        cat += readEvents(filename)
    print "Done reading all events."
    # Will edit the Catalog object inplace.
    calculate_moment_magnitudes(cat, output_file=OUTPUT_FILE)
    # Plot it.
    plot_ml_vs_mw(cat)
|
import x3dpsail
# Minimal x3dpsail (X3D Python SAI) example: build a ProtoBody whose first
# child is a TouchSensor wired through an IS/connect mapping.
# NOTE(review): the connect() node carries no nodeField/protoField values —
# presumably a skeleton/demo snippet; confirm before reuse.
(x3dpsail.ProtoBody()
# Initial node of ProtoBody determines prototype node type
.addChild(x3dpsail.TouchSensor().setDescription(x3dpsail.SFString("within ProtoBody"))
.setIS(x3dpsail.IS()
.addConnect(x3dpsail.connect()))))
|
#!/usr/bin/env python3
# Load a networkx gpickle graph given on the command line and drop into an
# interactive Python console (with tab completion) where the graph is bound
# to the name ``G``.
from networkx.utils import open_file
import pickle
import sys
@open_file(0, mode='rb')
def read_gpickle(path):
    # SECURITY: pickle.load can execute arbitrary code embedded in the file —
    # only open gpickle files from trusted sources.
    return pickle.load(path)
# Usage: script.py <graph.gpickle>
G = read_gpickle(sys.argv[1])
import code, readline, rlcompleter
readline.parse_and_bind('tab: complete')
code.InteractiveConsole(locals()).interact()
|
from __future__ import absolute_import
from celery import Celery
from nlweb.app import create_app, load_celery_config
from nlweb.extensions import opbeat
from opbeat.contrib.celery import register_signal
def make_celery(app):
    """Create a Celery instance whose tasks run inside *app*'s context.

    The default task base class is replaced by a subclass that wraps
    ``__call__`` in ``app.app_context()``, so task bodies can use Flask
    extensions (database, config, ...) as if handling a request.
    """
    celery_instance = Celery(app.import_name)
    load_celery_config(celery_instance)
    base_task_cls = celery_instance.Task

    class ContextTask(base_task_cls):
        abstract = True

        def __call__(self, *args, **kwargs):
            # Execute the task body inside the Flask application context.
            with app.app_context():
                return base_task_cls.__call__(self, *args, **kwargs)

    celery_instance.Task = ContextTask
    return celery_instance
# Build the Flask app and the Celery instance bound to it at import time so
# a Celery worker pointed at this module finds ``celery``.
flask_app = create_app()
celery = make_celery(flask_app)
if not flask_app.debug:
    # Report failed tasks to Opbeat only outside of debug mode.
    register_signal(opbeat.client)
|
#!/usr/bin/python3
"""State rule module"""
from api.v1.views import app_views
from flask import jsonify, abort, make_response, request
from models import storage
from models.city import City
from models.place import Place
from models.user import User
from models.state import State
from models.amenity import Amenity
@app_views.route('/cities/<city_id>/places',
                 methods=['GET'], strict_slashes=False)
def get_all_places(city_id):
    """Return all Place objects of a City as JSON; 404 if city unknown."""
    city = storage.get(City, city_id)
    if city is None:
        abort(404)
    return jsonify([place.to_dict() for place in city.places])
@app_views.route('/places/<place_id>',
                 methods=['GET'], strict_slashes=False)
def get_place(place_id):
    """Return the Place with the given id as JSON; 404 when unknown."""
    place = storage.get(Place, place_id)
    if place is not None:
        return jsonify(place.to_dict())
    abort(404)
@app_views.route('/places/<place_id>',
                 methods=['DELETE'], strict_slashes=False)
def delete_place(place_id):
    """Delete the Place with the given id; respond with an empty JSON dict."""
    doomed = storage.get(Place, place_id)
    if doomed is None:
        abort(404)
    storage.delete(doomed)
    storage.save()
    return make_response(jsonify({}), 200)
@app_views.route('/cities/<city_id>/places',
                 methods=['POST'], strict_slashes=False)
def post_place(city_id):
    """Create a new Place inside the given City.

    Body must be a JSON object with at least ``user_id`` (an existing User)
    and ``name``.  Responds 404 for unknown city/user, 400 for a bad body,
    and 201 with the created place on success.
    """
    city = storage.get(City, city_id)
    if city is None:
        abort(404)
    # silent=True: malformed JSON yields None instead of raising BadRequest,
    # so the documented "Not a JSON" 400 response is actually reachable.
    data = request.get_json(silent=True)
    if not isinstance(data, dict):
        return make_response("Not a JSON", 400)
    if "user_id" not in data:
        return make_response("Missing user_id", 400)
    user = storage.get(User, data['user_id'])
    if user is None:
        abort(404)
    if "name" not in data:
        return make_response("Missing name", 400)
    # Bind the new place to the city from the URL, overriding any client value.
    data["city_id"] = city_id
    new_place = Place(**data)
    storage.new(new_place)
    storage.save()
    return make_response(jsonify(new_place.to_dict()), 201)
@app_views.route('/places/<place_id>', methods=['PUT'], strict_slashes=False)
def put_place(place_id):
    """Update an existing Place from a JSON body.

    Responds 404 when the place does not exist, 400 when the body is not a
    JSON object, and 200 with the updated place on success.
    """
    place = storage.get(Place, place_id)
    if place is None:
        abort(404)
    # silent=True so malformed JSON returns the documented 400 body instead
    # of werkzeug's default BadRequest page.
    data = request.get_json(silent=True)
    if not isinstance(data, dict):
        return make_response("Not a JSON", 400)
    # id and timestamps are managed by the storage layer and must never be
    # overwritten by the client.
    for key, value in data.items():
        if key not in ('id', 'created_at', 'updated_at'):
            setattr(place, key, value)
    storage.save()
    return make_response(jsonify(place.to_dict()), 200)
@app_views.route('/places_search',
                 methods=['POST'], strict_slashes=False)
def get_places_search():
    """Search Place objects by states, cities and amenities.

    JSON body may contain ``states``, ``cities`` and ``amenities`` lists of
    ids.  States and cities are a union (deduplicated); if both are empty,
    every place is a candidate.  Amenities then filter the candidates down
    to places providing *all* requested amenities.

    Fixes over the previous version: the ``amenities`` key was read but
    never applied; overlapping states/cities produced duplicate results;
    an amenities-only request wrongly returned an empty list.
    """
    data = request.get_json(silent=True)
    if not isinstance(data, dict):
        return make_response("Not a JSON", 400)
    states = data.get('states', [])
    cities = data.get('cities', [])
    amenities = data.get('amenities', [])
    if states or cities:
        # Union of all places in the requested states and cities, keyed by
        # id to avoid duplicates when a city belongs to a listed state.
        found = {}
        for state_id in states:
            state = storage.get(State, state_id)
            if state is not None:
                for city in state.cities:
                    for place in city.places:
                        found[place.id] = place
        for city_id in cities:
            city = storage.get(City, city_id)
            if city is not None:
                for place in city.places:
                    found[place.id] = place
        candidates = list(found.values())
    else:
        candidates = list(storage.all(Place).values())
    if amenities:
        wanted = [storage.get(Amenity, amenity_id) for amenity_id in amenities]
        wanted = [amenity for amenity in wanted if amenity is not None]
        candidates = [place for place in candidates
                      if all(amenity in place.amenities for amenity in wanted)]
    places = []
    for place in candidates:
        place_dict = place.to_dict()
        # The amenities relationship is not JSON serializable (DBStorage);
        # drop it from the payload if present.
        place_dict.pop('amenities', None)
        places.append(place_dict)
    return jsonify(places)
|
import os
import gzip
import numpy as np
from scipy import io
import cPickle as pickle
import os
import gzip
import numpy as np
from scipy import io
import cPickle as pickle
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) minibatches of exactly ``batchsize`` items.

    A trailing partial batch is dropped.  With ``shuffle=True`` a random
    permutation (via ``np.random.shuffle``) is applied so batches sample
    the data in random order.
    """
    assert len(inputs) == len(targets)
    total = len(inputs)
    if shuffle:
        order = np.arange(total)
        np.random.shuffle(order)
    for lo in range(0, total - batchsize + 1, batchsize):
        # Fancy-index when shuffled, cheap slice otherwise.
        sel = order[lo:lo + batchsize] if shuffle else slice(lo, lo + batchsize)
        yield inputs[sel], targets[sel]
def load_mnist(base='./data/mnist'):
    """
    load_mnist taken from https://github.com/Lasagne/Lasagne/blob/master/examples/images.py

    :param base: base path to images dataset
    :returns: (X_train, y_train, input_shape) where input_shape is
        (None, 1, 28, 28) and pixel values are float32 in [0, 1).
    """
    def load_mnist_images(filename):
        # IDX image file: 16-byte header, then raw uint8 pixels.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
            data = data.reshape(-1, 1, 28, 28)
            # Scale to [0, 1) by dividing by 256.
            return data / np.float32(256)
    def load_mnist_labels(filename):
        # IDX label file: 8-byte header, then one uint8 label per image.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        return data
    # We can now download and read the training and test set image and labels.
    X_train = load_mnist_images(base + '/train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels(base + '/train-labels-idx1-ubyte.gz')
    return X_train, y_train, (None, 1, 28, 28)
#!/usr/bin/env python
# vim: set expandtab tabstop=4 shiftwidth=4:
# Copyright (c) 2018, CJ Kucera
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the development team nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CJ KUCERA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
# modprocessor.py lives in the parent directory of the mod collection; it
# must be copied or symlinked next to this script for the import to succeed.
try:
    from modprocessor import ModProcessor, Config
    mp = ModProcessor()
except ModuleNotFoundError:
    print('')
    print('********************************************************************')
    print('To run this script, you will need to copy or symlink modprocessor.py')
    print('from the parent directory, so it exists here as well. Sorry for')
    print('the bother!')
    print('********************************************************************')
    print('')
    sys.exit(1)
###
### Output variables
###
# Name and version stamped into the generated mod header; the output file is
# named after the mod.
mod_name = 'Easier ECLIPSE and EOS'
mod_version = '1.0.0'
output_filename = '{}.blcm'.format(mod_name)
###
### Control classes
###
class EclipseUltimate(Config):
    """
    Buff ECLIPSE instead of nerfing him. 'cause why not?

    Attribute values are substituted into the ECLIPSE template below via
    '{config:attr}' format specs.
    """
    label = 'Mega Badass Difficulty (this is a buff, not a nerf!)'
    health_mult = 240
    shield_mult = 200
    nonweapon_damage_mult = 9
    arm_laser_damage_scale = 0.7
    rocket_speed = 1900
    rocket_damage_scale = 1.2
    shock_orb_damage_scale = 1
    shock_orb_effect_chance_scale = 2
class EclipseStock(Config):
    """
    Stock definitions for ECLIPSE — the game's unmodified values, offered so
    ECLIPSE can be left untouched while EOS is changed.
    """
    label = 'Stock Difficulty'
    health_mult = 180
    shield_mult = 160
    nonweapon_damage_mult = 7
    arm_laser_damage_scale = 0.4
    rocket_speed = 1500
    rocket_damage_scale = 1
    shock_orb_damage_scale = 0.5
    shock_orb_effect_chance_scale = 1
class EclipseEasier(Config):
    """
    Easier definitions for ECLIPSE — moderate nerf relative to stock.
    """
    label = 'Easier ECLIPSE'
    health_mult = 120
    shield_mult = 110
    nonweapon_damage_mult = 6
    arm_laser_damage_scale = 0.35
    rocket_speed = 1300
    rocket_damage_scale = 0.8
    # Honestly, these aren't too bad IMO, just keeping them at the default.
    shock_orb_damage_scale = 0.5
    shock_orb_effect_chance_scale = 1
class EclipseWeak(Config):
    """
    Weak definitions for ECLIPSE — heavier nerf than EclipseEasier.
    """
    label = 'Even Easier ECLIPSE'
    health_mult = 60
    shield_mult = 60
    nonweapon_damage_mult = 5
    arm_laser_damage_scale = 0.2
    rocket_speed = 1100
    rocket_damage_scale = 0.6
    shock_orb_damage_scale = 0.4
    shock_orb_effect_chance_scale = 0.8
class EclipseChump(Config):
    """
    And, why not. Total shrimp of a boss — minimal stats for a trivial fight.
    """
    label = 'Total Chump'
    health_mult = 5
    shield_mult = 5
    nonweapon_damage_mult = 2
    arm_laser_damage_scale = 0.1
    rocket_speed = 550
    rocket_damage_scale = 0.2
    shock_orb_damage_scale = 0.2
    shock_orb_effect_chance_scale = 0.5
###
### Start generating the mod
###
mod_list = []
mod_list.append("""TPS
#<{mod_name}>
# {mod_name} v{mod_version}
# by Apocalyptech
# Licensed under Public Domain / CC0 1.0 Universal
#
# Makes the boss fights against ECLIPSE and EOS easier. Each has a few different
# options, and can be toggled independently of each other (including setting them
# to the stock values, in case you want to nerf one but not the other).
#
# Should you be feeling masochistic, there's also an option which buffs both of
# them, rather than nerfing.
#<ECLIPSE><MUT>
""".format(mod_name=mod_name, mod_version=mod_version))
###
### ECLIPSE
###
# Emit one ECLIPSE difficulty section per config inside the mutually
# exclusive (MUT) category opened above.
# NOTE(review): '{config:label}' etc. rely on Config implementing __format__
# so the format spec selects an attribute — defined in modprocessor; confirm.
for config in [EclipseEasier(), EclipseWeak(), EclipseChump(), EclipseStock(), EclipseUltimate()]:
    mod_list.append("""
#<{config:label}>
#<Health Multiplier>
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Character.CharClass_LoaderUltimateBadass AttributeStartingValues[0].BaseValue.BaseValueConstant {config:health_mult}
#</Health Multiplier>
#<Shield Multiplier>
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Character.CharClass_LoaderUltimateBadass AttributeStartingValues[6].BaseValue.BaseValueConstant {config:shield_mult}
#</Shield Multiplier>
#<"Non-Weapon" Damage Multiplier>
# This ends up affecting most of ECLIPSE's attacks, such as arm lasers,
# rocket attacks, and shock balls. Could affect other damage output from
# him as well. The extra damage reduction done in the individual
# categories below will be on top of this tweak.
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Character.CharClass_LoaderUltimateBadass AttributeStartingValues[1].BaseValue.BaseValueConstant {config:nonweapon_damage_mult}
#</"Non-Weapon" Damage Multiplier>
#<Arm Lasers>
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Anims.Anim_LoaderUltimateBadass_ArmGun_Loop:BehaviorProviderDefinition_32.Behavior_AIThrowProjectileAtTarget_7 ChildProjectileBaseValues[0].BaseValue.BaseValueScaleConstant {config:arm_laser_damage_scale}
#</Arm Lasers>
#<Rockets>
#<Rocket Speed>
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Projectiles.Proj_RocketLaunch SpeedFormula.BaseValueConstant {config:rocket_speed}
#</Rocket Speed>
#<Rocket Damage Scale>
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Anims.Anim_LoaderUltimateBadass_Missile_Loop:BehaviorProviderDefinition_32.Behavior_SpawnProjectile_50 ChildProjectileBaseValues[0].BaseValue.BaseValueScaleConstant {config:rocket_damage_scale}
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Anims.Anim_LoaderUltimateBadass_Missile_Loop:BehaviorProviderDefinition_32.Behavior_SpawnProjectile_51 ChildProjectileBaseValues[0].BaseValue.BaseValueScaleConstant {config:rocket_damage_scale}
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Anims.Anim_LoaderUltimateBadass_Missile_Loop:BehaviorProviderDefinition_32.Behavior_SpawnProjectile_52 ChildProjectileBaseValues[0].BaseValue.BaseValueScaleConstant {config:rocket_damage_scale}
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Anims.Anim_LoaderUltimateBadass_Missile_Loop:BehaviorProviderDefinition_32.Behavior_SpawnProjectile_53 ChildProjectileBaseValues[0].BaseValue.BaseValueScaleConstant {config:rocket_damage_scale}
#</Rocket Damage Scale>
#</Rockets>
#<Shock Orbs>
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Projectiles.Proj_ShockBall:BehaviorProviderDefinition_0.Behavior_Explode_5 StatusEffectDamage.BaseValueScaleConstant {config:shock_orb_damage_scale}
level Ma_FinalBoss_P set GD_Ma_VoltronTrap.Projectiles.Proj_ShockBall:BehaviorProviderDefinition_0.Behavior_Explode_5 StatusEffectChance.BaseValueScaleConstant {config:shock_orb_effect_chance_scale}
#</Shock Orbs>
#</{config:label}>
""".format(config=config))
###
### End of ECLIPSE
###
# Close the ECLIPSE category and open the mutually-exclusive EOS one.
mod_list.append('#</ECLIPSE>')
###
### EOS
###
mod_list.append('#<EOS><MUT>')
class EosUltimate(Config):
    """
    Buff EOS instead of nerfing him. 'cause why not?

    Attribute values are substituted into the EOS template via
    '{config:attr}' format specs.
    """
    label = 'Mega Badass Difficulty (this is a buff, not a nerf!)'
    health_mult = 290
    shield_mult = 170
    nonweapon_damage_mult = 11
    turret_health_scale = 45
    turret_damage_scale = 2
    rocket_launcher_health_scale = 40
    rocket_damage_scale = 2
    sticky_grenade_damage_scale = 1.3
    moonshot_damage_scale_0 = 15
    moonshot_damage_scale_1 = 20
    moonshot_badass_pawn = 'GD_Ma_Pop_Glitches.Balance.PawnBalance_BadassGlitch'
    moonshot_regular_pawn_0 = 'GD_Ma_Pop_ClaptrapForces.Population.Uniques.PopDef_ShadowClone_Eos'
    moonshot_regular_pawn_1 = 'GD_Ma_Pop_Glitches.Population.PopDef_Glitch'
    moonshot_regular_pawn_2 = 'GD_Ma_Pop_Virus.Population.PopDef_VirusLauncher'
    moonshot_regular_pawn_3 = 'GD_Ma_Pop_Virus.Population.PopDef_Virus'
    moonshot_regular_pawn_4 = 'GD_Ma_Pop_Virus.Population.PopDef_ParasiticVirus'
    # I *think* this is for the first bit of the battle
    moonshot_regular_pawn_0_weight_0 = 1.0
    moonshot_regular_pawn_1_weight_0 = 0.6
    moonshot_regular_pawn_2_weight_0 = 1.0
    moonshot_regular_pawn_3_weight_0 = 0.6
    moonshot_regular_pawn_4_weight_0 = 1.0
    # And then this is after EOS is hurt a bit
    moonshot_regular_pawn_0_weight_1 = 1.0
    moonshot_regular_pawn_1_weight_1 = 0.3
    moonshot_regular_pawn_2_weight_1 = 2.0
    moonshot_regular_pawn_3_weight_1 = 0.5
    moonshot_regular_pawn_4_weight_1 = 1.2
    eye_of_helios_delay = 0
    eye_of_helios_damage_scale = 99
    eye_of_helios_damage_radius = 2000
    minion_regular_population = 'GD_Ma_Pop_Glitches.Population.PopDef_BadassGlitch'
    minion_regular_max_active = 3
    minion_regular_max_total = 20
    minion_regular_respawn_delay = 0.2
    minion_badass_population = 'GD_Ma_Pop_Glitches.Population.PopDef_BadassGlitch'
    minion_badass_max_active = 1
    minion_badass_max_total = 20
    minion_badass_respawn_delay = 0.2
class EosStock(Config):
    """
    Stock definitions for EOS (docstring previously said ECLIPSE) — the
    game's unmodified values.
    """
    # Not going to do anything with the yellow sticky-grenade things that EOS
    # lobs at you when its turrets are down. They're at GD_Ma_Helios.Projectiles.Proj_SpamGrenade.
    # Would be pretty trivial to do so if we wanted, though.
    label = 'Stock Difficulty'
    health_mult = 220
    shield_mult = 130
    nonweapon_damage_mult = 8
    turret_health_scale = 35
    turret_damage_scale = 1
    rocket_launcher_health_scale = 25
    rocket_damage_scale = 1
    sticky_grenade_damage_scale = 1
    moonshot_damage_scale_0 = 12
    moonshot_damage_scale_1 = 15
    moonshot_badass_pawn = 'GD_Ma_Pop_Glitches.Balance.PawnBalance_BadassGlitch'
    moonshot_regular_pawn_0 = 'GD_Ma_Pop_ClaptrapForces.Population.Uniques.PopDef_ShadowClone_Eos'
    moonshot_regular_pawn_1 = 'GD_Ma_Pop_Glitches.Population.PopDef_Glitch'
    moonshot_regular_pawn_2 = 'GD_Ma_Pop_Virus.Population.PopDef_VirusLauncher'
    moonshot_regular_pawn_3 = 'GD_Ma_Pop_Virus.Population.PopDef_Virus'
    moonshot_regular_pawn_4 = 'GD_Ma_Pop_Virus.Population.PopDef_ParasiticVirus'
    # I *think* this is for the first bit of the battle
    moonshot_regular_pawn_0_weight_0 = 0.4
    moonshot_regular_pawn_1_weight_0 = 1.0
    moonshot_regular_pawn_2_weight_0 = 0.25
    moonshot_regular_pawn_3_weight_0 = 1.0
    moonshot_regular_pawn_4_weight_0 = 1.0
    # And then this is after EOS is hurt a bit
    moonshot_regular_pawn_0_weight_1 = 0.25
    moonshot_regular_pawn_1_weight_1 = 1.0
    moonshot_regular_pawn_2_weight_1 = 1.0
    moonshot_regular_pawn_3_weight_1 = 1.0
    moonshot_regular_pawn_4_weight_1 = 1.0
    eye_of_helios_delay = 0
    eye_of_helios_damage_scale = 99
    eye_of_helios_damage_radius = 1500
    minion_regular_population = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    minion_regular_max_active = 3
    minion_regular_max_total = 10
    minion_regular_respawn_delay = 0.5
    minion_badass_population = 'GD_Ma_Pop_Glitches.Population.PopDef_BadassGlitch'
    minion_badass_max_active = 1
    minion_badass_max_total = 4
    minion_badass_respawn_delay = 0.5
class EosEasier(Config):
    """
    Easier definitions for EOS — moderate nerf relative to stock.
    """
    label = 'Easier EOS'
    health_mult = 160
    shield_mult = 85
    nonweapon_damage_mult = 5.5
    turret_health_scale = 20
    turret_damage_scale = 0.8
    rocket_launcher_health_scale = 20
    rocket_damage_scale = 0.8
    sticky_grenade_damage_scale = 0.8
    moonshot_damage_scale_0 = 10
    moonshot_damage_scale_1 = 13
    moonshot_badass_pawn = 'GD_Ma_Pop_Glitches.Balance.PawnBalance_BadassGlitch'
    moonshot_regular_pawn_0 = 'GD_Ma_Pop_ClaptrapForces.Population.Uniques.PopDef_ShadowClone_Eos'
    moonshot_regular_pawn_1 = 'GD_Ma_Pop_Glitches.Population.PopDef_Glitch'
    moonshot_regular_pawn_2 = 'GD_Ma_Pop_Virus.Population.PopDef_VirusLauncher'
    moonshot_regular_pawn_3 = 'GD_Ma_Pop_Virus.Population.PopDef_Virus'
    moonshot_regular_pawn_4 = 'GD_Ma_Pop_Virus.Population.PopDef_ParasiticVirus'
    # I *think* this is for the first bit of the battle
    moonshot_regular_pawn_0_weight_0 = 0.2
    moonshot_regular_pawn_1_weight_0 = 1.0
    moonshot_regular_pawn_2_weight_0 = 0.2
    moonshot_regular_pawn_3_weight_0 = 1.0
    moonshot_regular_pawn_4_weight_0 = 0.9
    # And then this is after EOS is hurt a bit
    moonshot_regular_pawn_0_weight_1 = 0.25
    moonshot_regular_pawn_1_weight_1 = 1.0
    moonshot_regular_pawn_2_weight_1 = 0.5
    moonshot_regular_pawn_3_weight_1 = 1.0
    moonshot_regular_pawn_4_weight_1 = 1.0
    eye_of_helios_delay = 0.5
    eye_of_helios_damage_scale = 90
    eye_of_helios_damage_radius = 1300
    minion_regular_population = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    minion_regular_max_active = 3
    minion_regular_max_total = 10
    minion_regular_respawn_delay = 1.5
    minion_badass_population = 'GD_Ma_Pop_Glitches.Population.PopDef_BadassGlitch'
    minion_badass_max_active = 1
    minion_badass_max_total = 2
    minion_badass_respawn_delay = 3
class EosWeak(Config):
    """
    Even weaker definitions for EOS (the old docstring said "ECLIPSE",
    but everything in this class targets the EOS fight objects).
    """
    label = 'Even Easier EOS'
    # Overall durability and (non-weapon) outgoing damage tuning.
    health_mult = 120
    shield_mult = 60
    nonweapon_damage_mult = 4.5
    # Turret / rocket-launcher / sticky-grenade phase scaling.
    turret_health_scale = 15
    turret_damage_scale = 0.4
    rocket_launcher_health_scale = 15
    rocket_damage_scale = 0.7
    sticky_grenade_damage_scale = 0.5
    # Moonshot attack: two separate explosion behaviors get scaled.
    moonshot_damage_scale_0 = 8
    moonshot_damage_scale_1 = 10
    moonshot_badass_pawn = 'GD_Ma_Pop_Virus.Balance.PawnBalance_VirusLauncher'
    moonshot_regular_pawn_0 = 'GD_Ma_Pop_ClaptrapForces.Population.Uniques.PopDef_ShadowClone_Eos'
    moonshot_regular_pawn_1 = 'GD_Ma_Pop_Glitches.Population.PopDef_Glitch'
    moonshot_regular_pawn_2 = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    moonshot_regular_pawn_3 = 'GD_Ma_Pop_Virus.Population.PopDef_Virus'
    moonshot_regular_pawn_4 = 'GD_Ma_Pop_Virus.Population.PopDef_ParasiticVirus'
    # I *think* this is for the first bit of the battle
    moonshot_regular_pawn_0_weight_0 = 0
    moonshot_regular_pawn_1_weight_0 = 1.0
    moonshot_regular_pawn_2_weight_0 = 1.0
    moonshot_regular_pawn_3_weight_0 = 1.0
    moonshot_regular_pawn_4_weight_0 = 0.4
    # And then this is after EOS is hurt a bit
    moonshot_regular_pawn_0_weight_1 = 0.25
    moonshot_regular_pawn_1_weight_1 = 1.0
    moonshot_regular_pawn_2_weight_1 = 1.0
    moonshot_regular_pawn_3_weight_1 = 1.0
    moonshot_regular_pawn_4_weight_1 = 0.8
    # Eye-of-Helios laser: pre-fire delay (also shortens the beam), damage, radius.
    eye_of_helios_delay = 1
    eye_of_helios_damage_scale = 80
    eye_of_helios_damage_radius = 1200
    # Between-wave minion spawns (regular pool and badass pool).
    minion_regular_population = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    minion_regular_max_active = 2
    minion_regular_max_total = 6
    minion_regular_respawn_delay = 3
    minion_badass_population = 'GD_Ma_Pop_Glitches.Population.PopDef_BadassGlitch'
    minion_badass_max_active = 1
    minion_badass_max_total = 1
    minion_badass_respawn_delay = 6
class EosChump(Config):
    """
    And, why not. Total shrimp of a boss.
    """
    label = 'Total Chump'
    # Overall durability and (non-weapon) outgoing damage tuning.
    health_mult = 40
    shield_mult = 10
    nonweapon_damage_mult = 2
    # Turret / rocket-launcher / sticky-grenade phase scaling.
    turret_health_scale = 5
    turret_damage_scale = 0.4
    rocket_launcher_health_scale = 5
    rocket_damage_scale = 0.4
    sticky_grenade_damage_scale = 0.3
    # Moonshot attack: two separate explosion behaviors get scaled.
    moonshot_damage_scale_0 = 4
    moonshot_damage_scale_1 = 6
    moonshot_badass_pawn = 'GD_Ma_Pop_Virus.Balance.PawnBalance_ParasiticVirus'
    moonshot_regular_pawn_0 = 'GD_Ma_Pop_ClaptrapForces.Population.Uniques.PopDef_ShadowClone_Eos'
    moonshot_regular_pawn_1 = 'GD_Ma_Pop_Glitches.Population.PopDef_Glitch'
    moonshot_regular_pawn_2 = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    moonshot_regular_pawn_3 = 'GD_Ma_Pop_Virus.Population.PopDef_Virus'
    moonshot_regular_pawn_4 = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    # I *think* this is for the first bit of the battle
    moonshot_regular_pawn_0_weight_0 = 0
    moonshot_regular_pawn_1_weight_0 = 1.0
    moonshot_regular_pawn_2_weight_0 = 1.0
    moonshot_regular_pawn_3_weight_0 = 1.0
    moonshot_regular_pawn_4_weight_0 = 1.0
    # And then this is after EOS is hurt a bit
    moonshot_regular_pawn_0_weight_1 = 0
    moonshot_regular_pawn_1_weight_1 = 1.0
    moonshot_regular_pawn_2_weight_1 = 1.0
    moonshot_regular_pawn_3_weight_1 = 1.0
    moonshot_regular_pawn_4_weight_1 = 1.0
    # Eye-of-Helios laser: pre-fire delay (also shortens the beam), damage, radius.
    eye_of_helios_delay = 2
    eye_of_helios_damage_scale = 70
    eye_of_helios_damage_radius = 1000
    # Between-wave minion spawns; badass spawns fully disabled for this preset.
    minion_regular_population = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    minion_regular_max_active = 2
    minion_regular_max_total = 4
    minion_regular_respawn_delay = 5
    minion_badass_population = 'GD_Ma_Pop_Glitches.Mixes.PopDef_Glitches_Mix_FinalBoss_Weak'
    minion_badass_max_active = 0
    minion_badass_max_total = 0
    minion_badass_respawn_delay = 20
# Emit one BLCM category per difficulty preset.  The `{config:label}` fields
# use format-spec syntax, i.e. Config.__format__ resolves the attribute named
# in the spec — the template below is a runtime string and must not be edited
# for cosmetics.
for config in [EosEasier(), EosWeak(), EosChump(), EosStock(), EosUltimate()]:
    mod_list.append("""
#<{config:label}>

#<Health and Shield Multiplier>

# For some reason completely unbeknownst to me, some of our earlier statements
# which modify ECLIPSE end up altering the EOS AIPawnBalanceDefinition;
# specifically, they remove its PlayThroughs[0].AttributeStartingValues
# array.  Damned if I know why.  It's the sets to the AttributeStartingValues
# array in GD_Ma_VoltronTrap.Character.CharClass_LoaderUltimateBadass which
# does it, which makes no bloody sense at all.  They're two totally different
# objects.  And not even the same *kind* of object.  I don't know.  Weird.
# Anyway, we have to recreate it entirely in here.  We *could* just use
# the CharClass instead, and leave them blank here, of course, but it's a
# point of pride to keep this in here, at this point.

level Ma_FinalBoss_P set GD_Ma_Pop_BossFights.Balance.PawnBalance_Helios PlayThroughs[0].AttributeStartingValues
(
    (
        Attribute = AttributeDefinition'GD_Balance_HealthAndDamage.AIParameters.Attribute_HealthMultiplier',
        BaseValue =
        (
            BaseValueConstant = {config:health_mult},
            BaseValueAttribute = None,
            InitializationDefinition = None,
            BaseValueScaleConstant = 1.000000
        )
    ),
    (
        Attribute = AttributeDefinition'GD_Balance_HealthAndDamage.AIParameters.Attribute_EnemyShieldMaxValueMultiplier',
        BaseValue =
        (
            BaseValueConstant = {config:shield_mult},
            BaseValueAttribute = None,
            InitializationDefinition = None,
            BaseValueScaleConstant = 1.000000
        )
    )
)

#</Health and Shield Multiplier>

#<"Non-Weapon" Damage Multiplier>

# This ends up affecting most of EOS's attacks

level Ma_FinalBoss_P set GD_Ma_Helios.Character.CharClass_Ma_Helios AttributeStartingValues[1].BaseValue.BaseValueConstant {config:nonweapon_damage_mult}

#</"Non-Weapon" Damage Multiplier>

#<Turrets>

#<Regular Turrets>

#<Health>

level Ma_FinalBoss_P set GD_Ma_HeliosTurret.Character.CharClass_Ma_HeliosTurret AttributeStartingValues[1].BaseValue.BaseValueConstant {config:turret_health_scale}

#</Health>

#<Damage>

# I'm actually not totally sure what buffs these up to begin with, but we can scale the final damage var pretty easily.
# (I'm guessing it's the non-weapon multiplier, above, though I'm not sure how)

level Ma_FinalBoss_P set GD_Ma_HeliosTurret.Weapons.Ma_HeliosTurret_WeaponType InstantHitDamage.BaseValueScaleConstant {config:turret_damage_scale}

#</Damage>

#</Regular Turrets>

#<Rocket Launchers>

#<Health>

level Ma_FinalBoss_P set GD_Ma_EosRocketTurret.Character.CharClass_Ma_EosRocketTurret AttributeStartingValues[1].BaseValue.BaseValueConstant {config:rocket_launcher_health_scale}

#</Health>

#<Damage>

# I'm actually not totally sure what buffs these up to begin with, but we can scale the final damage var pretty easily.
# (I'm guessing it's the non-weapon multiplier, above, though I'm not sure how)

level Ma_FinalBoss_P set GD_Ma_EosRocketTurret.Projectiles.Projectile_Rocket:BehaviorProviderDefinition_0.Behavior_Explode_351 DamageFormula.BaseValueScaleConstant {config:rocket_damage_scale}

#</Damage>

#</Rocket Launchers>

#</Turrets>

#<Sticky Grenades>

# These are the yellow grenades that EOS throws out during the final phase, or
# when all of his turrets have been destroyed

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_SpamGrenade:BehaviorProviderDefinition_1.Behavior_Explode_11 DamageFormula.BaseValueScaleConstant {config:sticky_grenade_damage_scale}

#</Sticky Grenades>

#<Moonshot Attack>

#<Overall Damage>

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_Explode_5 DamageFormula.BaseValueScaleConstant {config:moonshot_damage_scale_0}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_Explode_6 DamageFormula.BaseValueScaleConstant {config:moonshot_damage_scale_1}

#</Overall Damage>

#<Spawned Reinforcements>

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_SpawnFromPopulationSystem_1.PopulationFactoryBalancedAIPawn_0 PawnBalanceDefinition AIPawnBalanceDefinition'{config:moonshot_badass_pawn}'

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_SpawnFromPopulationSystem_20.PopulationFactoryPopulationDefinition_0 PopulationDef WillowPopulationDefinition'{config:moonshot_regular_pawn_1}'

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_SpawnFromPopulationSystem_21.PopulationFactoryPopulationDefinition_0 PopulationDef WillowPopulationDefinition'{config:moonshot_regular_pawn_0}'

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_SpawnFromPopulationSystem_22.PopulationFactoryPopulationDefinition_0 PopulationDef WillowPopulationDefinition'{config:moonshot_regular_pawn_2}'

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_SpawnFromPopulationSystem_23.PopulationFactoryPopulationDefinition_0 PopulationDef WillowPopulationDefinition'{config:moonshot_regular_pawn_4}'

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_SpawnFromPopulationSystem_24.PopulationFactoryPopulationDefinition_0 PopulationDef WillowPopulationDefinition'{config:moonshot_regular_pawn_3}'

#</Spawned Reinforcements>

#<Spawn Weights>

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0 BehaviorSequences[2].BehaviorData2[3].LinkedVariables.ArrayIndexAndLength 0

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0 BehaviorSequences[2].BehaviorData2[16].LinkedVariables.ArrayIndexAndLength 0

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_11 Conditions[0] {config:moonshot_regular_pawn_0_weight_0}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_11 Conditions[1] {config:moonshot_regular_pawn_1_weight_0}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_11 Conditions[2] {config:moonshot_regular_pawn_2_weight_0}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_11 Conditions[3] {config:moonshot_regular_pawn_3_weight_0}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_11 Conditions[4] {config:moonshot_regular_pawn_4_weight_0}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_12 Conditions[0] {config:moonshot_regular_pawn_0_weight_1}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_12 Conditions[1] {config:moonshot_regular_pawn_1_weight_1}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_12 Conditions[2] {config:moonshot_regular_pawn_2_weight_1}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_12 Conditions[3] {config:moonshot_regular_pawn_3_weight_1}

level Ma_FinalBoss_P set GD_Ma_Helios.Projectiles.Proj_MoonShotCannon:BehaviorProviderDefinition_0.Behavior_RandomBranch_12 Conditions[4] {config:moonshot_regular_pawn_4_weight_1}

#</Spawn Weights>

#</Moonshot Attack>

#<Eye of Helios>

#<Attack Delay>

# Increasing the delay here will also have the effect of shortening the laser beam by that much.
# I'd love to figure out how to inject this delay *before* the laser-charging animation comes
# on, but the BPDs for EOS are just hideous, and I found this first, and it works, so I'm just
# leaving it there.  :)  It's easy to extend the laser duration by adding a delay to COLD[205]
# but the eye closes according to its original schedule, and I hadn't found where that timing was.

level Ma_FinalBoss_P set GD_Ma_Helios.Character.AiDef_Ma_Helios:AIBehaviorProviderDefinition_0 BehaviorSequences[0].ConsolidatedOutputLinkData[203].ActivateDelay {config:eye_of_helios_delay}

#</Attack Delay>

#<Attack Damage + Radius>

# I actually don't intend on nerfing this too much; it should remain a very deadly attack

level Ma_FinalBoss_P set GD_Ma_ShadowTrapEye.Character.AIDef_EyeOfHelios:AIBehaviorProviderDefinition_0.Behavior_FireBeam_88 DamagePerSecondFormula.BaseValueScaleConstant {config:eye_of_helios_damage_scale}

level Ma_FinalBoss_P set GD_Ma_ShadowTrapEye.Character.AIDef_EyeOfHelios:AIBehaviorProviderDefinition_0.Behavior_FireBeam_88 RadiusToDoDamageAroundImpact.BaseValueConstant {config:eye_of_helios_damage_radius}

#</Attack Damage + Radius>

#</Eye of Helios>

#<Between-Wave Enemy Spawns>

#<Regular Enemies>

#<Spawn Pool>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 PopulationDef WillowPopulationDefinition'{config:minion_regular_population}'

#</Spawn Pool>

#<Max Active>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 MaxActiveActorsIsNormal {config:minion_regular_max_active}

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 MaxActiveActorsThreatened {config:minion_regular_max_active}

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 SpawnData.MaxActiveActors {config:minion_regular_max_active}

#</Max Active>

#<Total Spawned>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 MaxTotalActors {config:minion_regular_max_total}

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 MaxTotalActorsFormula.BaseValueConstant {config:minion_regular_max_total}

#</Total Spawned>

#<Respawn Delay>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_4 RespawnDelay {config:minion_regular_respawn_delay}

#</Respawn Delay>

#</Regular Enemies>

#<Badass Enemies>

#<Spawn Pool>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 PopulationDef WillowPopulationDefinition'{config:minion_badass_population}'

#</Spawn Pool>

#<Max Active>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 MaxActiveActorsIsNormal {config:minion_badass_max_active}

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 MaxActiveActorsThreatened {config:minion_badass_max_active}

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 SpawnData.MaxActiveActors {config:minion_badass_max_active}

#</Max Active>

#<Total Spawned>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 MaxTotalActors {config:minion_badass_max_total}

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 MaxTotalActorsFormula.BaseValueConstant {config:minion_badass_max_total}

#</Total Spawned>

#<Respawn Delay>

level Ma_FinalBoss_P set Ma_FinalBoss_Game.TheWorld:PersistentLevel.PopulationOpportunityDen_1 RespawnDelay {config:minion_badass_respawn_delay}

#</Respawn Delay>

#</Badass Enemies>

#</Between-Wave Enemy Spawns>

#</{config:label}>
""".format(config=config))
###
### Close out the mod
###

# Close the category tags opened earlier in the script.
mod_list.append('#</EOS>')
mod_list.append('#</{}>'.format(mod_name))

###
### Output to a file.
###

# `mp` is the modprocessor helper; categories are joined with blank lines
# and serialized into BLCM format.
mp.human_str_to_blcm_filename("\n\n".join(mod_list), output_filename)
print('Wrote mod file to: {}'.format(output_filename))
|
class Solution:
    """LeetCode 9: palindrome integer check without string conversion."""

    def isPalindrome(self, x: int) -> bool:
        """Return True if the decimal digits of ``x`` read the same backwards.

        Negatives are never palindromes, and a number ending in 0 (other
        than 0 itself) cannot mirror a leading zero.
        """
        if x < 0:
            return False
        if x == 0:
            return True
        if x % 10 == 0:
            return False
        # Reverse only the lower half of the digits; stop once `rev`
        # has at least as many digits as what remains of `x`.
        rev = 0
        while x > rev:
            # BUG FIX: was `x = int(x / 10)`, which goes through float
            # division and silently loses precision for very large ints.
            x, rem = divmod(x, 10)
            rev = rev * 10 + rem
        # Even digit count: halves match exactly.
        # Odd digit count: drop rev's middle digit before comparing.
        return x == rev or x == rev // 10
'''
Given an m x n board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cells,
where "adjacent" cells are horizontally or vertically neighboring.
The same letter cell may not be used more than once.
Example 1:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCCED"
Output: true
Example 2:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "SEE"
Output: true
Example 3:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
Output: false
'''
# where "adjacent" cells are horizontally or vertically neighboring.
# def dfs(word, index_i, index_j, path):
def check(word, i, j, board, path, visited):
    """DFS helper: True if `word` can be traced starting at cell (i, j).

    `visited` is a shared list of (row, col) cells already used on the
    current path; it is restored on backtrack.  `path` accumulates the
    matched letters (kept for debugging parity with the original).
    Returns an explicit bool (the original fell through returning None).
    """
    if board[i][j] != word[0] or (i, j) in visited:
        return False
    visited.append((i, j))
    if len(word) == 1:
        # Whole word matched; deliberately leave `visited` as-is, since
        # every caller returns immediately on success.
        return True
    # Explore neighbors in the original order: up, down, right, left.
    for di, dj in ((-1, 0), (1, 0), (0, 1), (0, -1)):
        ni, nj = i + di, j + dj
        if 0 <= ni < len(board) and 0 <= nj < len(board[ni]):
            nxt = board[ni][nj]
            if word[1] == nxt and check(word[1:], ni, nj, board, path + nxt, visited):
                return True
    # Dead end: free this cell for other paths.
    visited.remove((i, j))
    return False
def exist(board, word):
    """Return True if `word` can be traced through adjacent grid cells,
    never reusing a cell; tries every cell as a starting point."""
    visited = []
    return any(
        check(word, row, col, board, "", visited)
        for row in range(len(board))
        for col in range(len(board[row]))
    )
# Exercise the word search on a few known boards/words.
cases = [
    ([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "SEE"),
    ([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCCED"),
    ([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCB"),
    ([["C","A","A"],["A","A","A"],["B","C","D"]], "AAB"),
]
for board, word in cases:
    print(exist(board, word))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import random
import subprocess
def pywal_image(image_path):
    """Run pywal (`wal -g -i <image>`) to theme from *image_path*,
    echoing the command line first."""
    command = ['wal', '-g', '-i', image_path]
    print(' '.join(command))
    subprocess.call(command)
def rotate_background(backgrounds_dir):
    """Pick a random file from *backgrounds_dir* and hand it to pywal."""
    candidates = [
        os.path.join(backgrounds_dir, entry)
        for entry in os.listdir(backgrounds_dir)
    ]
    pywal_image(random.choice(candidates))
if __name__ == '__main__':
    # BUG FIX: sys.argv always contains at least the script name, so the
    # original `len(sys.argv) < 1` could never be true and the usage
    # message was unreachable.  Require one actual argument instead.
    if len(sys.argv) < 2:
        print('Passing directory containing images to use: ./rotate_backgrounds.py /path/to/backgrounds_directory')
    else:
        backgrounds_dir_path = sys.argv[1]
        # Expand a leading ~ to the user's home directory.
        if '~' in backgrounds_dir_path:
            backgrounds_dir_path = os.path.expanduser(backgrounds_dir_path)
        rotate_background(backgrounds_dir_path)
|
import requests

# Fetch the 12306.cn front page and save it locally.
url = 'https://www.12306.cn/'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}
# HTTPS sites are normally validated against a third-party CA certificate.
# 12306 historically served a self-signed certificate rather than a CA-issued
# one, so the workaround is verify=False, which tells requests to skip
# certificate verification (the default is True).
# NOTE(review): verify=False disables TLS validation entirely — acceptable
# only for this one-off scraping demo, never for production code.
response = requests.get(url,headers=headers,verify=False)
data = response.content.decode('utf-8')
with open('02-ssl.html','w',encoding='utf-8') as f:
    f.write(data)
|
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import mininx as nx
from mininx.classes.graph import Graph
from mininx import MiniNXError
class MultiGraph(Graph):
    """An undirected graph that allows multiple (keyed) edges between the
    same pair of nodes.

    Edge storage is ``adj[u][v][key] -> data dict``; both directions of an
    edge share the same data dict.  This mirrors the networkx-1.x layout.
    """
    # node_dict_factory=dict # already assigned in Graph
    # adjlist_dict_factory=dict
    edge_key_dict_factory = dict
    # edge_attr_dict_factory=dict

    def __init__(self, data=None, **attr):
        """Initialize; `data` and `attr` are forwarded to Graph."""
        self.edge_key_dict_factory = self.edge_key_dict_factory
        Graph.__init__(self, data, **attr)

    def add_edge(self, u, v, key=None, attr_dict=None, **attr):
        """Add an edge u-v; with key=None an unused integer key is chosen.

        Keyword attrs override `attr_dict` entries.  Adds missing nodes.
        """
        # set up attribute dict
        if attr_dict is None:
            attr_dict = attr
        else:
            try:
                attr_dict.update(attr)
            except AttributeError:
                raise MiniNXError(
                    "The attr_dict argument must be a dictionary.")
        # add nodes
        if u not in self.adj:
            self.adj[u] = self.adjlist_dict_factory()
            self.node[u] = {}
        if v not in self.adj:
            self.adj[v] = self.adjlist_dict_factory()
            self.node[v] = {}
        if v in self.adj[u]:
            keydict = self.adj[u][v]
            if key is None:
                # find a unique integer key
                # other methods might be better here?
                key = len(keydict)
                while key in keydict:
                    key += 1
            # Re-adding an existing key updates that edge's attributes.
            datadict = keydict.get(key, self.edge_attr_dict_factory())
            datadict.update(attr_dict)
            keydict[key] = datadict
        else:
            # selfloops work this way without special treatment
            if key is None:
                key = 0
            datadict = self.edge_attr_dict_factory()
            datadict.update(attr_dict)
            keydict = self.edge_key_dict_factory()
            keydict[key] = datadict
            # Both directions reference the same keydict object.
            self.adj[u][v] = keydict
            self.adj[v][u] = keydict

    def add_edges_from(self, ebunch, attr_dict=None, **attr):
        """Add edges from 2-, 3- (u, v, data) or 4-tuples (u, v, key, data)."""
        # set up attribute dict
        if attr_dict is None:
            attr_dict = attr
        else:
            try:
                attr_dict.update(attr)
            except AttributeError:
                raise MiniNXError(
                    "The attr_dict argument must be a dictionary.")
        # process ebunch
        for e in ebunch:
            ne = len(e)
            if ne == 4:
                u, v, key, dd = e
            elif ne == 3:
                u, v, dd = e
                key = None
            elif ne == 2:
                u, v = e
                dd = {}
                key = None
            else:
                raise MiniNXError(
                    "Edge tuple %s must be a 2-tuple, 3-tuple or 4-tuple." % (e,))
            # Per-edge data overrides the shared attr_dict.
            ddd = {}
            ddd.update(attr_dict)
            ddd.update(dd)
            self.add_edge(u, v, key, ddd)

    def remove_edge(self, u, v, key=None):
        """Remove one u-v edge; with key=None an arbitrary one is removed."""
        try:
            d = self.adj[u][v]
        except (KeyError):
            raise MiniNXError(
                "The edge %s-%s is not in the graph." % (u, v))
        # remove the edge with specified data
        if key is None:
            d.popitem()
        else:
            try:
                del d[key]
            except (KeyError):
                raise MiniNXError(
                    "The edge %s-%s with key %s is not in the graph." % (
                        u, v, key))
        if len(d) == 0:
            # remove the key entries if last edge
            del self.adj[u][v]
            if u!=v:  # check for selfloop
                del self.adj[v][u]

    def remove_edges_from(self, ebunch):
        """Remove edges given as (u, v[, key[, data]]) tuples; missing
        edges are silently ignored."""
        for e in ebunch:
            try:
                self.remove_edge(*e[:3])
            except MiniNXError:
                pass

    def has_edge(self, u, v, key=None):
        """True if an edge u-v exists (with `key`, if given)."""
        try:
            if key is None:
                return v in self.adj[u]
            else:
                return key in self.adj[u][v]
        except KeyError:
            return False

    def edges(self, nbunch=None, data=False, keys=False, default=None):
        """Yield edges, optionally with keys and data.

        data=True yields full data dicts; a string `data` yields that
        attribute (or `default`).  Each undirected edge is reported once.
        """
        seen = {}     # helper dict to keep track of multiply stored edges
        if nbunch is None:
            nodes_nbrs = self.adj.items()
        else:
            nodes_nbrs = ((n, self.adj[n]) for n in self.nbunch_iter(nbunch))
        if data is True:
            for n, nbrs in nodes_nbrs:
                for nbr, keydict in nbrs.items():
                    if nbr not in seen:
                        for key, ddict in keydict.items():
                            yield (n, nbr, key, ddict) if keys else (n, nbr, ddict)
                seen[n] = 1
        elif data is not False:
            for n, nbrs in nodes_nbrs:
                for nbr, keydict in nbrs.items():
                    if nbr not in seen:
                        for key, ddict in keydict.items():
                            d = ddict[data] if data in ddict else default
                            yield (n, nbr, key, d) if keys else (n, nbr, d)
                seen[n] = 1
        else:
            for n, nbrs in nodes_nbrs:
                for nbr, keydict in nbrs.items():
                    if nbr not in seen:
                        for key in keydict:
                            yield (n, nbr, key) if keys else (n, nbr)
                seen[n] = 1
        del seen

    def get_edge_data(self, u, v, key=None, default=None):
        """Return the keydict for u-v (or one edge's data dict if `key`
        is given); `default` when the edge does not exist."""
        try:
            if key is None:
                return self.adj[u][v]
            else:
                return self.adj[u][v][key]
        except KeyError:
            return default

    def degree(self, nbunch=None, weight=None):
        """Degree of a single node, or an iterator of (node, degree).

        Parallel edges each count once; self-loops count twice (once via
        the extra `n in nbrs` term).  With `weight`, sums that edge
        attribute (missing values count as 1).
        """
        # Test to see if nbunch is a single node, an iterator of nodes or
        # None(indicating all nodes). (nbunch in self) is True when nbunch
        # is a single node.
        if nbunch in self:
            nbrs = self.adj[nbunch]
            if weight is None:
                return sum([len(data) for data in nbrs.values()]) + (nbunch in nbrs and len(nbrs[nbunch]))
            deg = sum([d.get(weight, 1) for data in nbrs.values() for d in data.values()])
            if nbunch in nbrs:
                deg += sum([d.get(weight, 1) for key, d in nbrs[nbunch].items()])
            return deg
        if nbunch is None:
            nodes_nbrs = self.adj.items()
        else:
            nodes_nbrs = ((n, self.adj[n]) for n in self.nbunch_iter(nbunch))
        if weight is None:
            def d_iter():
                for n, nbrs in nodes_nbrs:
                    deg = sum([len(data) for data in nbrs.values()])
                    yield (n, deg + (n in nbrs and len(nbrs[n])))
        else:
            # edge weighted graph - degree is sum of nbr edge weights
            def d_iter():
                for n, nbrs in nodes_nbrs:
                    deg = sum([d.get(weight, 1)
                               for data in nbrs.values()
                               for d in data.values()])
                    if n in nbrs:
                        deg += sum([d.get(weight, 1)
                                    for key, d in nbrs[n].items()])
                    yield (n, deg)
        return d_iter()

    def is_multigraph(self):
        """True: this class stores parallel edges."""
        return True

    def is_directed(self):
        """False: edges are undirected."""
        return False

    def to_directed(self):
        """Return a MultiDiGraph with each undirected edge as two arcs;
        node/graph/edge data are deep-copied."""
        from mininx.classes.multidigraph import MultiDiGraph
        G = MultiDiGraph()
        G.add_nodes_from(self)
        G.add_edges_from((u, v, key, deepcopy(datadict))
                         for u, nbrs in self.adjacency()
                         for v, keydict in nbrs.items()
                         for key, datadict in keydict.items())
        G.graph = deepcopy(self.graph)
        G.node = deepcopy(self.node)
        return G

    def selfloop_edges(self, data=False, keys=False, default=None):
        """Return a generator over self-loop edges, with the same
        data/keys/default semantics as edges()."""
        if data is True:
            if keys:
                return ((n, n, k, d)
                        for n, nbrs in self.adj.items()
                        if n in nbrs for k, d in nbrs[n].items())
            else:
                return ((n, n, d)
                        for n, nbrs in self.adj.items()
                        if n in nbrs for d in nbrs[n].values())
        elif data is not False:
            if keys:
                return ((n, n, k, d.get(data, default))
                        for n, nbrs in self.adj.items()
                        if n in nbrs for k, d in nbrs[n].items())
            else:
                return ((n, n, d.get(data, default))
                        for n, nbrs in self.adj.items()
                        if n in nbrs for d in nbrs[n].values())
        else:
            if keys:
                return ((n, n, k)
                        for n, nbrs in self.adj.items()
                        if n in nbrs for k in nbrs[n].keys())
            else:
                # one (n, n) per parallel self-loop edge
                return ((n, n)
                        for n, nbrs in self.adj.items()
                        if n in nbrs for d in nbrs[n].values())

    def number_of_edges(self, u=None, v=None):
        """Total edge count, or the number of parallel edges between u and v."""
        if u is None: return self.size()
        try:
            edgedata = self.adj[u][v]
        except KeyError:
            return 0     # no such edge
        return len(edgedata)

    def subgraph(self, nbunch):
        """Return the subgraph induced on the nodes in `nbunch`.

        Edge data dicts are shared with this graph (keydicts are shallow
        copies); node dicts and the graph dict are shared too.
        """
        bunch = self.nbunch_iter(nbunch)
        # create new graph and copy subgraph into it
        H = self.__class__()
        # copy node and attribute dictionaries
        for n in bunch:
            H.node[n] = self.node[n]
        # namespace shortcuts for speed
        H_adj = H.adj
        self_adj = self.adj
        # add nodes and edges (undirected method)
        for n in H:
            Hnbrs = H.adjlist_dict_factory()
            H_adj[n] = Hnbrs
            for nbr, edgedict in self_adj[n].items():
                # `nbr in H_adj` is only true for already-processed nodes,
                # so each edge is copied exactly once.
                if nbr in H_adj:
                    # add both representations of edge: n-nbr and nbr-n
                    # they share the same edgedict
                    ed = edgedict.copy()
                    Hnbrs[nbr] = ed
                    H_adj[nbr][n] = ed
        H.graph = self.graph
        return H

    def edge_subgraph(self, edges):
        """Return the subgraph induced on the given (u, v, key) edges;
        edges not present in the graph are skipped."""
        H = self.__class__()
        adj = self.adj
        # Filter out edges that don't correspond to nodes in the graph.
        def is_in_graph(u, v, k):
            return u in adj and v in adj[u] and k in adj[u][v]
        edges = (e for e in edges if is_in_graph(*e))
        for u, v, k in edges:
            # Copy the node attributes if they haven't been copied
            # already.
            if u not in H.node:
                H.node[u] = self.node[u]
            if v not in H.node:
                H.node[v] = self.node[v]
            # Create an entry in the adjacency dictionary for the
            # nodes u and v if they don't exist yet.
            if u not in H.adj:
                H.adj[u] = H.adjlist_dict_factory()
            if v not in H.adj:
                H.adj[v] = H.adjlist_dict_factory()
            # Create an entry in the edge dictionary for the edges (u,
            # v) and (v, u) if the don't exist yet.
            if v not in H.adj[u]:
                H.adj[u][v] = H.edge_key_dict_factory()
            if u not in H.adj[v]:
                H.adj[v][u] = H.edge_key_dict_factory()
            # Copy the edge attributes.
            H.edge[u][v][k] = self.edge[u][v][k]
            H.edge[v][u][k] = self.edge[v][u][k]
        H.graph = self.graph
        return H
|
from accounts.models import *
from django.contrib.postgres.fields import JSONField
from django.utils import timezone
# (stored value, label) choice pairs for user-facing notifications.
NOTIFICATION_TYPE = (
    ('1', 'POST_INVOLVEMENT'), ('2', 'POST_COMMENT'), ('3', 'POST_LIKE'), ('4', 'POST_NEW'), ('5', 'QUESTION_NEW'),
    ('6', 'UPDATE_FOLLOWED_USER'), ('7', 'Chat'), )

# (stored value, label) choice pairs for notifications shown to admins.
ADMIN_NOTIFICATION_TYPE = (('1', 'REPORT_PROFILE'), ('2', 'REPORT_POST'), ('3', 'FLAG_PROFILE'), ('4', 'FLAG_POST'),
                           ('5', 'NEW_REGISTRATION'))
class NotificationsDetail(models.Model):
    """A single notification delivered from one user to another."""
    # User whose action triggered the notification.
    notification_by = models.ForeignKey(User, blank=False, on_delete=models.CASCADE,
                                        related_name='notification_by_user')
    # User who receives the notification.
    notification_for = models.ForeignKey(User, blank=False, on_delete=models.CASCADE,
                                         related_name='notification_for_users')
    # One of NOTIFICATION_TYPE; stored as its single-character value.
    notification_type = models.CharField(max_length=1, choices=NOTIFICATION_TYPE, blank=False)
    # Arbitrary payload describing the triggering object.
    notification_context = JSONField()
    # Name of the model that produced the notification.
    notification_sender_model_name = models.CharField(max_length=50, blank=False)
    created_at = models.DateTimeField(auto_now_add=True)
class AdminNotifications(models.Model):
    """A notification surfaced to site admins (reports, flags, signups)."""
    notification_by = models.ForeignKey(User, blank=False, on_delete=models.CASCADE,
                                        related_name='notification_admin_user')
    # One of ADMIN_NOTIFICATION_TYPE; stored as its single-character value.
    notification_type = models.CharField(max_length=1, choices=ADMIN_NOTIFICATION_TYPE, blank=False)
    notification_context = JSONField()
    # Model name + primary key of the object being reported/flagged.
    sender_model_name = models.CharField(max_length=50, blank=False)
    sender_pk = models.IntegerField(blank=False)
    read = models.BooleanField(default=False)
    created_on = models.DateTimeField(auto_now_add=True)

    def get_created_time(self):
        """Return a human-readable age string for this notification
        ("Just Now", "N min ago", "N hours/days/months/years ago").

        NOTE(review): compares timezone.now() with created_on field by
        calendar components — assumes both are in the same timezone;
        confirm against the project's USE_TZ setting.
        """
        time = timezone.now()
        if self.created_on.day == time.day and self.created_on.month == time.month and self.created_on.year == time.year:
            # Same calendar day: report in minutes or whole hours.
            if (time.hour - self.created_on.hour) == 0:
                minute = time.minute - self.created_on.minute
                if minute < 1:
                    return "Just Now"
                return str(minute) + " min ago"
            return str(time.hour - self.created_on.hour) + " hours ago"
        else:
            time_left = (time - self.created_on).days
            if time_left < 1:
                # Different day but less than 24h elapsed (e.g. around midnight).
                return str((time-self.created_on).seconds // 3600) + " hours ago"
            elif 1 <= time_left < 30:
                return str(time_left) + " days ago"
            elif 30 <= time_left < 365:
                return str(round(time_left / 30)) + " months ago"
            elif time_left >= 365:
                return str(round(time_left / 365)) + " years ago"
            else:
                # NOTE(review): unreachable — the branches above cover every
                # integer value of time_left.
                return self.created_on
|
# inheritance
# users
# -Wizard
# -archers
# -ogres
class User:  # parent
    """Base class shared by every player type; provides sign-in."""

    def sign_in(self):
        print('logged in')
class Wizard(User):  # child1
    """A user whose attack is measured by raw power."""

    def __init__(self, name, power):
        self.name, self.power = name, power

    def attack(self):
        print(f'attacking power of {self.name} is {self.power}')
class Archer(User):  # child2
    """A user whose attack is measured by arrow count; can also run."""

    def __init__(self, name, num_arrows):
        self.name, self.num_arrows = name, num_arrows

    def attack(self):
        print(f'no of arrows of {self.name} is {self.num_arrows}')

    def run(self):
        print('Run run run fast')
class HybridBorg(Wizard, Archer):
    """Multiple-inheritance hybrid with both power and arrows."""

    def __init__(self, name, power, arrows):
        # Initialise both parents explicitly.
        Archer.__init__(self, name, arrows)
        # BUG FIX: the original passed `arrows` to Wizard, so `power`
        # was silently discarded and self.power ended up == arrows.
        Wizard.__init__(self, name, power)
# Demo: attack()/sign_in() print their message and return None, so each
# print() below emits the message followed by "None".
hb1 = HybridBorg('boggy', 50, 100)
print(hb1.attack())
print(hb1.sign_in())
|
# By having class "Scope", we are able to create scopes and each scope has a name and has the ability to be inserted or to be searched through
class Scope:
    """One lexical scope: maps a variable name to [type, value, offset]."""

    def __init__(self, name):
        self.data = {}
        self.name = name

    # BUG FIX: the original `def name(self): return self.name` was dead
    # code — __init__ assigns the instance attribute `self.name`, which
    # shadows the method, so it could never be called on an instance.
    # Callers read `scope.name` directly (see top()).

    def search(self, x):
        """Return True if *x* is declared in this scope, else False
        (the original fell through returning None on a miss)."""
        return x in self.data

    def insert(self, variable, Type, value, offset):
        """Record *variable* with its type, value and offset."""
        self.data[variable] = [Type, value, offset]
# The scope stack.  S0 is the global scope; nested scopes are named S1, S2, ...
scopes = [Scope("S0")]
def top():
    """Return the NAME of the scope currently on top of the stack."""
    try:
        return scopes[-1].name
    except IndexError:
        # BUG FIX: was a bare `except`, which hid any programming error;
        # only an empty stack is a legitimate failure here.  The print is
        # parenthesized, which is identical output on Python 2 and 3.
        print("No scope exists!")
def enter_new_scope():
    """Push a fresh scope named S<depth> onto the stack."""
    # The original wrapped this in try/except-pass, which could only
    # swallow genuine programming errors: len() and append() cannot fail.
    scopes.append(Scope("S" + str(len(scopes))))
def leave_current_scope():
    """Pop the current scope, restoring the enclosing one."""
    if len(scopes) > 0:
        del scopes[-1]
    else:
        # Parenthesized print: identical output on Python 2 and 3.
        print("No scope to leave!")
def current_scope():
    """Return the Scope object on top of the stack (not just its name)."""
    return scopes[-1]
def search(string):
    """Return True if *string* is declared in any scope on the stack.

    The original had an unreachable `break` after `return True`; the
    loop/flag shape collapses to a single any().
    """
    return any(scope.search(string) for scope in scopes)
def Type(string):
    """Declared type of *string*, searched through every scope; None if absent."""
    for frame in scopes:
        if frame.search(string):
            return frame.data[string][0]
def size(string):
    """Stored value of *string* (slot 1 of its entry), searched through
    every scope; None if absent."""
    for frame in scopes:
        if frame.search(string):
            return frame.data[string][1]
def offset(string):
    """Stack offset of *string* (slot 2 of its entry), searched through
    every scope; None if absent."""
    for frame in scopes:
        if frame.search(string):
            return frame.data[string][2]
def val(offset):
    """Return the value of the variable stored at *offset*.

    Scalar variables have a single offset; array variables keep parallel
    lists of values (index 1) and offsets (index 2), matched element-wise.
    Returns None when no variable owns the offset.
    """
    for scope in scopes:
        for variable in scope.data:
            entry = scope.data[variable]
            if type(entry[2]) != list:
                # scalar: direct offset comparison
                if entry[2] == offset:
                    return entry[1]
            else:
                # array: find the element whose offset matches
                for i in range(len(entry[1])):
                    if entry[2][i] == offset:
                        return entry[1][i]
def var(offset):
    """Return the name of the variable stored at *offset* (None if unknown).

    Scalars compare their single offset; arrays check membership in their
    offset list.
    """
    for scope in scopes:
        for variable in scope.data:
            entry = scope.data[variable]
            if type(entry[2]) != list:
                if entry[2] == offset:
                    return variable
            elif offset in entry[2]:
                return variable
def offset_a(string):
    """Return the offset of the first element of array *string* (None if absent)."""
    for scope in scopes:
        if scope.search(string):
            return scope.data[string][2][0]
'''
## Testing the program:
print "The current scope which is the global scope which is:", Top()
print
print "Inserting some strings ('main', 'x', 'y') to the global scope:"
print "Current_scope().insert('main')"
Current_scope().insert("main")
print "Current_scope().insert('x')"
Current_scope().insert("x")
print "Current_scope().insert('y')"
Current_scope().insert("y")
print
print "Entering a new scope: Enter_new_scope()"
Enter_new_scope()
print "checking the current scope: ", Top()
print "Inserting some strings ('Sweany', 'Bryce', 'Caragea') to the current scope:"
print "Current_scope().insert('Sweany')"
Current_scope().insert("Sweany")
print "Current_scope().insert('Bryce')"
Current_scope().insert("Bryce")
print "Current_scope().insert('Caragea')"
Current_scope().insert("Caragea")
print
print "Searching for string 'Nielsen' returns False since its not in any of the scopes:"
print "Search('Nielsen')"
print Search("Nielsen")
print "Searching for string 'Sweany' returns True since it's already inserted in one of the scopes: "
print "Search('Sweany')"
print Search("Sweany")
print "Searching for string 'main' returns True since it's already inserted in one of the scopes: "
print "Search('main')"
print Search('main')
print "leaving the current scope and getting one scope closer to the global scope:", "Leave_current_scope()"
Leave_current_scope()
print "checking the current scope: ", Top()
print
print "This program can have very large number of scopes in the stack and each scope can have very large number of strings"
''' |
def main():
    """Read "action, phrase, key" from stdin and print the Caesar-shifted phrase.

    action: 'encode' shifts characters forward by *key*; anything else
    (the original's `else` branch) shifts backward, i.e. decodes.
    """
    action, phrase, key = input("Please input action (encode/decode), phrase and key: ").split(",")
    phrase = phrase.strip()
    key = int(key.strip())
    action = action.strip()
    # Decoding is just encoding with the opposite shift.
    shift = key if action == 'encode' else -key
    processed = ''.join(chr(ord(ch) + shift) for ch in phrase)
    print("Processed output: {0}".format(processed))

# BUG FIX: the original called main() unconditionally, so merely importing
# this module would block on stdin; guard the entry point instead.
if __name__ == "__main__":
    main()
#!/usr/bin/python3
#
# ./insert_teachers_grading_standards.py -a account_id cycle_number school_acronym course_code
# ./insert_teachers_grading_standards.py course_id cycle_number school_acronym course_code
#
# Generate a "grading standard" scale with the names of teachers as the "grades".
# Note that if the grading scale is already present, it does nothing unless the "-f" (force) flag is set.
# In the latter case it adds the grading scale.
#
# G. Q. Maguire Jr.
#
# 2020.09.24
#
# Test with
# ./insert_teachers_grading_standards.py -v 11 2 EECS II246X
# ./insert_teachers_grading_standards.py -v --config config-test.json 11 2 EECS II246X
#
#
import csv, requests, time
import optparse
import sys
import json
#############################
###### EDIT THIS STUFF ######
#############################
# NOTE(review): `global` at module level is a no-op in Python; these lines
# only document intent.  The names are actually bound by initialize() below.
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTML requests
global payload # place to store additionally payload when needed for options to HTML requests
# Based upon the options to the program, initialize the variables used to access Canvas gia HTML requests
def initialize(options):
    """Load the Canvas API configuration and set the module globals.

    Reads the JSON file named by options.config_filename (default
    'config.json') and sets baseUrl, header and payload.  Exits with a
    non-zero status if the file is missing, malformed or incomplete.
    """
    global baseUrl, header, payload
    # styled based upon https://martin-thoma.com/configuration-files-in-python/
    if options.config_filename:
        config_file=options.config_filename
    else:
        config_file='config.json'
    try:
        with open(config_file) as json_data_file:
            configuration = json.load(json_data_file)
        access_token=configuration["canvas"]["access_token"]
        baseUrl="https://"+configuration["canvas"]["host"]+"/api/v1"
        header = {'Authorization' : 'Bearer ' + access_token}
        payload = {}
    # Catch only the expected failures (missing file, bad JSON, missing keys)
    # instead of a bare except:, and exit with a failure status rather than
    # the original sys.exit() which reported success (0) to the shell.
    except (OSError, IOError, ValueError, KeyError):
        print("Unable to open configuration file named {}".format(config_file))
        print("Please create a suitable configuration file, the default name is config.json")
        sys.exit(1)
##############################################################################
## ONLY update the code below if you are experimenting with other API calls ##
##############################################################################
def create_grading_standard(course_or_account, id, name, scale):
    """Create a Canvas grading standard titled *name* with entries *scale*.

    POST /api/v1/courses/:course_id/grading_standards (course_or_account True)
    or POST /api/v1/accounts/:account_id/grading_standards (False).

    Each scale entry carries grading_scheme_entry[][name] (the label, e.g. A-)
    and grading_scheme_entry[][value] (the inclusive lower bound, 0-100, of
    the range the entry covers up to the next entry or 100).

    Returns True when Canvas answers HTTP 200, otherwise False.
    """
    global Verbose_Flag
    if course_or_account:
        url = "{0}/courses/{1}/grading_standards".format(baseUrl, id)
    else:
        url = "{0}/accounts/{1}/grading_standards".format(baseUrl, id)
    if Verbose_Flag:
        print("url: {}".format(url))
    payload = {'title': name,
               'grading_scheme_entry': scale}
    if Verbose_Flag:
        print("payload={0}".format(payload))
    resp = requests.post(url, headers = header, json=payload)
    if resp.status_code == requests.codes.ok:
        _ = resp.json()  # parse the response body, as the original did
        print("inserted grading standard")
        return True
    print("r.status_code={0}".format(resp.status_code))
    return False
def get_grading_standards(course_or_account, id):
    """Fetch grading standards for a course (True) or an account (False).

    GET /api/v1/courses/:course_id/grading_standards  or
    GET /api/v1/accounts/:account_id/grading_standards

    Returns the decoded JSON payload on HTTP 200, otherwise None.
    """
    global Verbose_Flag
    if course_or_account:
        url = "{0}/courses/{1}/grading_standards".format(baseUrl, id)
    else:
        url = "{0}/accounts/{1}/grading_standards".format(baseUrl, id)
    if Verbose_Flag:
        print("url: " + url)
    resp = requests.get(url, headers = header)
    if resp.status_code == requests.codes.ok:
        return resp.json()
    return None
kth_examiners=["Åberg Wennerholm, Malin",
"Åbom, Mats",
"Abtahi, Seyedfarhad",
"Ahmadian, Afshin",
"Åkermo, Malin",
"Alfredsson, Bo",
"Alfredsson, Henrik",
"Alfredsson, P. Henrik",
"Amelin, Mikael",
"Andén-Pantera, Joakim",
"Andersson, John",
"Andersson, Kristina",
"Angelis, Jannis",
"Annadotter, Kerstin",
"Ansell, Anders",
"Archenti, Andreas",
"Arias Hurtado, Jaime",
"Arias, Jaime",
"Artho, Cyrille",
"Artman, Henrik",
"Arvidsson, Niclas",
"Arvidsson, Niklas",
"Azizpour, Hossein",
"Baalsrud Hauge, Jannicke",
"Bäbler, Matthäus",
"Bagheri, Shervin",
"Bälter, Olle",
"Bälter, Olof",
"Ban, Yifang",
"Barman, Linda",
"Barsoum, Zuheir",
"Battini, Jean-Marc",
"Baudry, Benoit",
"Bayard, Ove",
"Becker, Matthias",
"Bejhem, Mats",
"Bellgran, Monica",
"Bengtsson, Mats",
"Ben Slimane, Slimane",
"Berggren, Björn",
"Berglund, Lars",
"Berglund, Per",
"Berg, Mats",
"Bertling, Lina",
"Besenecker, Ute",
"Beskow, Jonas",
"Bhattacharya, Prosun",
"Bjerklöv, Kristian",
"Björk, Folke",
"Björklund, Anna",
"Björkman, Mårten",
"Blomgren, Henrik",
"Bodén, Hans",
"Bogdan, Cristian M",
"Bohbot, Zeev",
"Boij, Susann",
"Boman, Magnus",
"Borgenstam, Annika",
"Borgström, Sara",
"Boström, Henrik",
"Bradley, Karin",
"Brandão, Miguel",
"Brandt, Luca",
"Braunerhjelm, Pontus",
"Bresin, Roberto",
"Brismar, Hjalmar",
"Brokking Balfors, Berit",
"Broström, Anders",
"Brown, Terrence",
"CAJANDER, Anders",
"Cappel, Ute B.",
"Cappel, Ute/Docent",
"Casanueva, Carlos",
"Cavdar, Cicek",
"Ceccato, Vania",
"Cetecioglu Gurol, Zeynep",
"Cetecioglu, Zeynep",
"Chacholski, Wojciech",
"Chachólski, Wojciech",
"Chang, Yong Jun",
"Chatterjee, Saikat",
"Chen, Dejiu",
"Chen, De-Jiu",
"Chen, DeJiu",
"Chen, Jiajia",
"Chiu, Justin",
"Chiu, Justin Ning-Wei",
"Chiu, Justin NingWei",
"Chiu, Ningwei Justin",
"Chunliang, Wang",
"Claesson, Joachim",
"Claesson, Per",
"Colarieti Tosti, Massimiliano",
"Comber, Rob",
"Comber, Robert",
"Cornell, Ann",
"Cronhjort, Andreas",
"Cvetkovic, Vladimir",
"Dahlberg, Leif",
"Dahlqvist, Patric",
"Damjanovic, Danijela",
"Dam, Mads",
"Dán, György",
"Danielsson, Mats",
"Dimarogonas, Dimos V.",
"Di Rocco, Sandra",
"Djehiche, Boualem",
"Dominguez, Isabel",
"Drugge, Lars",
"Dubrova, Elena",
"Duits, Maurice",
"Edin Grimheden, Martin",
"Edin, Hans Ezz",
"Edlund, Ulrica",
"Ekbäck, Peter",
"Ekeberg, Örjan",
"Ek, Monica",
"Ekstedt, Mathias",
"Eliasson, Anders",
"Emmer, Åsa",
"Engström, Susanne",
"Engvall, Klas",
"Engwall, Mats",
"Engwall, Olov",
"Enqvist, Per",
"Eriksson, Andrea",
"Ersson, Mikael",
"Fahlstedt, Madelen",
"Faleskog, Jonas",
"Fan, Huaan",
"Farshid, Mana",
"Feng, Lei",
"Fernaeus, Ylva",
"Finne Wistrand, Anna",
"Finnveden, Göran",
"Fischione, Carlo",
"Flierl, Markus",
"Fodor, Gabor",
"Fodor, Viktória",
"Folkesson, Johan",
"Folkesson, John",
"Forsberg, Kerstin",
"Forsgren, Anders",
"Forsman, Mikael",
"Fransén, Erik",
"Franson, Per",
"Fuglesang, Christer",
"Furó, István",
"Fuso Nerini, Francesco",
"Fuso-Nerini, Francesco",
"Galjic, Fadil",
"Gardner, James",
"Garme, Karl",
"Gasser, Christian",
"Gasser, T. Christian",
"Geschwind, Lars",
"Ghandari, Mehrdad",
"Gidofalvi, Gyözö",
"Girdzijauskas, Sarunas",
"Glaser, Bjoern",
"Göransson, Peter",
"Gräslund, Torbjörn",
"Grimheden, Martin",
"Grishenkov, Dmitry",
"Gröndahl, Fredrik",
"Guanciale, Roberto",
"Gudmundsson, Kjartan",
"Gullberg, Annica",
"Gulliksen, Jan",
"Gustafson, Joakim",
"Gustafsson, Joakim",
"Gustafsson, Jon Petter",
"Gustavsson, Johan",
"Gutierrez-Farewik, Elena",
"Haas, Tigran",
"Ha, Claes, Hansson",
"Hagström, Peter",
"Håkansson, Anne",
"Håkansson, Cecilia",
"Håkansson, Maria",
"Håkanssson, Maria",
"Hallén, Anders",
"Hallström, Stefan",
"Hammar, Mattias",
"Hanke, Michael",
"Hansson, Claes",
"Haridi, Seif",
"Hårsman, Björn",
"Håstad, Johan",
"Hatef, Madani",
"Havenvid, Malena",
"Havenvid, Malena Ingemansson",
"Hedenqvist, Mikael",
"Hedenqvist, Mikael S.",
"Hedman, Anders",
"Hedström, Peter",
"Hellgren Kotaleski, Jeanette",
"Hemani, Ahmed",
"Herman, Pawel",
"Hesamzadeh, Mohammad Reza",
"Hidell, Markus",
"Hilber, Patrik",
"Hoffman, Johan",
"Högfeldt, Anna-Karin",
"Högselius, Per",
"Höjer, Mattias",
"Holgersson, Charlotte",
"Höök, Kristina",
"Howells, Mark",
"Hsieh, Yves",
"Hult, Henrik",
"Hu, Xiaoming",
"Isaksson, Karolina",
"Isaksson, Teresa",
"Jacobsen, Elling W.",
"Jaldén, Joakim",
"Janerot Sjöberg, Birgitta",
"Janssen, Anja",
"Jansson, Magnus",
"Jayasuriya, Jeevan",
"Jenelius, Erik",
"Jensfelt, Patric",
"Jerbrant, Anna",
"Jerrelind, Jenny",
"Johansson, Anders",
"Johansson, Fredrik",
"Johansson, Hans",
"Johansson, Hans Bengt",
"Johansson, Karl H.",
"Johansson Landén, Camilla",
"Johansson, Lars",
"Johansson, Mats",
"Johansson, Mikael",
"Johnson, Magnus",
"Johnson, Pontus",
"Jonsson, B. Lars G.",
"Jonsson, Mats",
"Jönsson, Pär",
"Jonsson, Stefan",
"Kadefors, Anna",
"Kajko Mattsson, Mira Miroslawa",
"Kajko-Mattsson, Mira Miroslawa",
"Källblad Nordin, Sigrid",
"Kann, Viggo",
"Kantarelis, Efthymios",
"Karlgren, Jussi",
"Karlsson, Bo",
"Karlsson, Johan",
"Karlsson, Tomas",
"Karlström, Anders",
"Karoumi, Raid",
"Karrbom Gustavsson, Tina",
"karvonen, Andrew",
"Karvonen, Andrew",
"Kaulio, Matti",
"Kaulio, Matti A.",
"Khatiwada, Dilip",
"Kilander, Fredrik",
"Kjellström, Hedvig",
"Kleiven, Svein",
"Korenivski, Vladislav",
"Korhonen, Jouni",
"Korzhavyi, Pavel A.",
"Koski, Timo",
"Kostic, Dejan",
"Kozma, Cecilia",
"Kragic, Danica",
"Kragic Jensfelt, Danica",
"Kramer Nymark, Tanja",
"Kramer Nymark, Tanya",
"Kringos, Nicole",
"Kristina, Nyström",
"Kulachenko, Artem",
"Kullen, Anita",
"Kumar, Arvind",
"Kusar, Henrik",
"Kuttenkeuler, Jacob",
"Lagergren, Carina",
"Lagerström, Robert",
"Landén, Camilla",
"Lange, Mark",
"Lansner, Anders",
"Lantz, Ann",
"Larsson, Matilda",
"Larsson, Per-Lennart",
"Larsson, Stefan",
"Laumert, Björn",
"Laure, Erwin",
"Leander, John",
"Lennholm, Helena",
"Li, Haibo",
"Lindbäck, Leif",
"Lindbergh, Göran",
"Lindgren, Monica",
"Lindström, Mikael",
"Lindwall, Greta",
"Linusson, Svante",
"Lööf, Hans",
"Lundberg, Joakim",
"Lundell, Fredrik",
"Lundevall, Fredrik",
"Lundgren, Berndt",
"Lundqvist, Per",
"Lu, Zhonghai",
"Lu, Zonghai",
"Madani, Hatef",
"Madani Laijrani, Hatef",
"Madani Larijani, Hatef",
"Maffei, Antonio",
"Maguire Jr., Gerald Q.",
"Malkoch, Michael",
"Malm, B. Gunnar",
"Malmquist, Anders",
"Malmström, Eva",
"Malmström Jonsson, Eva",
"Malmström, Maria",
"Maniette, Louise",
"Månsson, Daniel",
"Mariani, Raffaello",
"Markendahl, Jan",
"Markendahl, Jan Ingemar",
"Mårtensson, Jonas",
"Martinac, Ivo",
"Martin, Andrew",
"Martin, Andrew R.",
"Martinsson, Gustav",
"Martin, Viktoria",
"Mats, Bejhem",
"Matskin, Mihhail",
"Mats, Nilsson",
"Mattson, Helena",
"Mattsson, Helena",
"Meijer, Sebastiaan",
"Mendonca Reis Brandao, Miguel",
"Metzger, Jonathan",
"m, Helena",
"Molin, Bengt",
"Monperrus, Martin",
"Montelius, Johan",
"Moreno, Rodrigo",
"Mörtberg, Ulla",
"Navarrete Llopis, Alejandra",
"Nee, Hans-Peter",
"Nerini, Francesco Fuso",
"Neumeister, Jonas",
"Niklaus, Frank",
"Nilson, Mats",
"Nilsson, Måns",
"Nilsson, Mats",
"NILSSON, MATS",
"Ning-Wei Chiu, Justin",
"Nissan, Albania",
"Nordström, Lars",
"Norgren, Martin",
"Norlin, Bert",
"Norrga, Staffan",
"Norström, Per",
"Nuur, Cali",
"Nybacka, Mikael",
"Nyquist, Pierre",
"Nyström, Kristina",
"Nyström, Kristina",
"Öberg, Johnny",
"Odqvist, Joakim",
"Oechtering, Tobias J.",
"Olofson, Bo",
"Olofsson, Bo",
"Olsson, Håkan",
"Olsson, Jimmy",
"Olsson, Mårten",
"Olsson, Monika",
"Olssonn, Monika",
"Ölundh Sandström, Gunilla",
"Onori, Mauro",
"O'Reilly, Ciarán J.",
"Orhan, Ibrahim",
"Österling, Lisa",
"Östlund, Sören",
"Östlund, Sörenn",
"Otero, Evelyn",
"Packendorff, Johann",
"Palm, Björn",
"Papadimitratos, Panagiotis",
"Pargman, Daniel",
"Pauletto, Sandra",
"Pavlenko, Tatjana",
"Payberah, Amir H.",
"Pears, Arnold",
"Peter Ekbäck,",
"Petrie-Repar, Paul",
"Petrova, Marina",
"Petrov, Miroslav",
"Pettersson, Lars",
"Plaza, Elzbieta",
"Pontus, Braunerhjelm",
"Quevedo-Teruel, Oscar",
"Rashid, Amid",
"Rashid, Amir",
"Rasmussen, Lars Kildehöj",
"Riml, Joakim",
"Ringertz, Ulf",
"Ritzén, Sofia",
"Rodriguez, Saul",
"Rojas, Cristian R.",
"Romero, Mario",
"Rönngren, Robert",
"Rosén, Anders",
"Rosenqvist, Christopher",
"Roxhed, Niclas",
"Rundgren, Carl-Johan",
"Runting, Helen",
"Rusu, Ana",
"Rutland, Mark W.",
"Said, Elias",
"Sallnäs, Eva-Lotta",
"Sander, Ingo",
"Säve-Söderbergh, Per Jörgen",
"Savolainen, Peter",
"Sawalha, Samer",
"Scheffel, Jan",
"Schlatter, Philipp",
"Schnelli, Kevin",
"Schulte, Christian",
"Scolamiero, Martina",
"Selleby, Malin",
"Sellgren, Ulf",
"Semere, Daniel",
"Shirabe, Takeshi",
"Silfwerbrand, Johan",
"Silveira, Semida",
"Sjödin, Peter",
"Sjögren, Anders",
"Sjöland, Thomas",
"Sjöland, Tomas",
"Slimane, Ben",
"Smedby, Örjan",
"Smith, Mark",
"Smith, Mark T.",
"Solus, Liam",
"Sörensson, Tomas",
"Stadler, Rolf",
"Ståhlgren, Stefan",
"Ståhl, Patrik",
"Stenbom, Stefan",
"Stenius, Ivan",
"Sturm, Bob",
"Subasic, Nihad",
"Sundberg, Cecilia",
"Swalaha, Samer",
"Ternström, Sten",
"Tesfamariam Semer, Daniel",
"Tesfamariam Semere, Daniel",
"Thobaben, Ragnar",
"Thottappillil, Rajeev",
"Tibert, Gunnar",
"Tilliander, Anders",
"Tisell, Claes",
"Tollmar, Konrad",
"Törngren, Martin",
"Troubitsyna, Elena",
"Ulfvengren, Pernilla",
"Uppvall, Lars",
"Urban, Frauke",
"Urciuoli, Luca",
"Usher, William",
"Vania, Ceccato",
"van Maris, Antonius",
"Vanourek, Gregg",
"Västberg, Anders",
"Viklund, Fredrik",
"Viklund, Martin",
"Vilaplana, Francisco",
"Vinuesa, Ricardo",
"Viveka, Palm",
"Vlassov, Vladimir",
"Vogt, Ulrich",
"Wågberg, Lars",
"Wahl, Anna",
"Wahlberg, Bo",
"Wålinder, Magnus",
"Wallmark, Oskar",
"Wang, Chunliang",
"Wang, Lihui",
"Wang, Xi Vincent",
"Weinkauf, Tino",
"Wennerholm, Malin",
"Wennhage, Per",
"Westlund, Hans",
"Wikander, Jan",
"Wiklund, Martin",
"Wiktorsson, Magnus",
"Willén, Jonas",
"Wingård, Lars",
"Wingård, Lasse",
"Wingquist, Erik",
"W. Lange, Mark",
"W.Lange, Mark",
"Wörman, Anders",
"Xiao, Ming",
"Zetterling, Carl-Mikael",
"Zhou, Qi",
"Zwiller, Val"]
def _examiner_scale(examiners):
    """Map a sequence of examiner names onto a descending grading scale.

    The i-th of N examiners gets the lower bound ((N-i)/N)*100, and a final
    'none selected' entry anchors the scale at 0.0, exactly as both branches
    of main() originally computed inline.
    """
    scale=[]
    number_of_examiners=len(examiners)
    index=0
    for e in examiners:
        d=dict()
        d['name']=e
        d['value']=(float(number_of_examiners-index)/float(number_of_examiners))*100.0
        print("d={0}".format(d))
        scale.append(d)
        index=index+1
    scale.append({'name': 'none selected', 'value': 0.0})
    return scale

def main():
    """Command-line entry point.

    Arguments: course_id|account_id cycle_number school_acronym [course_code].
    Loads the per-school course data file produced by
    get-degree-project-course-data.py and inserts a grading standard whose
    "grades" are examiner names: all examiners with -t (testing), otherwise
    the examiners of the given course_code.  Existing standards are kept
    unless -f (force) is given.
    """
    global Verbose_Flag
    global Use_local_time_for_output_flag
    global Force_appointment_flag
    Use_local_time_for_output_flag=True

    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose',
                      dest="verbose",
                      default=False,
                      action="store_true",
                      help="Print lots of output to stdout"
    )
    parser.add_option('-a', '--account',
                      dest="account",
                      default=False,
                      action="store_true",
                      help="Apply grading scheme to indicated account"
    )
    parser.add_option('-f', '--force',
                      dest="force",
                      default=False,
                      action="store_true",
                      help="Replace existing grading scheme"
    )
    parser.add_option('-t', '--testing',
                      dest="testing",
                      default=False,
                      action="store_true",
                      help="execute test code"
    )
    parser.add_option("--config", dest="config_filename",
                      help="read configuration from FILE", metavar="FILE")

    options, remainder = parser.parse_args()
    Verbose_Flag=options.verbose
    Force_Flag=options.force
    if Verbose_Flag:
        print('ARGV :', sys.argv[1:])
        print('VERBOSE :', options.verbose)
        print('REMAINING :', remainder)
        print("Configuration file : {}".format(options.config_filename))

    # True -> the id argument names a course; False -> an account (-a flag)
    course_or_account = not options.account
    if Verbose_Flag:
        print("Course or account {0}: course_or_account = {1}".format(options.account,
                                                                      course_or_account))

    if (not options.testing) and (len(remainder) < 4):
        print("Insuffient arguments must provide a course_id|account_id cycle_number school_acronym course_code\n")
        return
    if (options.testing) and (len(remainder) < 3):
        print("Insuffient arguments must provide a course_id|account_id cycle_number school_acronym\n")
        return

    initialize(options)

    canvas_course_id=remainder[0]
    if Verbose_Flag:
        if course_or_account:
            print("course_id={0}".format(canvas_course_id))
        else:
            print("account_id={0}".format(canvas_course_id))

    cycle_number=remainder[1] # note that cycle_number is a string with the value '1' or '2'
    school_acronym=remainder[2]
    if (not options.testing):
        course_code=remainder[3]

    inputfile_name="course-data-{0}-cycle-{1}.json".format(school_acronym, cycle_number)
    try:
        with open(inputfile_name) as json_data_file:
            all_data=json.load(json_data_file)
    # narrowed from a bare except:; exit non-zero so callers see the failure
    except (OSError, IOError, ValueError):
        print("Unable to open course data file named {}".format(inputfile_name))
        print("Please create a suitable file by running the program get-degree-project-course-data.py")
        sys.exit(1)

    # sanity-check that the data file matches the command-line arguments
    cycle_number_from_file=all_data['cycle_number']
    school_acronym_from_file=all_data['school_acronym']
    if not ((cycle_number_from_file == cycle_number) and (school_acronym_from_file == school_acronym)):
        print("mis-match between data file and arguments to the program")
        sys.exit(1)

    programs_in_the_school_with_titles=all_data['programs_in_the_school_with_titles']
    dept_codes=all_data['dept_codes']
    all_course_examiners=all_data['all_course_examiners']

    # map existing grading-standard titles -> ids, keeping only the highest
    # numbered instance of each title
    canvas_grading_standards=dict()
    # BUG FIX: the original always queried as a course (True) even when -a
    # selected an account; honor course_or_account here just as
    # create_grading_standard() below does.
    available_grading_standards=get_grading_standards(course_or_account, canvas_course_id)
    if available_grading_standards:
        for s in available_grading_standards:
            old_id=canvas_grading_standards.get(s['title'], None)
            if old_id and s['id'] < old_id: # use only the highest numbered instance of each scale
                continue
            canvas_grading_standards[s['title']]=s['id']
            if Verbose_Flag:
                print("title={0} for id={1}".format(s['title'], s['id']))
    if Verbose_Flag:
        print("canvas_grading_standards={}".format(canvas_grading_standards))

    if (options.testing):
        potential_grading_standard_id=canvas_grading_standards.get("All examiners", None)
        if Force_Flag or (not potential_grading_standard_id):
            name="All examiners"
            # collect the union of examiners over every course
            all_examiners=set()
            for course in all_course_examiners:
                for examiner in all_course_examiners[course]:
                    all_examiners.add(examiner)
            # the following is for extreme testing
            # all_examiners=kth_examiners
            print("number_of_examiners={}".format(len(all_examiners)))
            scale=_examiner_scale(sorted(all_examiners))
            print("scale is {}".format(scale))
            status=create_grading_standard(course_or_account, canvas_course_id, name, scale)
            print("status={0}".format(status))
            if Verbose_Flag and status:
                print("Created new grading scale: {}".format(name))
    else:
        potential_grading_standard_id=canvas_grading_standards.get(course_code, None)
        if Force_Flag or (not potential_grading_standard_id):
            name=course_code
            scale=_examiner_scale(all_course_examiners[course_code])
            status=create_grading_standard(course_or_account, canvas_course_id, name, scale)
            print("status={0}".format(status))
            if Verbose_Flag and status:
                print("Created new grading scale: {}".format(name))

if __name__ == "__main__": main()
|
#!/bin/python3
import sys
# Read n (unused), k coordinate pairs (consumed but unused) and m, then
# compute the length of the longest palindromic subsequence of the
# m-element array `a` with interval dynamic programming.
n,k, m = input().strip().split(' ')
n,k, m = [int(n),int(k), int(m)]
for a0 in range(k):
    # k coordinate pairs are read from stdin but never used afterwards
    x,y = input().strip().split(' ')
    x,y = [int(x),int(y)]
a = list(map(int, input().strip().split(' ')))
# L[i][j] = length of the longest palindromic subsequence of a[i..j]
L = [[0 for x in range(m)] for x in range(m)]
for i in range(m):
    L[i][i]=1
for cl in range(2, m+1): # cl = current interval length
    for i in range(m-cl+1):
        j = i+cl-1
        if a[i]==a[j] and cl == 2:
            L[i][j]=2
        elif a[i]==a[j]:
            L[i][j]=L[i+1][j-1]+2
        else:
            L[i][j]=max(L[i][j-1], L[i+1][j])
temp = L[0][m-1]
# NOTE(review): this final loop only rebinds the loop variable; it does not
# modify `a`, and `y` is a leftover from the input loop — looks like dead
# code, confirm before removing.
for x in a:
    x = y
|
#!/usr/bin/env python
import boto3
# ---------------------Made by shalev pinker ------------------
# ---------------------
# -------------------- Testing of boto3 usage------------------
# This will get list of regions to iterate
##client = boto3.client('ec2',region_name='eu-west-1')
##regions = [region['RegionName'] for region in client.describe_regions()['Regions']]
##print regions
#ec2 = boto3.client('ec2')
# Connect to AWS with default configuration in ~/.aws/config
# Session bound to the 'default' profile from the local AWS configuration
session = boto3.Session(profile_name='default')
# EC2 resource handle pinned to the eu-west-1 region
ec2 = boto3.resource('ec2',region_name='eu-west-1')
#print ec2.volumes.all()
# This will get volume id from the instance
#instance = ec2.Instance('i-04bdb402371ee2d7e')
#print instance.block_device_mappings[0]['Ebs']['VolumeId']
# This will get the tag of the volume
##for volume in ec2.volumes.all():
## print volume.volume_id
## print volume.tags
# This will get all instances in the region
##for instance in ec2.instances.all():
##print instance.instance_id
# This will check if there is 0 tags to the instance
#if not instance.tags:
#print type(instance.tags)
# This will generate a list of images from the specified region according to filters
##image_iterator = ec2.images.filter(
## Filters=[
## {
## 'Name': 'tag:AutoAMI',
## 'Values': [
## 'True',
## ]
## },
## ],
##)
# This checks timestamp and will delete it if its over 30 days old
##for image in image_iterator:
#Delete images if date is (need to add import time module)
#x.deregister()
##print image.creation_date
|
#DEFINE AND IMPORT ALL THINGS-----------------------------------------------------------------
import threading
from threading import Thread
import win32api, win32con
import keyboard
import pyautogui
import time
import cv2
from PIL import Image
import cv2
import random
from PIL import ImageGrab
import numpy as np
from ctypes import windll, Structure, c_long, byref
#INITIALIZE SOME VALUES---------------------------------------------------------------------
screenWidth, screenHeight = pyautogui.size()
# NOTE(review): `global` at module level is a no-op; these declarations only
# document that functions below rebind the names via their own `global`.
global quad
quad=0
# NOTE(review): `iter` shadows the builtin iter(); rename if iter() is ever needed.
iter=0
press=0
global runningg
runningg=True
team=0 #Team= 0 if blue team otherwise red team is 1
# Color signatures (np.uint8 triples) used to locate game objects in
# screenshots; the trailing comments record the original RGB values.
triangle = np.array([120, 136, 252], dtype=np.uint8) #Triangles --- RGB=252,118,119
squares = np.array([95, 150, 255], dtype=np.uint8) #Squares --- RGB=255,232,105
polygon = np.array([5, 136, 252], dtype=np.uint8) #Polygons --- RGB-118 141 252
red_team = np.array([121, 172, 241], dtype=np.uint8) #Red Players --- RGB-241,78,84
blue_team = np.array([24, 255, 225], dtype=np.uint8) #Blue players --- RGB-0,178,225
patrol = np.array([145, 129, 241], dtype=np.uint8) #RGB-241 119 221
play_background = np.array([0, 0, 205], dtype=np.uint8) #Background --- RGB-205 205 205
border = np.array([0, 0, 185], dtype=np.uint8) #RGB-185 185 185
OFFSET=60
Asplit=3
SLEEP_TIME=1
pyautogui.click(300,300) # click so the game window has focus for key presses
FIND_FOOD_LIMIT=4
FOOD_NEAR_OFFSET=200
NEAR_BORDER_LIMIT=30000
global coorden
global coordborder
global enemy
enemy=red_team
global PANIC_MODE
PANIC_MODE=False
global TIME_MOVE
TIME_MOVE=0.1 # seconds each movement key is held down (see moveQuad)
global TEST_MODE
TEST_MODE=False
global BORDER_FOUND
BORDER_FOUND=False
#USER PLEASE GIVE ALL INITIALIZATIONS HERE-------------------------------------------------
#NO TEST VALUES
BROWSER_SCREEN_WIDTH=screenWidth #800
# NOTE(review): "BRWOSER" is a typo, kept because every use site spells it this way.
BRWOSER_SCREEN_HEIGHT=screenHeight #720
OFFSETX=0
OFFSETY=0
CENTER_X=(BROWSER_SCREEN_WIDTH/2)-(OFFSETX/2) #776/2 or 388
CENTER_Y=(BRWOSER_SCREEN_HEIGHT/2)-(OFFSETY/2) #410 or 392
#TEST VALUES
if(TEST_MODE==True):
    BROWSER_SCREEN_WIDTH=800 #800
    BRWOSER_SCREEN_HEIGHT=650#720
    OFFSETX=0
    OFFSETY=0
    CENTER_X=367
    CENTER_Y=387
TARGET=(CENTER_X,CENTER_Y)
#SOME FUNCTION INITIALIZATIONS--------------------------------------------------------------
# ctypes mirror of the Win32 POINT struct, used by GetCursorPos below.
class POINT(Structure):
    _fields_ = [("x", c_long), ("y", c_long)]
def queryMousePosition():
    """Return the current cursor position as {"x": int, "y": int} via the Win32 API."""
    pt = POINT()
    windll.user32.GetCursorPos(byref(pt))
    #print(pyautogui.pixel(pt.x, pt.y))
    return { "x": pt.x, "y": pt.y}
def replace_str_index(text, index=0, replacement=''):
    """Return *text* with the character at *index* replaced by *replacement*.

    With the defaults this simply drops the first character.
    """
    return text[:index] + replacement + text[index + 1:]
# Maps quadrant code -> (log message, arrow keys to hold).  Codes 1-4 are the
# diagonal quadrants, 5-7 are up/left/down; any other value moves right,
# matching the original if/elif chain's else branch.
_QUAD_MOVES = {
    1: ("Moving up right", ('right', 'up')),
    2: ("Moving up left", ('left', 'up')),
    3: ("Moving down left", ('left', 'down')),
    4: ("Moving down right", ('right', 'down')),
    5: ("Moving up", ('up',)),
    6: ("Moving left", ('left',)),
    7: ("Moving down", ('down',)),
}

def moveQuad():
    """Hold the arrow key(s) for the current global `quad` for TIME_MOVE seconds.

    Collapses the original eight copy-pasted branches into one table-driven
    body.  The original's `press=0` assignments were dead (they bound a local
    that was never read) and are dropped.
    """
    quadpos = quad
    print("In moveQuad , quadrant is {0}".format(quadpos))
    message, keys = _QUAD_MOVES.get(quadpos, ("Moving right", ('right',)))
    print(message)
    for k in keys:
        pyautogui.keyDown(k)
    time.sleep(TIME_MOVE)
    for k in reversed(keys):
        pyautogui.keyUp(k)
#DETERMINE OUR CURRENT TEAM------------------------------------------------------------------
# Sample the pixel at screen centre and compare it with the blue-team colour
# (0,178,225) to decide which team the player is on.
im = pyautogui.screenshot()
rgb_im = im.convert('RGB')
r, g, b = rgb_im.getpixel((CENTER_X, CENTER_Y))
print(r, g, b)
if(r==0 and g==178 and b==225):
    team=1
    enemy=blue_team
    print("Red Team")
else:
    team=0
    enemy=red_team
    print("Blue team")
# NOTE(review): this unconditional assignment overrides the team-specific
# `enemy` chosen just above, so `enemy` is always red_team — confirm intent.
enemy=red_team
if(screenWidth>1000):
    Asplit=4
def Calcpos():
    """Worker loop: every 1-3 s move the mouse to a random spot and step the tank.

    Runs until the global `runningg` becomes falsy; movement itself is done
    by moveQuad() via the global `quad`.
    """
    global quad
    LAST_FOOD_FOUND=0
    global TIME_MOVE
    global PANIC_MODE
    global iter
    global BORDER_FOUND
    while(runningg):
        print("\n-------\n")
        # NOTE(review): randint(8, 20) is always > 4, so quad is always
        # forced to 5 ("up") below and the 7 ("down") branch is dead — confirm.
        quad=random.randint(8, 20)
        pyautogui.moveTo(random.randint(CENTER_X, BROWSER_SCREEN_WIDTH-200),random.randint(100, BRWOSER_SCREEN_HEIGHT-100))
        if(quad>4):
            quad=5
        else:
            quad=7
        time.sleep(random.randint(1, 3))
        moveQuad()
if __name__ == '__main__':
    # Start the movement loop on a background thread; the main thread then
    # closes any OpenCV windows.
    Thread(target = Calcpos).start()
    cv2.destroyAllWindows()
import multiprocessing
class ProcesTest(multiprocessing.Process):
    """Minimal Process subclass: run() just reports the process name.

    Demonstrates creating a process by subclassing multiprocessing.Process;
    run() is executed in the child once start() is called.
    """

    def run(self):
        print(f'am apelat metoda run() in procesul: {self.name}')
if __name__ == '__main__':
    jobs = [] # list of the processes created below
    for i in range(5):
        p = ProcesTest() # create 5 processes; each prints its message from run()
        jobs.append(p) # keep a reference, then start the process
        p.start()
        p.join() # joining inside the loop makes the processes run one at a time
from DBModel import *
from BaseModel import BaseModel
from peewee import *
import datetime
class tEmployee(BaseModel):
    """Peewee ORM model for the employee table."""
    EmpID = CharField(unique=True, max_length=50, primary_key=True) # employee identifier (primary key)
    Name = CharField(max_length=45) # display name
    ServiceDate = DateTimeField() # presumably the employee's service start date -- TODO confirm
    #Created = DateTimeField(default=datetime.datetime.now)
    LastViewed = DateTimeField(null=True) # last time this record was viewed; nullable
|
######################################################################
######################################################################
# Copyright Tsung-Hsien Wen, Cambridge Dialogue Systems Group, 2017 #
######################################################################
######################################################################
import re
import sys
import simplejson as json
import operator
import random
import numpy as np
from copy import deepcopy
from utils.nlp import normalize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
digitpat = re.compile('\d+')
class DataSplit(object):
    """Partition a dataset into train/valid/test slices by an integer ratio.

    ``split`` is a 3-element ratio such as [3, 1, 1]; slice boundaries are
    computed proportionally from the length of the data passed in.
    """
    def __init__(self, split):
        self.split = split
        self.sum = sum(split)

    def train_valid(self, data):
        """Leading slice covering the train+valid portion."""
        boundary = int(len(data) * float(sum(self.split[:2])) / float(self.sum))
        return data[:boundary]

    def train(self, train_valid):
        """Training slice taken from the train+valid portion."""
        boundary = int(len(train_valid) *
                       float(self.split[0]) / float(sum(self.split[:2])))
        return train_valid[:boundary]

    def valid(self, train_valid):
        """Validation slice: what remains of train+valid after training."""
        return train_valid[len(self.train(train_valid)):]

    def test(self, data):
        """Trailing slice of the dataset after the train+valid portion."""
        return data[len(self.train_valid(data)):]
class DataReader(object):
    """Reads the dialogue corpus, DB, and semantic dictionaries and prepares
    vocabularies, train/valid/test splits, and per-dialog features."""
    # class-level defaults, shared until rebound on an instance
    inputvocab = []
    outputvocab = []
    ngrams = {}
    idx2ngs = []
    def __init__(self,
                 corpusfile, dbfile, semifile, s2vfile,
                 split, lengthen, percent, shuffle,
                 trkenc, verbose, mode, att=False, latent_size=1):
        """Load all JSON resources and prepare the dataset.

        corpusfile/dbfile/semifile/s2vfile: paths to JSON resources.
        split: 3-way ratio handed to DataSplit.
        mode: when 'sds', dialog loading and data splitting are skipped.
        att: the string 'attention' enables attention (any other value disables).
        latent_size: stored as self.dl -- presumably the latent dimension, TODO confirm.
        """
        self.att = True if att == 'attention' else False
        self.dl = latent_size
        self.data = {'train': [], 'valid': [], 'test': []} # container for data
        self.mode = 'train' # mode for accessing data
        self.index = 0 # index for accessing data
        # data manipulators
        self.split = DataSplit(split) # split helper
        self.trkenc = trkenc
        self.lengthen = lengthen
        self.shuffle = shuffle
        # NLTK stopword module, extended with punctuation-like tokens
        self.stopwords = set(stopwords.words('english'))
        for w in ['!', ',', '.', '?', '-s', '-ly', '</s>', 's']:
            self.stopwords.add(w)
        # loading files
        self.db = self.loadjson(dbfile)
        self.s2v = self.loadjson(s2vfile)
        self.semidict = self.loadjson(semifile)
        self.dialog = self.loadjson(corpusfile)
        # producing slot value templates and db represetation
        self.prepareSlotValues()
        self.structureDB()
        # load dialog
        self.loadVocab()
        if mode != 'sds':
            self.loadDialog()
            self.loadSemantics()
        # goal
        self.parseGoal()
        # split dataset
        if mode != 'sds':
            self._setupData(percent)
        if verbose: self._printStats()
    def loadDialog(self):
        """Index every dialogue into padded word-index matrices.

        Builds, per dialogue: raw and delexicalised source (user) / target
        (system) utterances with their true lengths, positional features for
        delexicalised slots/values, offer/change labels, snapshot vectors,
        finished flags and sentence-group indexes.  All turn sequences within
        one dialogue are padded to the dialogue-local maximum length.
        """
        # index words and make it suitable for NN input
        self.sourceutts = []
        self.targetutts = []
        self.masked_sourceutts = []
        self.masked_targetutts = []
        self.sourcecutoffs = []
        self.targetcutoffs = []
        self.masked_sourcecutoffs = []
        self.masked_targetcutoffs = []
        # delexicalised positions
        self.delsrcpos = []
        self.deltarpos = []
        # finished dialogs
        self.finished = []
        # venue specific - offered/changing
        self.offers = []
        self.changes = []
        # snapshot vectors
        self.snapshot_vecs = []
        # for each dialogue
        dcount = 0.0
        tcount = 0.0
        # for VAE initialisation
        self.sentGroupIndex = []
        groupidx = 0
        for d in self.dialog:
            # consider finished flag (absent flag counts as finished)
            if d.has_key('finished'):
                self.finished.append(d['finished'])
            else:
                self.finished.append(True)
            # print loading msgs
            dcount += 1.0
            print '\tloading dialog from file ... finishing %.2f%%\r' % \
                  (100.0 * float(dcount) / float(len(self.dialog))),
            sys.stdout.flush()
            # container for each turn
            sourceutt = []
            targetutt = []
            m_sourceutt = []
            m_targetutt = []
            utt_group = []
            srcpos = []
            tarpos = []
            # dialogue-local maximum lengths, used for padding below
            maxtar = -1
            maxsrc = -1
            maxmtar = -1
            maxmsrc = -1
            maxfeat = -1
            offers = []
            changes = []
            prevoffer = []
            offered = False
            snapshot_vecs = []
            # for each turn in a dialogue
            for t in range(len(d['dial'])):
                tcount += 1
                turn = d['dial'][t]
                # extract system side sentence feature
                sent = turn['sys']['sent']
                mtar, tar, spos, vpos, venues \
                    = self.extractSeq(sent, type='target')
                # store sentence group (groups were computed in loadVocab)
                utt_group.append(self.sentGroup[groupidx])
                groupidx += 1
                # changing offer label: [1,0] iff a *different* venue is offered
                if len(venues) != 0 and venues[0] not in prevoffer:  # not matching
                    if prevoffer == []:  # new offer
                        change = [0, 1]
                    else:  # changing offer
                        change = [1, 0]
                    prevoffer = venues
                else:
                    change = [0, 1]
                changes.append(change)
                # offer label: [1,0] once any venue has been offered so far
                if offered or len(venues) != 0:  # offer has happened
                    offer = [1, 0]
                    offered = True
                else:
                    offer = [0, 1]
                offers.append(offer)
                # delexicalised
                if len(mtar) > maxtar:
                    maxtar = len(mtar)
                m_targetutt.append(mtar)
                # extract snapshot vectors (one multi-hot row per suffix position)
                snapshot_vec = [[0.0 for x in range(len(self.snapshots))]]
                # add offer and change to snapshot vector
                if offer == [1, 0]: snapshot_vec[0][
                    self.snapshots.index('OFFERED')] = 1.0
                if change == [1, 0]: snapshot_vec[0][
                    self.snapshots.index('CHANGED')] = 1.0
                # attentive snapshot: walk target backwards, accumulating
                # [VALUE_*] tokens still to be generated at each position
                for w in mtar[::-1]:
                    ssvec = deepcopy(snapshot_vec[0])
                    if self.vocab[w] in self.snapshots:
                        ssvec[self.snapshots.index(
                            self.vocab[w])] = 1.0
                    snapshot_vec.insert(0, ssvec)
                # decide changing snapshot or not
                if self.att == True:
                    snapshot_vecs.append(snapshot_vec[:-1])
                else:
                    # non-attentive: every position gets the first (full) vector
                    snapshot_vecs.append([deepcopy(snapshot_vec[0])
                                          for x in snapshot_vec[:-1]])
                # handling positional features
                for f in spos:
                    if len(f) > maxfeat:
                        maxfeat = len(f)
                for f in vpos:
                    if len(f) > maxfeat:
                        maxfeat = len(f)
                tarpos.append([spos, vpos])
                # non delexicalised
                if len(tar) > maxmtar:
                    maxmtar = len(tar)
                targetutt.append(tar)
                # usr responses
                sent = turn['usr']['transcript']
                msrc, src, spos, vpos, _ = self.extractSeq(sent, type='source')
                # delexicalised
                if len(msrc) > maxsrc:
                    maxsrc = len(msrc)
                m_sourceutt.append(msrc)
                # handling positional features
                for f in spos:
                    if len(f) > maxfeat:
                        maxfeat = len(f)
                for f in vpos:
                    if len(f) > maxfeat:
                        maxfeat = len(f)
                srcpos.append([spos, vpos])
                # non delexicalised
                if len(src) > maxmsrc:
                    maxmsrc = len(src)
                sourceutt.append(src)
            # sentence group
            self.sentGroupIndex.append(utt_group)
            # offers
            self.changes.append(changes)
            self.offers.append(offers)
            # padding for snapshots (repeat the first row up to maxtar)
            for i in range(len(m_targetutt)):
                snapshot_vecs[i].extend(
                    [snapshot_vecs[i][0]] * \
                    (maxtar - len(m_targetutt[i])))
            # padding unk tok; cutoffs record the true (unpadded) lengths
            m_sourcecutoff = []
            m_targetcutoff = []
            for i in range(len(m_targetutt)):
                m_targetcutoff.append(len(m_targetutt[i]))
                m_targetutt[i].extend(
                    [self.vocab.index('<unk>')] * \
                    (maxtar - len(m_targetutt[i])))
            for i in range(len(m_sourceutt)):
                m_sourcecutoff.append(len(m_sourceutt[i]))
                m_sourceutt[i].extend(
                    [self.vocab.index('<unk>')] * \
                    (maxsrc - len(m_sourceutt[i])))
            # non delexicalised version
            sourcecutoff = []
            targetcutoff = []
            for i in range(len(targetutt)):
                targetcutoff.append(len(targetutt[i]))
                targetutt[i].extend(
                    [self.vocab.index('<unk>')] * \
                    (maxmtar - len(targetutt[i])))
            for i in range(len(sourceutt)):
                sourcecutoff.append(len(sourceutt[i]))
                sourceutt[i].extend(
                    [self.vocab.index('<unk>')] * \
                    (maxmsrc - len(sourceutt[i])))
            # padding positional features with -1 up to maxfeat
            for i in range(len(tarpos)):
                for j in range(len(tarpos[i])):
                    for k in range(len(tarpos[i][j])):
                        tarpos[i][j][k].extend( \
                            [-1] * (maxfeat - len(tarpos[i][j][k])))
            for i in range(len(srcpos)):
                for j in range(len(srcpos[i])):
                    for k in range(len(srcpos[i][j])):
                        srcpos[i][j][k].extend( \
                            [-1] * (maxfeat - len(srcpos[i][j][k])))
            # entire dialogue matrix
            self.sourceutts.append(sourceutt)
            self.targetutts.append(targetutt)
            self.sourcecutoffs.append(sourcecutoff)
            self.targetcutoffs.append(targetcutoff)
            self.masked_sourceutts.append(m_sourceutt)
            self.masked_targetutts.append(m_targetutt)
            self.masked_sourcecutoffs.append(m_sourcecutoff)
            self.masked_targetcutoffs.append(m_targetcutoff)
            self.snapshot_vecs.append(snapshot_vecs)
            # positional information
            self.delsrcpos.append(srcpos)
            self.deltarpos.append(tarpos)
    def loadSemantics(self):
        """Index per-turn semantic (dialogue-state) labels and DB matches.

        Builds, per dialogue: one-hot informable-slot state vectors
        (self.info_semis), requestable-slot vectors (self.req_semis) and
        DB-match indicator vectors with a 6-way bucketed match count appended
        (self.db_logics).  Informable state is carried over turn-to-turn and
        updated from the user SLU; 'change' labels set in loadDialog are
        corrected when the goal itself (not the venue) changed.
        """
        # sematic labels
        self.info_semis = []
        self.req_semis = []
        self.db_logics = []
        sumvec = np.array([0 for x in range(self.infoseg[-1])])
        # for each dialogue
        dcount = 0.0
        for dx in range(len(self.dialog)):
            d = self.dialog[dx]
            # print loading msgs
            dcount += 1.0
            print '\tloading semi labels from file ... finishing %.2f%%\r' % \
                  (100.0 * float(dcount) / float(len(self.dialog))),
            sys.stdout.flush()
            # container for each turn
            info_semi = []
            req_semi = []
            semi_idxs = []
            db_logic = []
            # for each turn in a dialogue
            for t in range(len(d['dial'])):
                turn = d['dial'][t]
                # read informable semi: start from defaults on turn 0,
                # otherwise carry over the previous turn's state
                semi = sorted(['pricerange=none', 'food=none', 'area=none']) \
                    if len(info_semi) == 0 else deepcopy(info_semi[-1])
                for da in turn['usr']['slu']:
                    for s2v in da['slots']:
                        # skip invalid slots
                        if len(s2v) != 2 or s2v[0] == 'slot':
                            continue
                        s, v = s2v
                        # 'this=dontcare' refers to whatever slot the system
                        # just requested - recover it from the previous sys DA
                        if v == 'dontcare' and s == 'this':
                            sdas = d['dial'][t - 1]['sys']['DA']
                            for sda in sdas:
                                if sda['act'] == 'request':
                                    s = sda['slots'][0][-1]
                                    break
                        toreplace = None
                        for sem in semi:
                            if s in sem:
                                toreplace = sem
                                break
                        if s == 'this':
                            continue
                        else:
                            if toreplace:
                                semi.remove(toreplace)
                            semi.append(s + '=' + v)
                # if goal changes not venue changes, cancel the 'changed' label
                if self.changes[dx][t] == [1, 0]:
                    if info_semi[-1] != sorted(semi):
                        self.changes[dx][t] = [0, 1]
                info_semi.append(sorted(semi))
                # indexing semi and DB
                vec = [0 for x in range(self.infoseg[-1])]
                constraints = []
                for sem in semi:
                    if 'name=' in sem:
                        continue
                    vec[self.infovs.index(sem)] = 1
                    # dontcare values do not constrain the DB search
                    if self.infovs.index(sem) not in self.dontcare:
                        constraints.append(self.infovs.index(sem))
                semi_idxs.append(vec)
                sumvec += np.array(vec)
                infosemi = semi
                # check db match: venue matches iff it satisfies every constraint
                match = [len(filter(lambda x: x in constraints, sub)) \
                         for sub in self.db2inf]
                venue_logic = [int(x >= len(constraints)) for x in match]
                vcount = 0
                for midx in range(len(venue_logic)):
                    if venue_logic[midx] == 1:
                        vcount += len(self.idx2db[midx])
                # append a 6-way one-hot bucket of the match count:
                # 0,1,2,3 exact / 4 = "4-5" / 5 = ">5"
                if vcount <= 3:
                    dummy = [0 for x in range(6)]
                    dummy[vcount] = 1
                    venue_logic.extend(dummy)
                elif vcount <= 5:
                    venue_logic.extend([0, 0, 0, 0, 1, 0])
                else:
                    venue_logic.extend([0, 0, 0, 0, 0, 1])
                db_logic.append(venue_logic)
                # read requestable semi
                semi = sorted(['food', 'pricerange', 'area']) + \
                       sorted(['phone', 'address', 'postcode'])
                for da in turn['usr']['slu']:
                    for s2v in da['slots']:
                        if s2v[0] == 'slot':
                            for i in range(len(semi)):
                                if s2v[1] == semi[i]:
                                    semi[i] += '=exist'
                for i in range(len(semi)):
                    if '=exist' not in semi[i]:
                        semi[i] += '=none'
                vec = [0 for x in range(self.reqseg[-1])]
                for sem in semi:
                    vec[self.reqs.index(sem)] = 1
                req_semi.append(vec)
            self.info_semis.append(semi_idxs)
            self.req_semis.append(req_semi)
            self.db_logics.append(db_logic)
        print
def extractSeq(self, sent, type='source', normalise=False, index=True):
# setup vocab
if type == 'source':
vocab = self.vocab
elif type == 'target':
vocab = self.vocab
# standardise sentences
if normalise:
sent = normalize(sent)
# preporcessing
words = sent.split()
if type == 'source':
if len(words) == 0: words = ['<unk>']
elif type == 'target':
words = ['</s>'] + words + ['</s>']
# indexing, non-delexicalised
if index:
idx = map(lambda w: vocab.index(w) if w in vocab else 0, words)
else:
idx = words
# delexicalise all
sent = self.delexicalise(' '.join(words), mode='all')
sent = re.sub(digitpat, '[VALUE_COUNT]', sent)
words = sent.split()
# formulate delex positions
allvs = self.infovs + self.reqs
sltpos = [[] for x in allvs]
valpos = [[] for x in allvs]
names = []
for i in range(len(words)):
if '::' not in words[i]:
continue
# handling offer changing
if words[i].startswith('[VALUE_NAME]'):
name = words[i].replace('[VALUE_NAME]::', '')
names.append(name)
# remove pos identifier
tok, ID = words[i].split("::")
words[i] = tok
# record position
mytok, sov = tok[1:-1].lower().split('_')
ID = ID.replace('-', ' ')
mylist = sltpos if mytok == 'slot' else valpos
for j in range(len(allvs)):
s, v = allvs[j].split('=')
comp = s if mytok == 'slot' else v
if comp == ID:
if mytok == 'slot':
sltpos[j].append(i)
else:
valpos[j].append(i)
# indexing, delexicalised
if index:
midx = map(lambda w: vocab.index(w) if w in vocab else 0, words)
else:
midx = words
return midx, idx, sltpos, valpos, names
    def delexicalise(self, utt, mode='all'):
        """Replace slot/value surface forms in *utt* with delex tokens.

        mode='informable'  : only informable tokens, keeping the original
                             value attached as 'TOK::value' for position info
        mode='requestable' : only requestable value tokens, plain replacement
        mode='all'         : everything; informable tokens keep '::value'

        Relies on self.values/supervalues/slots being sorted longest-first
        (see prepareSlotValues) so longer phrases are replaced before their
        substrings.  Digits are finally collapsed to [VALUE_COUNT].
        """
        inftoks = ['[VALUE_' + s.upper() + ']' for s in self.s2v['informable'].keys()] + \
                  ['[SLOT_' + s.upper() + ']' for s in self.s2v['informable'].keys()] + \
                  ['[VALUE_DONTCARE]', '[VALUE_NAME]'] + \
                  ['[SLOT_' + s.upper() + ']' for s in self.s2v['requestable'].keys()]
        reqtoks = ['[VALUE_' + s.upper() + ']' for s in self.s2v['requestable'].keys()]
        for i in range(len(self.values)):
            # informable mode, preserving location information
            if mode == 'informable' and self.slots[i] in inftoks:
                tok = self.slots[i] + '::' + (self.supervalues[i]).replace(' ', '-')
                # pad with spaces so only whole-word occurrences match
                utt = (' ' + utt + ' ').replace(' ' + self.values[i] + ' ', ' ' + tok + ' ')
                utt = utt[1:-1]
            # requestable mode
            elif mode == 'requestable' and self.slots[i] in reqtoks:
                utt = (' ' + utt + ' ').replace(' ' + self.values[i] + ' ', ' ' + self.slots[i] + ' ')
                utt = utt[1:-1]
            elif mode == 'all':
                tok = self.slots[i] + '::' + (self.supervalues[i]).replace(' ', '-') \
                    if self.slots[i] in inftoks else self.slots[i]
                utt = (' ' + utt + ' ').replace(' ' + self.values[i] + ' ', ' ' + tok + ' ')
                utt = utt[1:-1]
        utt = re.sub(digitpat, '[VALUE_COUNT]', utt)
        return utt
def delexicaliseOne(self, utt, toks, repl):
for tok in toks:
utt = (' ' + utt + ' ').replace(' ' + tok + ' ', ' ' + repl + ' ')
utt = utt[1:-1]
return utt
    def prepareSlotValues(self):
        """Build delexicalisation templates and semantic label spaces.

        Produces the parallel lists self.values / self.supervalues /
        self.slots (surface form -> canonical value -> delex token), sorted
        longest-surface-form-first so delexicalise() replaces greedily; the
        informable label space self.infovs with segment offsets self.infoseg;
        the requestable space self.reqs with self.reqseg; the indexes of
        dontcare labels self.dontcare; and self.ngs2v for n-gram indexing.
        """
        print '\tprepare slot value templates ...'
        # put db requestable values into s2v
        for e in self.db:
            for s, v in e.iteritems():
                if self.s2v['requestable'].has_key(s):
                    self.s2v['requestable'][s].append(v.lower())
                if self.s2v['other'].has_key(s):
                    self.s2v['other'][s].append(v.lower())
        # sort values (dedup + deterministic order)
        for s, vs in self.s2v['informable'].iteritems():
            self.s2v['informable'][s] = sorted(list(set(vs)))
        for s, vs in self.s2v['requestable'].iteritems():
            self.s2v['requestable'][s] = sorted(list(set(vs)))
        for s, vs in self.s2v['other'].iteritems():
            self.s2v['other'][s] = sorted(list(set(vs)))
        # make a 1-on-1 mapping for delexicalisation
        self.supervalues = []
        self.values = []
        self.slots = []
        for s, vs in self.s2v['informable'].iteritems():
            # adding slot delexicalisation (every paraphrase in semidict)
            self.supervalues.extend([s for x in self.semidict[s]])
            self.values.extend([normalize(x) for x in self.semidict[s]])
            self.slots.extend(['[SLOT_' + s.upper() + ']' for x in self.semidict[s]])
            # adding value delexicalisation
            for v in vs:
                self.supervalues.extend([v for x in self.semidict[v]])
                self.values.extend([normalize(x) for x in self.semidict[v]])
                self.slots.extend(['[VALUE_' + s.upper() + ']' for x in self.semidict[v]])
        for s, vs in self.s2v['requestable'].items() + self.s2v['other'].items():
            # adding value delexicalisation
            self.values.extend([normalize(v) for v in vs])
            self.supervalues.extend([v for v in vs])
            self.slots.extend(['[VALUE_' + s.upper() + ']' for v in vs])
            # adding slot delexicalisation
            self.supervalues.extend([s for x in self.semidict[s]])
            self.values.extend([normalize(x) for x in self.semidict[s]])
            self.slots.extend(['[SLOT_' + s.upper() + ']' for x in self.semidict[s]])
        # incorporate dontcare values
        self.values.extend([normalize(v) for v in self.semidict['any']])
        self.supervalues.extend(['dontcare' for v in self.semidict['any']])
        self.slots.extend(['[VALUE_DONTCARE]' for v in self.semidict['any']])
        # sorting according to length, longest first (greedy replacement)
        self.values, self.supervalues, self.slots = zip(*sorted( \
            zip(self.values, self.supervalues, self.slots), \
            key=lambda x: len(x[0]), reverse=True))
        # for generating semantic labels
        self.infovs = []
        self.infoseg = [0]
        self.reqs = []
        self.reqseg = [0]
        self.dontcare = []
        for s in sorted(self.s2v['informable'].keys()):
            self.infovs.extend([s + '=' + v for v in self.s2v['informable'][s]])
            self.infovs.append(s + '=dontcare')
            self.infovs.append(s + '=none')
            self.infoseg.append(len(self.infovs))
            # dont care values ('=dontcare' and '=none' are non-constraining)
            self.dontcare.append(len(self.infovs) - 1)
            self.dontcare.append(len(self.infovs) - 2)
        for s in sorted(self.s2v['informable'].keys()):
            self.reqs.extend([s + '=exist', s + '=none'])
            self.reqseg.append(len(self.reqs))
        for s in sorted(self.s2v['requestable'].keys()):
            self.reqs.extend([s + '=exist', s + '=none'])
            self.reqseg.append(len(self.reqs))
        # for ngram indexing
        self.ngs2v = []
        for s in sorted(self.s2v['informable'].keys()):
            self.ngs2v.append((s, self.s2v['informable'][s] + ['any', 'none']))
        for s in sorted(self.s2v['informable'].keys()):
            self.ngs2v.append((s, ['exist', 'none']))
        for s in sorted(self.s2v['requestable'].keys()):
            self.ngs2v.append((s, ['exist', 'none']))
def loadjson(self, filename):
with open(filename) as data_file:
for i in range(5):
data_file.readline()
data = json.load(data_file)
return data
    def _printStats(self):
        """Print split sizes, vocabulary / n-gram counts and DB size."""
        print '\n==============='
        print 'Data statistics'
        print '==============='
        print 'Train : %d' % len(self.data['train'])
        print 'Valid : %d' % len(self.data['valid'])
        print 'Test  : %d' % len(self.data['test'])
        print '==============='
        print 'Voc   : %d' % len(self.vocab)
        # n-gram stats only exist when the tracker uses n-gram encoding
        if self.trkenc == 'ng':
            print 'biGram:   : %d' % len(self.bigrams)
            print 'triGram:  : %d' % len(self.trigrams)
        if self.trkenc == 'ng':
            print 'All Ngram: %d' % len(self.ngrams)
        print '==============='
        print 'Venue : %d' % len(self.db2inf)
        print '==============='
    def _setupData(self, percent):
        """Zip all per-dialogue structures together and split them into
        self.data['train'/'valid'/'test'].

        percent : percentage of the train+valid portion actually retained
                  (the test portion is untouched).
        """
        # zip corpus; tracker features depend on the chosen encoding
        if self.trkenc == 'ng':
            trksrc = self.ngram_source
            trktar = self.ngram_target
        else:
            trksrc = self.delsrcpos
            trktar = self.deltarpos
        # NOTE: downstream code (lengthenData) indexes into this tuple by
        # position, so the order here is load-bearing
        corpus = [self.sourceutts, self.sourcecutoffs,
                  self.masked_sourceutts, self.masked_sourcecutoffs,
                  self.targetutts, self.targetcutoffs,
                  self.masked_targetutts, self.masked_targetcutoffs,
                  self.snapshot_vecs,
                  self.changes, self.goals,
                  self.info_semis, self.req_semis,
                  np.array(self.db_logics),
                  trksrc, trktar,
                  self.finished, self.sentGroupIndex]
        corpus = zip(*corpus)
        # split out train+valid
        train_valid = self.split.train_valid(corpus)
        # cut dataset according to percentage
        percent = float(percent) / float(100)
        train_valid = train_valid[:int(len(train_valid) * percent)]
        # split into train/valid/test
        self.data['train'] = self.split.train(train_valid)
        self.data['valid'] = self.split.valid(train_valid)
        self.data['test'] = self.split.test(corpus)
    def read(self, mode='train'):
        """Return the next (possibly lengthened) dialogue for *mode*.

        Returns None once per epoch when the split is exhausted, resetting
        the cursor and (for train/valid) reshuffling according to
        self.shuffle.  Consecutive dialogues are concatenated up to
        self.lengthen via lengthenData().
        """
        ## default implementation for read() function
        if self.mode != mode:
            self.mode = mode
            index = 0
        # end of data , reset index & return None
        if self.index >= len(self.data[mode]):
            data = None
            self.index = 0
            if mode != 'test':  # train or valid, do shuffling
                if self.shuffle == 'static':  # just shuffle current set
                    random.shuffle(self.data[mode])
                elif self.shuffle == 'dynamic':
                    # shuffle train + valid together, then re-split
                    train_valid = self.data['train'] + self.data['valid']
                    random.shuffle(train_valid)
                    self.data['train'] = self.split.train(train_valid)
                    self.data['valid'] = self.split.valid(train_valid)
            return data
        # 1 dialog at a time
        data = deepcopy(list(self.data[mode][self.index]))
        lengthen_idx = 1
        while lengthen_idx < self.lengthen and \
                self.index + lengthen_idx < len(self.data[mode]):
            # lengthen the data by combining two data points
            nextdata = deepcopy(list(self.data[mode][self.index + lengthen_idx]))
            data = self.lengthenData(data, nextdata, mode)
            lengthen_idx += 1
        self.index += lengthen_idx
        return data
    def lengthenData(self, data, addon, mode):
        """Concatenate two zipped dialogue tuples (see _setupData for the
        field order), prepending *addon*'s turns to *data*'s.

        Sequence fields are padded to a common length before concatenation:
        word sequences (indexes 0,2,4,6) with 0, snapshot vectors (index 8)
        with -1 rows, and ngram/position features (indexes 14,15) with -1.
        Informable tracker labels (index 11) are then propagated forward
        across the join where a slot is still '=none'.
        """
        # for t in range(len(data[10])):
        #    print np.nonzero(np.array(data[10][t]))
        for i in range(len(data)):  # for every data matrix
            if isinstance(data[i], list):
                idx = [0, 2, 4, 6]
                if i in idx:  # sequences, need padding
                    maxleng = max(len(data[i][0]), len(addon[i][0]))
                    for t in range(len(data[i])):  # for each turn
                        data[i][t].extend([0] * (maxleng - len(data[i][t])))
                    for t in range(len(addon[i])):  # for each turn
                        addon[i][t].extend([0] * (maxleng - len(addon[i][t])))
                idx = [8]
                if i in idx:  # snapshot vectors
                    maxleng = max(len(data[i][0]), len(addon[i][0]))
                    for t in range(len(data[i])):  # turn
                        data[i][t].extend([[-1 for cnt in \
                                            range(len(data[i][t][0]))]] * (maxleng - len(data[i][t])))
                    for t in range(len(addon[i])):  # turn
                        addon[i][t].extend([[-1 for cnt in \
                                             range(len(addon[i][t][0]))]] * (maxleng - len(addon[i][t])))
                idx = [14, 15]
                if i in idx:  # ngram/position features
                    maxleng = max(len(data[i][0][0][0]), len(addon[i][0][0][0]))
                    for t in range(len(data[i])):  # turn
                        for x in range(len(data[i][t])):  # slot or value
                            for sv in range(len(data[i][t][x])):  # each value
                                data[i][t][x][sv].extend([-1] * \
                                                         (maxleng - len(data[i][t][x][sv])))
                    for t in range(len(addon[i])):  # turn
                        for x in range(len(addon[i][t])):  # slot or value
                            for sv in range(len(addon[i][t][x])):  # each value
                                addon[i][t][x][sv].extend([-1] * \
                                                          (maxleng - len(addon[i][t][x][sv])))
                data[i] = addon[i] + data[i]
        # propagte tracker labels: where a slot segment still ends in '=none',
        # copy the previous turn's segment forward
        for t in range(len(data[11])):
            for s in range(len(self.infoseg[:-1])):
                if t != 0 and data[11][t][self.infoseg[s]:self.infoseg[s + 1]][-1] == 1:
                    data[11][t][self.infoseg[s]:self.infoseg[s + 1]] = \
                        data[11][t - 1][self.infoseg[s]:self.infoseg[s + 1]]
        # print np.nonzero(np.array(data[10][t]))
        # print np.array(data[0]).shape
        # raw_input()
        """
        for i in range(len(data)):
            try: data[i] = np.array(data[i],dtype='float32')
            except: pass
        """
        return data
    def iterate(self, mode='test', proc=True):
        """Return the entire dataset split for *mode* (the *proc* flag is
        accepted for interface compatibility but unused here)."""
        # default implementation for iterate() function
        return self.data[mode]
    def structureDB(self):
        """Group DB entries by their informable-value signature.

        Builds: self.db2inf (unique sorted informable-index signatures),
        self.db2idx (entry -> signature index), self.idx2db (signature ->
        list of matching signatures, one per entry), self.idx2ent
        (signature index -> list of raw entries) and self.n2db (lowercased
        venue name -> signature index).
        """
        # all informable values
        print '\tformatting DB ...'
        # represent each db entry with informable values
        self.db2inf = []
        self.db2idx = []
        self.idx2db = []
        self.idx2ent = {}
        for i in range(len(self.db)):
            e = self.db[i]
            e2inf = []
            for s, v in e.iteritems():
                if s in self.s2v['informable']:
                    e2inf.append(self.infovs.index(s + '=' + v))
            e2inf = sorted(e2inf)
            # if not repeat, create new entry
            if e2inf not in self.db2inf:
                self.db2inf.append(e2inf)
                self.db2idx.append(len(self.db2inf) - 1)
                self.idx2db.append([e2inf])
                self.idx2ent[self.db2inf.index(e2inf)] = [e]
            else:  # if repeat, indexing back
                self.db2idx.append(self.db2inf.index(e2inf))
                self.idx2db[self.db2inf.index(e2inf)].append(e2inf)
                self.idx2ent[self.db2inf.index(e2inf)].append(e)
        # create hash for finding db index by name
        self.n2db = {}
        for i in range(len(self.db)):
            self.n2db[self.db[i]['name'].lower()] = self.db2idx[i]
    def loadVocab(self):
        """Build input/output/joint vocabularies, sentence groups and the
        snapshot token set.

        Normalises every utterance in place, collects delexicalised words
        from both sides, groups system sentences by their lemmatised
        content-word set (for VAE latent actions, capped at self.dl groups),
        thresholds rare words, and derives self.snapshots from the output
        [VALUE_*] tokens plus OFFERED/CHANGED.
        """
        # iterate through dialog and make vocab
        self.inputvocab = ['[VALUE_DONTCARE]', '[VALUE_COUNT]']
        self.outputvocab = ['[VALUE_DONTCARE]', '[VALUE_COUNT]']
        self.vocab = []
        # init inputvocab with informable values
        for s, vs in self.s2v['informable'].iteritems():
            for v in vs:
                if v == 'none': continue
                self.inputvocab.extend(v.split())
            self.inputvocab.extend(['[SLOT_' + s.upper() + ']', '[VALUE_' + s.upper() + ']'])
            self.outputvocab.extend(['[SLOT_' + s.upper() + ']', '[VALUE_' + s.upper() + ']'])
        # add every word in semidict into vocab
        for s in self.semidict.keys():
            for v in self.semidict[s]:
                self.inputvocab.extend(v.split())
        # for grouping sentences
        sentKeys = {}
        self.sentGroup = []
        # lemmatizer
        lmtzr = WordNetLemmatizer()
        # form lexican
        ivocab = []
        ovocab = []
        for i in range(len(self.dialog)):
            print '\tsetting up vocab, finishing ... %.2f%%\r' % \
                  (100.0 * float(i) / float(len(self.dialog))),
            sys.stdout.flush()
            # parsing dialog
            for j in range(len(self.dialog[i]['dial'])):
                # text normalisation
                self.dialog[i]['dial'][j]['sys']['sent'] = normalize(
                    self.dialog[i]['dial'][j]['sys']['sent'])
                self.dialog[i]['dial'][j]['usr']['transcript'] = normalize(
                    self.dialog[i]['dial'][j]['usr']['transcript'])
                # this turn
                turn = self.dialog[i]['dial'][j]
                # system side
                words, _, _, _, _ = self.extractSeq(turn['sys']['sent'], \
                                                    type='target', index=False)
                ovocab.extend(words)
                # sentence group key: lemmatised non-stopword content set
                key = tuple(set(sorted(
                    [lmtzr.lemmatize(w) for w in words if w not in self.stopwords])))
                if key in sentKeys:
                    sentKeys[key][1] += 1
                    self.sentGroup.append(sentKeys[key][0])
                else:
                    sentKeys[key] = [len(sentKeys), 1]
                    self.sentGroup.append(sentKeys[key][0])
                # user side
                words = self.delexicalise(turn['usr']['transcript']).split()
                # NOTE(review): the call below feeds the *system* sentence
                # (turn['sys']['sent']) into the source-side extraction,
                # overwriting the user words computed just above - looks like
                # a copy-paste slip (expected turn['usr']['transcript']);
                # confirm before changing, since it alters the input vocab.
                mwords, words, _, _, _ = self.extractSeq(turn['sys']['sent'], \
                                                         type='source', index=False)
                ivocab.extend(mwords)
                # ivocab.extend(words)
                """
                for hyp in t['usr']['asr']:
                    words = self.delexicalise(normalize(hyp['asr-hyp'])).split()
                    ivocab.extend(words)
                """
        print
        # re-assigning sentence group w.r.t their frequency
        mapping = {}
        idx = 0
        cnt = 0
        for key, val in sorted(sentKeys.iteritems(), key=lambda x: x[1][1], reverse=True):
            mapping[val[0]] = idx
            # print idx, val[1], key
            if idx < self.dl - 1: cnt += val[1]
            idx += 1
        # raw_input()
        print '\tsemi-supervised action examples: %2.2f%%' % \
              (float(cnt) / float(len(self.sentGroup)) * 100)
        for i in range(len(self.sentGroup)):
            self.sentGroup[i] = min(mapping[self.sentGroup[i]], self.dl - 1)
        # set threshold for input vocab (keep words occurring more than once)
        counts = dict()
        for w in ivocab:
            counts[w] = counts.get(w, 0) + 1
        self.inputvocab = ['<unk>', '</s>', '<slot>', '<value>'] + \
                          sorted(list(set(self.inputvocab + \
                                          [w for w, c in sorted(counts.iteritems(), key=operator.itemgetter(1)) if
                                           c > 1])))
        # set threshold for output vocab
        counts = dict()
        for w in ovocab:
            counts[w] = counts.get(w, 0) + 1
        self.outputvocab = ['<unk>', '</s>'] + \
                           sorted(list(set(self.outputvocab + ['thank', 'you', 'goodbye'] + \
                                           [w for w, c in sorted(counts.iteritems(), key=operator.itemgetter(1))])))
        # the whole vocab: union of both sides behind the special tokens
        self.vocab = ['<unk>', '</s>', '<slot>', '<value>'] + \
                     list(set(self.inputvocab[4:]).union(self.outputvocab[2:]))
        # create snapshot dimension
        self.snapshots = ['OFFERED', 'CHANGED']
        for w in self.outputvocab:
            if w.startswith('[VALUE'):
                self.snapshots.append(w)
        self.snapshots = sorted(self.snapshots)
    def parseGoal(self):
        """Encode each dialogue's user goal as two one-hot vectors and
        print corpus-level venue-match (VMC) and success rates.

        self.goals[i] = [informable-constraint vector, requested-slot vector].
        A dialogue counts as VMC if the system ever offered a venue
        ([VALUE_NAME]), and as a success if additionally every requested
        slot was mentioned in some system response.
        """
        # parse goal into dict format
        self.goals = []
        # for computing corpus success
        requestables = ['phone', 'address', 'postcode', 'food', 'area', 'pricerange']
        vmc, success = 0., 0.
        # for each dialog
        for i in range(len(self.dialog)):
            d = self.dialog[i]
            goal = [np.zeros(self.infoseg[-1]),
                    np.zeros(self.reqseg[-1])]
            for s2v in d['goal']['constraints']:
                s, v = s2v
                s2v = s + '=' + v
                if v != 'dontcare' and v != 'none':
                    # goal['inf'].append( self.infovs.index(s2v) )
                    goal[0][self.infovs.index(s2v)] = 1
            for s in d['goal']['request-slots']:
                # informable slots are constraints, not requests
                if s == 'pricerange' or s == 'area' or s == 'food':
                    continue
                # goal['req'].append(self.reqs.index(s+'=exist'))
                goal[1][self.reqs.index(s + '=exist')] = 1
            self.goals.append(goal)
            # compute corpus success
            m_targetutt = self.masked_targetutts[i]
            m_targetutt_len = self.masked_targetcutoffs[i]
            # for computing success
            offered = False
            requests = []
            # iterate each turn; strip the leading/trailing </s> markers
            for t in range(len(m_targetutt)):
                sent_t = [self.vocab[w] for w in
                          m_targetutt[t][:m_targetutt_len[t]]][1:-1]
                if '[VALUE_NAME]' in sent_t: offered = True
                for requestable in requestables:
                    if '[VALUE_' + requestable.upper() + ']' in sent_t:
                        requests.append(self.reqs.index(requestable + '=exist'))
            # compute success
            if offered:
                vmc += 1.
                if set(requests).issuperset(set(goal[1].nonzero()[0].tolist())):
                    success += 1.
        print '\tCorpus VMC       : %2.2f%%' % (vmc / float(len(self.dialog)) * 100)
        print '\tCorpus Success   : %2.2f%%' % (success / float(len(self.dialog)) * 100)
#########################################################################
############################## Deprecated ###############################
#########################################################################
"""
def loadNgramVocab(self):
# build bi/tri-gram indexes
print '\tsetting up bigram/trigram vocab'
self.bigrams = []
self.trigrams= []
for dcount in range(len(self.dialog)):
# parsing dialog
print '\tloading n-gram features from file ... finishing %.2f%%\r'%\
(100.0*float(dcount)/float(len(self.dialog))),
sys.stdout.flush()
d = self.dialog[dcount]
for t in d['dial']:
for sent in [ t['usr']['transcript'],t['sys']['sent'] ]:
# user side & system side
# delexicalise requestable values
sent = self.delexicalise(sent,mode='requestable')
words = sent.split()
# lexical features
lexbi = [(words[i],words[i+1]) for i in range(len(words)-1)]
lextri= [(words[i],words[i+1],words[i+2]) for i in range(len(words)-2)]
self.bigrams.extend(lexbi)
self.trigrams.extend(lextri)
for s,vs in self.ngs2v:
# delexicalise slot
words = self.delexicaliseOne(sent,self.semidict[s],'<slot>').split()
self.bigrams.extend( [x for x in [(words[i],words[i+1]) \
for i in range(len(words)-1)] if x not in lexbi ])
self.trigrams.extend([x for x in [(words[i],words[i+1],words[i+2]) \
for i in range(len(words)-2)] if x not in lextri])
for v in vs:
# delexicalise value
words = self.delexicaliseOne(sent,self.semidict[v],'<value>').split()
self.bigrams.extend( [x for x in [(words[i],words[i+1]) \
for i in range(len(words)-1)] if x not in lexbi ])
self.trigrams.extend([x for x in [(words[i],words[i+1],words[i+2]) \
for i in range(len(words)-2)] if x not in lextri])
# delexicalise both slot and value
words = self.delexicaliseOne(
self.delexicaliseOne(
sent,self.semidict[v],'<value>'),
self.semidict[s],'<slot>').split()
self.bigrams.extend( [x for x in [(words[i],words[i+1]) \
for i in range(len(words)-1)] if x not in lexbi ])
self.trigrams.extend([x for x in [(words[i],words[i+1],words[i+2]) \
for i in range(len(words)-2)] if x not in lextri])
# set threshold for bigram
counts = dict()
for w in self.bigrams:
counts[w] = counts.get(w, 0) + 1
self.bigrams = sorted([w for w,c in \
sorted(counts.iteritems(),key=operator.itemgetter(1)) if c>7])
# set threshold for trigram
counts = dict()
for w in self.trigrams:
counts[w] = counts.get(w, 0) + 1
self.trigrams= sorted([w for w,c in \
sorted(counts.iteritems(),key=operator.itemgetter(1)) if c>7])
# ngram features
self.ngrams = {}
cnt = 0
for w in self.inputvocab + self.bigrams + self.trigrams:
self.ngrams[w] = cnt
cnt += 1
self.idx2ngs = self.inputvocab + self.bigrams + self.trigrams
def extractNgrams(self,sent):
# delexicalise requestable values first
words = self.delexicalise(sent,mode='requestable').split()
if len(words)==0:
words=['<unk>']
# maximum length
maxlen = -1
# extracting ngram features
nv = []
l_uni = self.indexNgram(self.ngrams,words)
l_bi = self.indexNgram(self.ngrams,zip(words[:-1],words[1:]))
l_tri = self.indexNgram(self.ngrams,zip(words[:-2],words[1:-1],words[2:]))
l_f = l_uni + l_bi + l_tri
for s,vs in self.ngs2v:
# slot delexicalised features
words = self.delexicaliseOne(sent,self.semidict[s],'<slot>').split()
sd_uni = self.indexNgram(self.ngrams,words)
sd_bi = self.indexNgram(self.ngrams,\
zip(words[:-1],words[1:]))
sd_tri = self.indexNgram(self.ngrams,\
zip(words[:-2],words[1:-1],words[2:]))
sd_f = [x for x in sd_uni if x not in l_uni]+\
[x for x in sd_bi if x not in l_bi]+\
[x for x in sd_tri if x not in l_tri]
for v in vs:
# incorporating all kinds of features
fv = l_f + sd_f
#fv = sd_f
# value delexicalised features
words = self.delexicaliseOne(sent,self.semidict[v],'<value>').split()
vd_uni = self.indexNgram(self.ngrams,words)
vd_bi = self.indexNgram(self.ngrams,\
zip(words[:-1],words[1:]))
vd_tri = self.indexNgram(self.ngrams,\
zip(words[:-2],words[1:-1],words[2:]))
fv.extend([x for x in vd_uni if x not in l_uni])
fv.extend([x for x in vd_bi if x not in l_bi] )
fv.extend([x for x in vd_tri if x not in l_tri])
# slot & value delexicalised features
words = self.delexicaliseOne(
self.delexicaliseOne(
sent,self.semidict[v],'<value>'),
self.semidict[s],'<slot>').split()
svd_uni = self.indexNgram(self.ngrams,words)
svd_bi = self.indexNgram(self.ngrams,\
zip(words[:-1],words[1:]))
svd_tri = self.indexNgram(self.ngrams,\
zip(words[:-2],words[1:-1],words[2:]))
fv.extend([x for x in svd_uni if x not in fv])
fv.extend([x for x in svd_bi if x not in fv])
fv.extend([x for x in svd_tri if x not in fv])
nv.append(fv)
if maxlen<len(fv):
maxlen = len(fv)
return nv, maxlen
def loadNgrams(self):
# user ngrams features
self.ngram_source = []
self.ngram_source_cutoffs = []
# previous system response
self.ngram_target = []
self.ngram_target_cutoffs = []
# for each dialogue
dcount = 0.0
for d in self.dialog:
# print loading msgs
dcount += 1.0
print '\tloading n-gram features from file ... finishing %.2f%%\r'%\
(100.0*float(dcount)/float(len(self.dialog))),
sys.stdout.flush()
# container for each turn
ng_src = []
ng_tar = []
maxfeat= -1
# for each turn in a dialogue
for t in range(len(d['dial'])):
turn = d['dial'][t]
# sys n-grams
sent = self.delexicalise(turn['sys']['sent'],mode='requestable')
nv,maxlen = self.extractNgrams(sent)
ng_tar.append([nv])
if maxfeat<maxlen:
maxfeat = maxlen
# current user n-grams
sent = self.delexicalise(turn['usr']['transcript'],mode='requestable')
nv,maxlen = self.extractNgrams(sent)
ng_src.append([nv])
if maxfeat<maxlen:
maxfeat = maxlen
# ngram features
ng_src_cut = []
for i in range(len(ng_src)):
ng_src_cut.append([len(x) for x in ng_src[i][0]])
for j in range(len(ng_src[i][0])):
ng_src[i][0][j].extend( [-1]*(maxfeat-len(ng_src[i][0][j])) )
ng_tar_cut = []
for i in range(len(ng_tar)):
ng_tar_cut.append([len(x) for x in ng_tar[i][0]])
for j in range(len(ng_tar[i][0])):
ng_tar[i][0][j].extend( [-1]*(maxfeat-len(ng_tar[i][0][j])) )
# entire dialogue matrix
self.ngram_source.append(ng_src)
self.ngram_source_cutoffs.append(ng_src_cut)
self.ngram_target.append(ng_tar)
self.ngram_target_cutoffs.append(ng_tar_cut)
print
allvoc = self.inputvocab + self.bigrams + self.trigrams + ['']
for i in range(len(self.ngram_source)):
for j in range(len(self.ngram_source[i])):
scut = self.sourcecutoffs[i][j]
ngfeat = self.ngram_source[i][j][0]
for v in range(len(ngfeat)):
print [allvoc[x] for x in ngfeat[v]]
print
print ' '.join([self.inputvocab[x] \
for x in self.masked_sourceutts[i][j][:scut]])
tcut = self.masked_targetcutoffs[i][j]
print ' '.join([self.outputvocab[x] \
for x in self.masked_targetutts[i][j][:tcut]])
print
raw_input()
#print ' '.join([self.outputvocab[x]\
# for x in self.masked_targetutts[i][j][:tcut]])
def indexNgram(self,lookup,ngs):
return [lookup[w] for w in filter(lambda w: \
lookup.has_key(w), ngs)]
def decoderWeights(self):
self.decodeweights = []
for d in self.masked_targetutts:# for each dialog
d_weights = []
for t in d: # for each turn
t_weights = []
for w in t: # for each word
if self.outputvocab[w].startswith('['):
t_weights.append(1.0)
else:
t_weights.append(1.0)
d_weights.append(t_weights)
self.decodeweights.append(d_weights)
def pruneNoisyData(self):
    # Filter noisy turns out of self.dialog in place (Python 2 code):
    #   * merge negate/affirm/empty user turns back into the previous turn,
    #   * drop turns answered by a repeat/reqmore system act,
    #   * drop turns whose user and system slot values contradict each other,
    #   * drop exact consecutive duplicates.
    # Prints pre-/post-prune turn counts at the end.
    processed_dialog = []
    turn_preprune = 0
    for i in range(len(self.dialog)):
        # '\r' + trailing comma: overwrite the same console line (progress bar).
        print '\tpreprocessing and filtering dialog data ... finishing %.2f%%\r' %\
                (100.0*float(i)/float(len(self.dialog))),
        sys.stdout.flush()
        dialog = []
        j = 0
        turn_preprune += len(self.dialog[i]['dial'])
        while j < len(self.dialog[i]['dial']):
            # collect one turn data
            turn = self.dialog[i]['dial'][j]
            if j+1>=len(self.dialog[i]['dial']):
                # Last turn has no follow-up: synthesise a closing system turn.
                nextturn = {'sys':{'DA':[{'slots': [], 'act': 'thankyou'}],
                        'sent':'thank you goodbye'}}
            else:
                nextturn = self.dialog[i]['dial'][j+1]
            # skip explicit confirmation and null turn: pop the previous turn
            # and re-process it paired with the current system response.
            if( turn['usr']['slu']==[{'slots': [], 'act': 'negate'}] or\
                turn['usr']['slu']==[{'slots': [], 'act': 'affirm'}] or\
                turn['usr']['slu']==[]) and len(dialog)!=0:
                turn = dialog[-1]
                del dialog[-1]
            # skip repeat act
            if nextturn['sys']['DA']==[{u'slots': [], u'act': u'repeat'}] or\
               nextturn['sys']['DA']==[{u'slots': [], u'act': u'reqmore'}]:
                j += 1
                continue
            # normalising texts (pairs user turn j with system turn j+1)
            newturn = {'usr':turn['usr'],'sys':nextturn['sys']}
            newturn['usr']['transcript'] = normalize(newturn['usr']['transcript'])
            newturn['sys']['sent'] = normalize(newturn['sys']['sent'])
            # check mismatch, if yes, discard it: a system slot value must not
            # contradict a concrete (non-dontcare) user-requested value.
            mismatch = False
            tochecks = {'food':None,'pricerange':None,'area':None}
            for da in newturn['usr']['slu']:
                for s,v in da['slots']:
                    if tochecks.has_key(s) and v!='dontcare':
                        tochecks[s] = v
            for da in newturn['sys']['DA']:
                for s,v in da['slots']:
                    if tochecks.has_key(s):
                        if tochecks[s]!=None and tochecks[s]!=v:
                            mismatch = True
                            break
            if mismatch==True: # discard it
                j+=1
                continue
            # adding turn to dialog (skip exact consecutive duplicates)
            if len(dialog)==0:
                dialog.append(newturn)
            else:
                if newturn['usr']['transcript']!=dialog[-1]['usr']['transcript'] or\
                   newturn['sys']['sent'] != dialog[-1]['sys']['sent']:
                    dialog.append(newturn)
            j += 1
        processed_dialog.append(dialog)
    # substitute with processed dialog data
    turn_postprune = 0
    for i in range(len(processed_dialog)):
        turn_postprune += len(processed_dialog[i])
        self.dialog[i]['dial'] = processed_dialog[i]
    print
    print '\t\tpre-prune turn number :\t%d' % turn_preprune
    print '\t\tpost-prune turn number :\t%d' % turn_postprune
"""
#########################################################################
#########################################################################
#########################################################################
|
from unittest import TestCase
from order import Order, SUPPORTED_TEMPERATURES
from shelf import CAPACITY
from uuid import uuid4
class OrderTestCase(TestCase):
    """Shared base class providing helper factories for order/shelf tests."""

    @staticmethod
    def generate_order():
        """Create one canned 'Cheese Pizza' Order with a fresh random id."""
        spec = {"id": str(uuid4()),
                "name": "Cheese Pizza",
                "temp": SUPPORTED_TEMPERATURES[0],
                "shelfLife": 300,
                "decayRate": 0.45}
        return Order(spec['id'], spec['name'], spec['temp'],
                     spec['shelfLife'], spec['decayRate'])

    @staticmethod
    def add_orders_to_capacity(shelf1, capacity=CAPACITY):
        """Fill ``shelf1`` with ``capacity`` freshly generated orders."""
        for _ in range(capacity):
            shelf1.add_order(OrderTestCase.generate_order())
|
import hug
from . import api

# Mount the routes defined in the sibling 'api' module onto this package's
# hug application, making the package itself servable.
hug.API(__name__).extend(api)

# Public API
from .api import get_labels, get_level, get_levels
|
import os
import subprocess
import tempfile

# Scratch file handle (currently unused by the pipeline below).
tf = tempfile.TemporaryFile()
# Fix: use the named constant subprocess.PIPE instead of the magic value -1.
# Pipe `ls` output into `wc -l` and print the line count.
proc_obj = subprocess.Popen('ls', stdout=subprocess.PIPE)
proc_obj2 = subprocess.Popen(['wc', '-l'], stdin=proc_obj.stdout.fileno(), stdout=subprocess.PIPE)
# Parenthesised single-argument print works identically on Python 2 and 3.
print(proc_obj2.stdout.read())
|
import numpy as np
import matplotlib.pyplot as plt
import os

RUN_PATH = './RUNS/'


def _parse_misses(path):
    """Parse one perf cache-miss file; return (naive, opt) floats or None.

    Returns None when the file is missing, empty, or lacks either the
    'naiveMultiplication' or 'optMultiplication' line.
    """
    if not os.path.isfile(path):
        return None
    with open(path, 'r') as fh:
        text = fh.read()
    naive = opt = None
    for line in text.split('\n'):
        # Collapse runs of whitespace, then take the third column.
        fields = " ".join(line.split()).split(' ')
        if 'optMultiplication' in line:
            opt = float(fields[2])
        elif 'naiveMultiplication' in line:
            naive = float(fields[2])
    if naive is None or opt is None:
        return None
    return naive, opt


data = []
for fn in os.listdir(RUN_PATH):
    # Only directories named N_M_B are runs; skip anything else.
    try:
        N, M, B = fn.split('_')
    except ValueError:
        continue
    # extract speedup
    out_path = RUN_PATH + fn + '/out'
    if not os.path.isfile(out_path):
        continue
    with open(out_path, 'r') as out:
        s = out.read()
    if len(s) == 0:
        continue
    speedup = float(s.replace('\n', '')[-4:])
    # extract L1/L2/L3 misses
    l1 = _parse_misses(RUN_PATH + fn + '/L1')
    l2 = _parse_misses(RUN_PATH + fn + '/L2')
    l3 = _parse_misses(RUN_PATH + fn + '/L3')
    if l1 is None or l2 is None or l3 is None:
        # Bug fix: runs with missing/incomplete perf files previously reused
        # stale values from the preceding directory (or raised NameError on
        # the first row); such runs are now skipped instead.
        continue
    data.append([int(N), int(M), int(B), speedup,
                 l1[0], l1[1],
                 l2[0], l2[1],
                 l3[0], l3[1]])

data = np.asarray(data)
np.savetxt('data.txt', data,
           header='N M B speedup l1_naiv l1_opt l2_naiv l2_opt l3_naiv l3_opt')
|
from torch import torch, nn, optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
from src.torch.torch_models.fc_model import NNetwork

# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),
                                ])
# Download and load the training data
trainset = datasets.FashionMNIST('datasets/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('datasets/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
# Defining the loss: NLLLoss assumes the model outputs log-probabilities.
criterion = nn.NLLLoss()
model = NNetwork()
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.Adam(model.parameters(), lr=0.002)
epochs = 4
train_losses, test_losses = [], []
for i in range(epochs):
    running_loss = 0
    for img, label in iter(trainloader):
        output = model(img)
        loss = criterion(output, label)
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: this block runs once after the inner loop completes
        # normally, i.e. it is the per-epoch validation pass.
        test_loss = 0
        accuracy = 0
        with torch.no_grad():  # no gradients needed for evaluation
            model.eval()       # switch off dropout/batch-norm updates
            for images, labels in testloader:
                test_output = model(images)
                test_loss += criterion(test_output, labels)
                ps = torch.exp(test_output)  # log-probabilities -> probabilities
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        model.train()  # back to training mode for the next epoch
        # NOTE(review): test_loss is a 0-d tensor, so test_losses holds tensors
        # while train_losses holds floats — confirm plotting handles both.
        train_losses.append(running_loss / len(trainloader))
        test_losses.append(test_loss / len(testloader))
        print("Epoch: {}/{}.. ".format(i + 1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss / len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss / len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy / len(testloader)))
plt.plot(train_losses, label='Training loss')
plt.plot(test_losses, label='Validation loss')
plt.legend(frameon=False)
plt.show()
print("The state dict keys: \n\n", model.state_dict().keys())
torch.save(model.state_dict(), 'models/uda_l4_14.pth')
|
#!/usr/bin/env python3
# import pandas as pd
# import os
import argparse
from azkaban.azkabans import Flow, Project
from azkaban.utils import *
from azkaban.azssh import restart_azkaban
def update_project(prj_nm):
    """
    Update the project's metadata on the Azkaban server; schedules that are
    already set automatically run against the new metadata.

    :param prj_nm: project name
    """
    zip_path = crt_job_file(prj_nm)
    if zip_path:
        prj = Project(prj_nm)
        prj.create_prj()  # no-op when the project already exists
        prj_old_flow_cnt = len(prj.fetch_flow())
        prj.upload_zip(zip_path)  # upload the zip archive
        prj_new_flow_cnt = len(prj.fetch_flow())
        shutil.rmtree(zip_path.replace(".zip", ""))  # remove the project's temp build dir (normally under temp/)
        if prj_new_flow_cnt > 1:
            # Business rule: a project should end in a single flow (or end_flow).
            logger.warning(prj_nm + "一个项目出现多个工作流,不符合我们的业务规则。请以某个工作流或者end_flow作为结束")
        if 0 < prj_old_flow_cnt != prj_new_flow_cnt:
            # Flow count changed after upload: scheduled jobs may need review.
            logger.warning(prj_nm + "项目上传后工作流数量产生了变化,注意查看并确认是否修改定时任务")
        if prj_new_flow_cnt == 1 and len(prj.fetch_flow_schedule()) < 1:
            # No schedule yet: create one from the configured file settings.
            logger.warning("没有设置执行计划,将按照指定的文件配置设定执行计划")
            prj.schedule_flows()
    else:
        # Job-file generation or zipping failed.
        logger.error(prj_nm + " : job文件生成失败或者压缩文件失败")
def schedule_project(prj_nm, flows=None, cron=None):
    """
    (Re)set the execution schedule for a project's workflows.

    :param prj_nm: project name, e.g. ``dw``
    :param flows: list of workflow names to schedule; ``None`` schedules every
        workflow of the project
    :param cron: cron-format time string; ``None`` reads the time from config.
        Format: sec min hour day month weekday (exactly one of day/weekday
        must be ``?``; second-level precision is not supported — pass 0)
    """
    if prj_nm in get_projects():
        prj = Project(prj_nm)
        prj_new_flow_cnt = len(prj.fetch_flow())
        if prj_new_flow_cnt > 1:
            # Business rule: a project should end in a single flow (or end_flow).
            logger.warning(prj_nm + "一个项目出现多个工作流,不符合我们的业务规则。请以某个工作流或者end_flow作为结束")
        # if prj_new_flow_cnt == 1 and len(prj.fetch_flow_schedule()) < 1:
        logger.warning("设置执行计划,将按照指定的文件配置设定执行计划")
        prj.schedule_flows(cron=cron, flows=flows)
    else:
        # Project does not exist on the server yet.
        logger.error(prj_nm + "项目还没有创建,没有找到相关信息")
def exec_project(prj_nm, flows=None, flow_override=None, disabled=None):
    """Execute a project's workflows on the Azkaban server.

    :param prj_nm: project name, e.g. ``dw``
    :param flows: list of workflow names to run; ``None`` runs every workflow
    :param flow_override: dict overriding global flow parameters,
        e.g. ``{"etl_dt": '2019-07-18'}``
    :param disabled: list of job names to skip, e.g. ``['start', 'el_crm']``
    """
    logger.info("prj_nm={0} flows={1} flow_override={2} disabled={3}".format(prj_nm, flows, flow_override, disabled))
    if prj_nm not in get_projects():
        # Project does not exist on the server yet.
        logger.error(prj_nm + "项目还没有创建,没有找到相关信息")
        return
    prj = Project(prj_nm)
    all_flows = prj.fetch_flow()
    wanted = all_flows if flows is None else flows
    # Iterate the server's flow list so only existing flows are executed.
    for flow_name in all_flows:
        if flow_name in wanted:
            Flow(prj_nm, flow_name, disabled=disabled,
                 flow_override=flow_override).execute()
if __name__ == '__main__':
    # CLI entry point for remote Azkaban deployment/administration.
    parser = argparse.ArgumentParser(description="远程部署azkaban")
    helps = "u 表示更新项目元数据, e 表示执行项目 s 表示给项目添加执行计划,a 激活执行节点 r 重启azkaban"
    parser.add_argument("action", type=str, choices=["u", "e", "s", "a", "r"], help=helps)
    parser.add_argument("prj_nm", type=str, help="项目名称字符型", default="dw")
    parser.add_argument("-f", "--flows", help="工作流的字符列表,eg: \"['a','b']\" ", type=str, default=None)
    parser.add_argument("-t", "--crontab", help="cron的定时器格式字符串", type=str, default=None)
    parser.add_argument("-i", "--ignore", help="job name的字符列表 eg.\"['a','b']\" ", type=str, default=None)
    parser.add_argument("-p", "--param", help="参数传入,数据字典,可以覆盖全局参数 \"{'s':1}\"", type=str, default=None)
    args = parser.parse_args()
    action = args.action
    project = args.prj_nm
    flows_list = eval_str(args.flows)  # list of workflow names, e.g. "['a','b']"
    ignore = eval_str(args.ignore)  # list of job names to skip, e.g. "['a','b']"
    param = eval_str(args.param)  # dict overriding global parameters, e.g. "{'s':1}"
    if action == "u":
        # Update project metadata. A freshly uploaded project is scheduled
        # automatically; delete the schedule manually if that is undesired.
        update_project(project)
    elif action == "e":
        # Execute workflows; the override param must be a dict (or absent).
        if param is None or type(param) == dict:
            exec_project(project, flows=flows_list, flow_override=param, disabled=ignore)
    elif action == "s":
        # Set the execution schedule.
        schedule_project(project, flows=flows_list, cron=args.crontab)
    elif action == "a":
        # Activate executor node(s).
        if project == "all":
            active_executor()
        else:
            active_executor(project, port=12321)
    elif action == "r":
        # Restart azkaban.
        # NOTE(review): both branches are identical — presumably the non-"all"
        # branch should pass the project/host to restart_azkaban; confirm.
        if project == "all":
            restart_azkaban()
        else:
            restart_azkaban()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import atexit
import time
import argparse
from client import Client
import random
import sys
import os
import math
import time
import json
import socket
# # -*- coding: utf-8 -*-
# """This file contains the client class used by the Expanding Nim game
# This class can either be instantiated and used in Python or controlled
# via the command line.
# @author: Munir Contractor <mmc691@nyu.edu>
# """
# initial_game_status_displayed = False
# class Client():
# """The client class for the Expanding Nim game"""
# DATA_SIZE = 1024
# def __init__(self, name, goes_first, server_address):
# """
# Args:
# **name:** The name you want to give your player\n
# **goes_first:** Boolean indicator whether you take the first move
# or not\n
# **server_address:** A tuple of the form (address, port) of the
# server
# """
# self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.socket.connect(server_address)
# self.__order = 0 if goes_first else 1
# self.__send_json({'name': name, 'order': self.__order})
# init_status = self.receive_move()
# self.init_stones = init_status['init_stones']
# self.init_resets = init_status['init_resets']
# def close(self):
# self.socket.close()
# def __del__(self):
# self.close()
# def __send_json(self, json_object):
# """Helper method to send an object to the server as JSON"""
# self.socket.sendall(bytes(json.dumps(json_object), 'utf-8'))
# def make_move(self, num_stones, reset=False):
# """Sends your move to the server and waits for the opponent to move
# The return value is dict containing the keys as follows:
# ``finished``: Boolean indicator whether the game is over or not\n
# ``stones_left``: Stones left in the game\n
# ``current_max``: New current max value\n
# ``reset_used``: Boolean indicator (should be same as input)\n
# ``stones_removed``: Number of stones removed (should match
# the input)\n
# If the ``finished`` indicator evaluates to ``True``, two extra keys,
# ``winner`` and ``reason`` will be included to indicate the winning
# player and the reason for the win.
# Args:
# **num_stones:** The number of stones to remove.\n
# **reset:** Boolean indicator whether you want to use reset or not.
# Return:
# A dict containing the keys described above
# """
# self.__send_json({'order': self.__order, 'num_stones': num_stones,
# 'reset': reset})
# return self.receive_move()
# def receive_move(self):
# """Receives a move and the state of the game after the move
# The return value is dict containing the keys as follows:
# ``finished``: Boolean indicator whether the game is over or not\n
# ``stones_left``: Stones left in the game\n
# ``current_max``: New current max value\n
# ``reset_used``: Boolean indicator whether reset was used in the
# move\n
# ``stones_removed``: Number of stones removed in the move\n
# If the ``finished`` indicator evaluates to ``True``, two extra keys,
# ``winner`` and ``reason`` will be included to indicate the winning
# player and the reason for the win.
# Return:
# A dict containing the keys described above
# """
# # try:
# message_string = json.loads(self.socket.recv(self.DATA_SIZE).decode('utf-8'))
# # except:
# # import pdb; pdb.set_trace()
# global initial_game_status_displayed
# if not initial_game_status_displayed:
# initial_game_status_displayed = True
# print("Game mode\n%d stones and %d resets\nGood luck have fun!" % (message_string['init_stones'], message_string['init_resets']))
# return message_string
# def __read_move(self):
# try:
# move = input('Please enter your move: ').split(' ')
# return int(move[0]), bool(int(move[1]))
# except Exception:
# print('Invalid move string')
# return self.__read_move()
# def send_move(self):
# """Reads a move from stdin and sends it to the server
# The move has to be in the form '%d %d' where the first number
# is the number of stones to remove and second number is a boolean
# flag for whether reset should be done. The move and the result
# are printed out.
# """
# move = self.__read_move()
# status = self.make_move(move[0], move[1])
# print('You took %d stones%s' % (move[0],
# ' and used reset.' if move[1] else '.'))
# print('Current max: %d' % status['current_max'])
# print('Stones left: %d' % status['stones_left'])
# print('Player %s has %d resets left' % (status['player_0']['name'], status['player_0']['resets_left']))
# print('Player %s has %d resets left' % (status['player_1']['name'], status['player_1']['resets_left']))
# print('---------------------------------------')
# if status['finished']:
# print('Game over\n%s' % status['reason'])
# exit(0)
# def get_move(self):
# """Gets the move made by the opponent and prints it out"""
# status = self.receive_move()
# print('Opponent took %d stones%s' % (status['stones_removed'],
# ' and used reset.' if status['reset_used'] else '.'))
# print('Current max: %d' % status['current_max'])
# print('Stones left: %d' % status['stones_left'])
# print('Player %s has %d resets left' % (status['player_0']['name'], status['player_0']['resets_left']))
# print('Player %s has %d resets left' % (status['player_1']['name'], status['player_1']['resets_left']))
# print('---------------------------------------')
# if status['finished']:
# print('Game over\n%s' % status['reason'])
# exit(0)
#0: reset; 1: not rest
#mem denote the result state of after playing 1 means after move wins; -1 means after move lose
class DecisionMaker():
    """Move-selection engine for the Expanding Nim client.

    ``self.mem[stones][my_resets][their_resets][k]`` caches an estimated win
    probability for a position (per the header comment above the class,
    index 0 relates to the reset branch and 1 to the non-reset branch);
    the sentinel value 0.5 marks an entry that has not been evaluated yet.
    """
    def __init__(self):
        # The memo table is (re)built lazily in makeDecision once the endgame
        # threshold is reached, so there is nothing to initialise here.
        pass
        #self.mem = [[[[1/2 for _ in range(2)] for _ in range(4)] for _ in range(4)]for _ in range(1001)]
        #init
        #for i in range(2):
        #    for x in range(4):
        #        for y in range(4):
        #            self.mem[0][x][y][i] = 1
        #            self.mem[1][x][y][i] = 0
        #            self.mem[2][x][y][i] = 0
        #            self.mem[3][x][y][i] = 0
        #            self.mem[4][x][y][1] = 1
        #            self.mem[4][x][y][0] = 0 if curmax>=3 else 1

    def makeDecision(self, game_state, falseArg):
        """Return (stones_to_take, use_reset) for the current game state.

        Plays randomly while many stones remain; once the stone count falls
        to at most 3*current_max + 1, runs the memoised lookahead search.
        ``falseArg`` is accepted but unused (callers pass False).
        """
        curmax = game_state['current_max']
        stones = game_state['stones_left']
        reset = game_state['reset_used']
        leftreset = game_state['player_0']['resets_left']
        otherreset = game_state['player_1']['resets_left']
        # Remaining thinking time out of a 120-second budget.
        left_time = float(120.00) - float(game_state['player_0']['time_taken'])
        time_broken = 0.0001
        # After a reset (or while curmax < 3) at most 3 stones may be taken.
        if reset or curmax < 3:
            maxstep = 3
        else:
            maxstep = curmax +1
        threshhold = 3*curmax + 1
        # Immediate win: we may take every remaining stone.
        if maxstep >= stones:
            return stones, False
        # Heuristic: leave exactly 4 stones and spend a reset.
        if stones-4 <= maxstep and leftreset>0:
            print('maxstep')
            return stones-4, True
        if stones <= threshhold and left_time >= time_broken:
            # Endgame: build the memo table and search exhaustively.
            print("begin calculate")
            # NOTE(review): the table is allocated as
            # [stones][otherreset+1][leftreset+1][2] but indexed below as
            # [stone][leftreset][otherreset][...] — the two middle dimensions
            # look swapped; confirm (the bare except in lookahead may be
            # masking resulting IndexErrors).
            self.mem = [[[[float(0.5) for x in range(2)] for _ in range(leftreset + 1)] for _ in range(otherreset + 1)] for _ in range(max(stones+1, 5))]
            # import pdb; pdb.set_trace()
            # Base cases for 0..4 stones (per the header comment, 1 = the
            # player who just moved wins, 0 = loses).
            for i in range(2):
                for x in range(leftreset):
                    for y in range(otherreset):
                        self.mem[0][x][y][i] = float(1)
                        self.mem[1][x][y][i] = float(0)
                        self.mem[2][x][y][i] = float(0)
                        self.mem[3][x][y][i] = float(0)
                        self.mem[4][x][y][1] = float(1)
                        self.mem[4][x][y][0] = float(0) if curmax >= 3 else float(1)
            #if curmax >= 3:
            #    self.mem[4][leftreset][otherreset][0] = 0
            #else
            #    self.mem[4][leftreset][otherreset][0] = 1
            # Multiples of 4 are marked winning while resets remain.
            for i in range(1, leftreset+1):
                if 4*i<stones+1:
                    self.mem[4*i][leftreset][otherreset][1] = float(1)
            for i in range(5, min(4*otherreset+3, stones+1)):
                if (i%4)!=0:
                    self.mem[i][leftreset][otherreset][1] = float(0)
            #if leftreset > otherreset & 4*leftreset + maxstep <= stones:
            #for i in range(4*leftreset, max(4*leftreset, stones)):
            #self.mem[i][leftreset][otherreset][1] = 1
            #True is my turn
            #start_time = time.time()
            score, state, reset = self.lookahead(stones, maxstep, leftreset, otherreset, True)
            #end_time = time.time()
            move = stones - state
            #print(end_time-start_time)
            #print("judge %r" % (end_time-start_time < 1))
            print('Score: %f, State: %d, Reset: %r' % (score, state, reset))
            # something to prevent corner case.... Algorithm maybe wrong...
            if move == 0:
                move = random.randint(1, maxstep)
            # Post-search overrides of the reset choice (heuristics).
            if(stones > (leftreset+1)*(curmax+1)):
                reset = False
            if state <= max(move, curmax, 3)+1 or (state - 4 <= max(move, curmax, 3)+1 and otherreset>0):
                reset = True
            if leftreset<=0:
                reset = False
            return move, reset
        elif left_time < time_broken:
            # Out of time: answer instantly with a random legal move.
            if leftreset > 0:
                return random.randint(math.floor(maxstep/2), maxstep), True
            else:
                return random.randint(math.floor(maxstep/2), maxstep), False
        elif stones > threshhold: #do something here
            # Midgame: random play, preferring larger takes.
            if not reset:
                return random.randint(math.floor(maxstep/2), maxstep), False
            if reset and leftreset > 0:
                return random.randint(1, maxstep), True
            else:
                return random.randint(1, maxstep), False

    def lookahead(self, stone, maxstep, leftreset, otherreset, turn):
        """Memoised recursive search.

        Returns ``(score, state, reset)``: the estimated win probability of
        the position with ``stone`` stones, the preferred successor stone
        count, and whether a reset should accompany the move.
        """
        #check whether we could win by reset
        if stone <= 0:
            return 1, 0, False
        # Small Mitigation due to last minute bugs related to out of index look up
        try:
            if self.mem[stone][leftreset][otherreset][0] != float(0.5): #back to some where let other lose
                #print("root!!!")
                return self.mem[stone][leftreset][otherreset][0], stone, False
            if self.mem[stone][leftreset][otherreset][1] != float(0.5): #back to some where let other lose
                #print("root!!")
                return self.mem[stone][leftreset][otherreset][1], stone, True
        except:
            # NOTE(review): bare except hides real errors; it likely papers
            # over the swapped memo dimensions noted in makeDecision —
            # confirm and narrow to IndexError.
            print("Did not look up old value correctly")
            return float(0.5), stone, False
        # import pdb; pdb.set_trace()
        # s1/move1 track the best outcome without spending a reset;
        # s2/move2 the best outcome when a reset is spent on this move.
        s1 =float(0)
        move1 = stone
        reset1 = False
        s2 = float(0)
        move2 = stone
        reset2 = True
        count1 = float(0)
        count2 = float(0)
        score1 = float(0)
        score2 = float(0)
        for i in range(maxstep, 0, -1):
            if turn:
                # Our move: recurse on each legal take.
                score1, state1, resetchoice1 = self.lookahead(stone-i, maxstep, leftreset, otherreset, not turn)
                count1 += score1
                if leftreset>=1:
                    # A reset caps the next take at 3.
                    score2, state2, resetchoice2 = self.lookahead(stone-i, 3, leftreset-1, otherreset, not turn)
                    count2 += score2
                s1 = score1 if score1>=s1 else s1
                move1 = stone-i if score1>=s1 else move1
                #reset1 = resetchoice1 if score1>s1 else False
                s2 = score2 if score2>=s2 else s2
                move2 = stone-i if score2>=s2 else move2
                #reset = False if s1>s2 else True
            else:
                # Opponent's move: our score is the complement of theirs.
                score1, state1, resetchoice1 = self.lookahead(stone-i, maxstep, leftreset, otherreset, not turn)
                count1 += 1-score1
                if otherreset>=1:
                    score2, state2, resetchoice2 = self.lookahead(stone-i, 3, leftreset, otherreset-1, not turn)
                    count2 += 1-score2
                s1 = 1-score1 if 1-score1>=s1 else s1
                move1 = stone-i if 1-score1>=s1 else move1
                #reset1 = resetchoice1 if 1-score1>s1 else False
                s2 = 1-score2 if 1-score2>=s2 else s2
                move2 = stone-i if 1-score2>=s2 else move2
                #reset = False if s1>s2 else True
        # Cache the averaged scores (mean over all legal takes).
        self.mem[stone][leftreset][otherreset][0] = float(count1)/float(maxstep)
        self.mem[stone][leftreset][otherreset][1] = float(count2)/float(maxstep)
        #print("not reset", self.mem[stone][leftreset][otherreset][0])
        #print("reset", self.mem[stone][leftreset][otherreset][1])
        if s1>s2:
            finalstate = move1
        else:
            finalstate = move2
        if self.mem[stone][leftreset][otherreset][0]>self.mem[stone][leftreset][otherreset][1]:
            finalscore = self.mem[stone][leftreset][otherreset][0]
            finalreset = False
        else:
            finalscore = self.mem[stone][leftreset][otherreset][1]
            finalreset = True
        return finalscore, finalstate, finalreset
# Module-level singleton so the lookahead memo table persists across moves.
decision_maker = DecisionMaker()
def check_game_status(game_state):
    """Print the server's reason and terminate the process if the game is over."""
    if not game_state['finished']:
        return
    print(game_state['reason'])
    exit(0)
def my_algo(game_state, goes_first):
    """Pick our next move by delegating to the module-level DecisionMaker.

    ``game_state`` is the dict received from the server, e.g.::

        {
            'stones_left': 4,
            'current_max': 3,
            'stones_removed': 3,
            'finished': False,
            'player_0': {'time_taken': 0.003, 'name': 'my name', 'resets_left': 2},
            'player_1': {'time_taken': 13.149, 'name': 'b2', 'resets_left': 1},
            'reset_used': True,
            'init_max': 3,
        }
    """
    print(game_state)
    return decision_maker.makeDecision(game_state, False)
if __name__ == '__main__':
    # Command-line entry point: connect to the game server and play.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--first', action='store_true', default=False,
                        help='Indicates whether client should go first')
    parser.add_argument('--ip', type=str, default= '127.0.0.1')
    parser.add_argument('--port', type=int, default= 9000)
    parser.add_argument('--name', type=str, default= "Lily")
    args = parser.parse_args()
    # Read these from stdin to make life easier
    goes_first = args.first
    ip = args.ip
    port = args.port
    name = args.name if args.first else 'Lily2'
    client = Client(name, goes_first, (ip, port))
    # Ensure the socket is closed however the process exits.
    atexit.register(client.close)
    stones = client.init_stones
    resets = client.init_resets
    if goes_first:
        # Opening move heuristic.
        # NOTE(review): appears to aim at leaving exactly 4 stones and
        # resetting when possible — confirm against the game rules.
        num_stones = random.randint(1, 3) if client.init_stones > 3 else 3
        num_stones = num_stones if client.init_stones - 4 > 3 else client.init_stones - 4
        shouldReset = True if client.init_stones - num_stones == 4 else False
        check_game_status(client.make_move(num_stones, shouldReset))
    while True:
        # Block until the opponent moves, then reply with our own move.
        game_state = client.receive_move()
        check_game_status(game_state)
        # Some parsing logic to convert game state to algo_inputs
        num_stones, reset = my_algo(game_state, goes_first)
        num_stones = max(1, num_stones)  # never send an illegal 0-stone move
        # Only allow a reset if we actually have one left.
        ourPlayerGameState = game_state['player_0'] if args.first else game_state['player_1']
        reset = reset if ourPlayerGameState['resets_left'] > 0 else False
        print('You took %d stones%s' % (num_stones,
                                        ' and used reset.' if reset else '.'))
        print('Current max: %d' % game_state['current_max'])
        print('Stones left: %d' % game_state['stones_left'])
        print('Player %s has %d resets left' % (game_state['player_0']['name'], game_state['player_0']['resets_left']))
        print('Player %s has %d resets left' % (game_state['player_1']['name'], game_state['player_1']['resets_left']))
        print('---------------------------------------')
        if game_state['finished']:
            print('Game over\n%s' % game_state['reason'])
            exit(0)
        check_game_status(client.make_move(num_stones, reset))
|
# Generated by Django 3.1.7 on 2021-05-23 05:42
from django.db import migrations
import phone_field.models
class Migration(migrations.Migration):
    """Auto-generated migration: relax ``studentdetails.referee_phone_number``
    to a nullable, blankable PhoneField (max_length=31)."""

    dependencies = [
        ('student_registration', '0013_merge_20210523_0052'),
    ]

    operations = [
        migrations.AlterField(
            model_name='studentdetails',
            name='referee_phone_number',
            field=phone_field.models.PhoneField(blank=True, help_text='Referee Mobile Number', max_length=31, null=True),
        ),
    ]
|
N = int(input())
answers = list(input())

# Each student's fixed guessing cycle (Adrian = 상근, Bruno = 창영,
# Goran = 현진), repeated enough times to cover every question.
Adrian = list('ABCA' * 33)
Bruno = list('BABC' * 25)
Goran = list('CCAABBCCBB' * 16)

# Count how many answers each student matches.
a_cnt = sum(1 for i in range(len(answers)) if answers[i] == Adrian[i])
b_cnt = sum(1 for i in range(len(answers)) if answers[i] == Bruno[i])
g_cnt = sum(1 for i in range(len(answers)) if answers[i] == Goran[i])

result = max(a_cnt, b_cnt, g_cnt)
name_result = [name for name, cnt in (('Adrian', a_cnt),
                                      ('Bruno', b_cnt),
                                      ('Goran', g_cnt)) if cnt == result]
print(result)
for winner in name_result:
    print(winner)
# TODO: this could be simplified further with the modulo operator!
|
import random

# Simple number-guessing game: keep prompting until the player hits the
# randomly chosen number, then report how many attempts it took.
tries = 1
npcNum = random.randint(1, 10)
while True:
    guess = int(input("Guess the number! "))
    if guess != npcNum:
        print("Nope, try again!")
        tries += 1
        continue
    print(f"Yup, I picked {npcNum}! You win!")
    print(f"It took you {tries} tries.")
    break
|
class Solution:
    def solve(self, s):
        """Return every substring of ``s`` that is an anagram of at least one
        *other* substring (duplicates included), sorted lexicographically.

        Improvement: the original deleted/re-inserted signatures and scanned
        the signature list for every substring (O(n^2) over the substring
        count). Counting signatures once with a Counter gives the identical
        result in a single pass: a substring has an anagram partner iff its
        sorted-character signature occurs at least twice.
        """
        from collections import Counter  # local import; file imports untouched

        subs = [s[i: j] for i in range(len(s))
                for j in range(i + 1, len(s) + 1)]
        sig_counts = Counter(''.join(sorted(sub)) for sub in subs)
        return sorted(sub for sub in subs
                      if sig_counts[''.join(sorted(sub))] > 1)
|
import sqlite3
import Tkinter
import tkMessageBox
class App:
    """Minimal Python 2 Tkinter GUI: a 'Translate' button that reads an
    English word from the console and shows its Spanish translation from a
    SQLite database, plus a 'QUIT' button."""
    def __init__(self, master):
        # NOTE(review): `master` is accepted but never used — widgets attach
        # to the default root window; confirm this is intentional.
        self.word = Tkinter.Button(text="Translate",
                                   command=lambda: self.get_name(
                                       'translation.db',
                                       raw_input("English word: ")))
        self.word.pack(side=Tkinter.LEFT)
        self.button = Tkinter.Button(text="QUIT", fg="red",
                                     command=quit)
        self.button.pack(side=Tkinter.LEFT)
    def get_name(self, database_file, word_eng):
        # Look up `word_eng` in the Translation table and pop up the result.
        self.database_file = database_file
        self.word_eng = word_eng
        # Parameterized query: safe against SQL injection.
        query = "SELECT english || ' ' || spanish FROM Translation \
                 WHERE english=?;"
        connection = sqlite3.connect(database_file)
        connection.text_factory = str
        cursor = connection.cursor()
        cursor.execute(query, [word_eng])
        results = [r[0] for r in cursor.fetchall()]
        cursor.close()
        connection.close()
        tkMessageBox.showinfo("translation: ", results)
# Build the root window, attach the app, and enter the Tk event loop.
root = Tkinter.Tk()
app = App(root)
root.mainloop()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
import os
import shutil
import subprocess
from subprocess import Popen, PIPE
import time
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import json
from knack.util import CLIError
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_y_n
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import send_raw_request
from azure.cli.core import telemetry
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
from msrest.exceptions import AuthenticationError, HttpOperationError, TokenExpiredError
from msrest.exceptions import ValidationError as MSRestValidationError
from kubernetes.client.rest import ApiException
from azext_connectedk8s._client_factory import resource_providers_client, cf_resource_groups
import azext_connectedk8s._constants as consts
from kubernetes import client as kube_client
from azure.cli.core import get_default_cli
from azure.cli.core.azclierror import CLIInternalError, ClientRequestError, ArgumentUsageError, ManualInterrupt, AzureResponseError, AzureInternalError, ValidationError
# Module-level logger for the connectedk8s CLI extension.
logger = get_logger(__name__)

# pylint: disable=line-too-long
# pylint: disable=bare-except
class TimeoutHTTPAdapter(HTTPAdapter):
    """requests HTTPAdapter that applies a default timeout to every request
    that does not explicitly set one."""

    def __init__(self, *args, **kwargs):
        # Pop 'timeout' before delegating: HTTPAdapter.__init__ does not
        # accept it. Falls back to the extension-wide default.
        self.timeout = kwargs.pop("timeout", consts.DEFAULT_REQUEST_TIMEOUT)
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        # Only fill in the timeout when the caller left it unset.
        if kwargs.get("timeout") is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
def validate_location(cmd, location):
    """Validate that ``location`` is a region where the Microsoft.Kubernetes
    provider supports connectedClusters; raise ArgumentUsageError otherwise."""
    # With a custom access token, the subscription comes from the environment
    # rather than the CLI context.
    subscription_id = os.getenv('AZURE_SUBSCRIPTION_ID') if os.getenv('AZURE_ACCESS_TOKEN') else get_subscription_id(cmd.cli_ctx)
    rp_locations = []
    resourceClient = resource_providers_client(cmd.cli_ctx, subscription_id=subscription_id)
    try:
        providerDetails = resourceClient.get('Microsoft.Kubernetes')
    except Exception as e:  # pylint: disable=broad-except
        arm_exception_handler(e, consts.Get_ResourceProvider_Fault_Type, 'Failed to fetch resource provider details')
    for resourceTypes in providerDetails.resource_types:
        if resourceTypes.resource_type == 'connectedClusters':
            # NOTE: the comprehension variable shadows the 'location'
            # parameter but does not leak in Python 3.
            rp_locations = [location.replace(" ", "").lower() for location in resourceTypes.locations]
            if location.lower() not in rp_locations:
                telemetry.set_exception(exception='Location not supported', fault_type=consts.Invalid_Location_Fault_Type,
                                        summary='Provided location is not supported for creating connected clusters')
                raise ArgumentUsageError("Connected cluster resource creation is supported only in the following locations: " +
                                         ', '.join(map(str, rp_locations)), recommendation="Use the --location flag to specify one of these locations.")
            # Only the 'connectedClusters' resource type is relevant.
            break
def validate_custom_token(cmd, resource_group_name, location):
    """Validate the environment when a custom access token is in use.

    Returns ``(custom_token_enabled, location)``; when AZURE_ACCESS_TOKEN is
    unset, returns ``(False, location)`` unchanged. When the token is set,
    requires AZURE_SUBSCRIPTION_ID and AZURE_TENANT_ID, and resolves a
    missing ``location`` from the resource group.

    :raises ValidationError: when a required environment variable is missing
        or the resource-group location cannot be fetched.
    """
    if os.getenv('AZURE_ACCESS_TOKEN'):
        if os.getenv('AZURE_SUBSCRIPTION_ID') is None:
            telemetry.set_exception(exception='Required environment variables and parameters are not set', fault_type=consts.Custom_Token_Environments_Fault_Type,
                                    summary='Required environment variables and parameters are not set')
            raise ValidationError("Environment variable 'AZURE_SUBSCRIPTION_ID' should be set when custom access token is enabled.")
        if os.getenv('AZURE_TENANT_ID') is None:
            telemetry.set_exception(exception='Required environment variables and parameters are not set', fault_type=consts.Custom_Token_Environments_Fault_Type,
                                    summary='Required environment variables and parameters are not set')
            raise ValidationError("Environment variable 'AZURE_TENANT_ID' should be set when custom access token is enabled.")
        if location is None:
            # Fall back to the resource group's location when none was given.
            try:
                resource_client = cf_resource_groups(cmd.cli_ctx, os.getenv('AZURE_SUBSCRIPTION_ID'))
                rg = resource_client.get(resource_group_name)
                location = rg.location
            except Exception as ex:
                telemetry.set_exception(exception=ex, fault_type=consts.Location_Fetch_Fault_Type,
                                        summary='Unable to fetch location from resource group')
                # Bug fix: the format string was missing its '{}' placeholder,
                # so the underlying exception detail was silently dropped.
                raise ValidationError("Unable to fetch location from resource group: {}".format(str(ex)))
        return True, location
    return False, location
def get_chart_path(registry_path, kube_config, kube_context, helm_client_location, chart_folder_name='AzureArcCharts', chart_name='azure-arc-k8sagents'):
    """Pull and export the agent helm chart, returning the local chart path.

    The chart is pulled from *registry_path* into the local OCI cache and then
    exported under '~/.azure/<chart_folder_name>'. Any previous export folder is
    removed first so stale charts are not reused.

    :param registry_path: Full registry path of the helm chart to pull.
    :param kube_config: Optional kubeconfig path forwarded to helm.
    :param kube_context: Optional kube context forwarded to helm.
    :param helm_client_location: Path to the helm binary.
    :param chart_folder_name: Folder under ~/.azure used for the export.
    :param chart_name: Name of the chart inside the export folder.
    :return: Path to the chart directory to install from. For non pre-onboarding
        charts, the 'HELMCHART' environment variable overrides the exported path.
    """
    # Pulling helm chart from registry
    os.environ['HELM_EXPERIMENTAL_OCI'] = '1'
    pull_helm_chart(registry_path, kube_config, kube_context, helm_client_location, chart_name)
    # Exporting helm chart after cleanup
    chart_export_path = os.path.join(os.path.expanduser('~'), '.azure', chart_folder_name)
    try:
        if os.path.isdir(chart_export_path):
            shutil.rmtree(chart_export_path)
    # Bug fix: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; cleanup failures surface as OSError from rmtree.
    except OSError:
        logger.warning("Unable to cleanup the {} already present on the machine. In case of failure, please cleanup the directory '{}' and try again.".format(chart_folder_name, chart_export_path))
    export_helm_chart(registry_path, chart_export_path, kube_config, kube_context, helm_client_location, chart_name)
    # Returning helm chart path
    helm_chart_path = os.path.join(chart_export_path, chart_name)
    if chart_folder_name == consts.Pre_Onboarding_Helm_Charts_Folder_Name:
        chart_path = helm_chart_path
    else:
        # 'HELMCHART' env var (when non-empty) overrides the exported chart path.
        chart_path = os.getenv('HELMCHART') if os.getenv('HELMCHART') else helm_chart_path
    return chart_path
def pull_helm_chart(registry_path, kube_config, kube_context, helm_client_location, chart_name='azure-arc-k8sagents', retry_count=5, retry_delay=3):
    """Pull the helm chart from the registry, retrying transient failures.

    Raises CLIInternalError (and records telemetry) once all retries are exhausted.
    """
    pull_cmd = [helm_client_location, "chart", "pull", registry_path]
    if kube_config:
        pull_cmd += ["--kubeconfig", kube_config]
    if kube_context:
        pull_cmd += ["--kube-context", kube_context]
    for attempt in range(retry_count):
        proc = subprocess.Popen(pull_cmd, stdout=PIPE, stderr=PIPE)
        _, pull_error = proc.communicate()
        if proc.returncode == 0:
            break
        if attempt == retry_count - 1:
            # Last attempt failed: record telemetry and surface the helm stderr.
            telemetry.set_exception(exception=pull_error.decode("ascii"), fault_type=consts.Pull_HelmChart_Fault_Type,
                                    summary="Unable to pull {} helm charts from the registry".format(chart_name))
            raise CLIInternalError("Unable to pull {} helm chart from the registry '{}': ".format(chart_name, registry_path) + pull_error.decode("ascii"))
        time.sleep(retry_delay)
def export_helm_chart(registry_path, chart_export_path, kube_config, kube_context, helm_client_location, chart_name='azure-arc-k8sagents'):
    """Export the previously pulled helm chart into *chart_export_path*.

    Raises CLIInternalError (and records telemetry) when the helm export fails.
    """
    export_cmd = [helm_client_location, "chart", "export", registry_path, "--destination", chart_export_path]
    if kube_config:
        export_cmd += ["--kubeconfig", kube_config]
    if kube_context:
        export_cmd += ["--kube-context", kube_context]
    proc = subprocess.Popen(export_cmd, stdout=PIPE, stderr=PIPE)
    _, export_error = proc.communicate()
    if proc.returncode != 0:
        error_text = export_error.decode("ascii")
        telemetry.set_exception(exception=error_text, fault_type=consts.Export_HelmChart_Fault_Type,
                                summary='Unable to export {} helm chart from the registry'.format(chart_name))
        raise CLIInternalError("Unable to export {} helm chart from the registry '{}': ".format(chart_name, registry_path) + error_text)
def save_cluster_diagnostic_checks_pod_description(corev1_api_instance, batchv1_api_instance, helm_client_location, kubectl_client_location, kube_config, kube_context, filepath_with_timestamp, storage_space_available):
    """Save the 'kubectl describe' output of the diagnostic-checks job pod(s) to disk.

    Lists pods in the 'azure-arc-release' namespace, and for each pod whose name
    starts with 'cluster-diagnostic-checks-job', runs 'kubectl describe pod' and
    writes the output into the timestamped diagnostics folder (when storage space
    is available). Failures are recorded in telemetry; nothing is raised.
    Note: batchv1_api_instance and helm_client_location are accepted but unused here.
    """
    try:
        job_name = "cluster-diagnostic-checks-job"
        all_pods = corev1_api_instance.list_namespaced_pod('azure-arc-release')
        # Traversing through all agents
        for each_pod in all_pods.items:
            # Fetching the current Pod name and creating a folder with that name inside the timestamp folder
            pod_name = each_pod.metadata.name
            if(pod_name.startswith(job_name)):
                # Build the kubectl describe command for the matching job pod.
                describe_job_pod = [kubectl_client_location, "describe", "pod", pod_name, "-n", "azure-arc-release"]
                if kube_config:
                    describe_job_pod.extend(["--kubeconfig", kube_config])
                if kube_context:
                    describe_job_pod.extend(["--context", kube_context])
                response_describe_job_pod = Popen(describe_job_pod, stdout=PIPE, stderr=PIPE)
                output_describe_job_pod, error_describe_job_pod = response_describe_job_pod.communicate()
                if(response_describe_job_pod.returncode == 0):
                    pod_description = output_describe_job_pod.decode()
                    if storage_space_available:
                        dns_check_path = os.path.join(filepath_with_timestamp, "cluster_diagnostic_checks_pod_description.txt")
                        with open(dns_check_path, 'w+') as f:
                            f.write(pod_description)
                else:
                    # kubectl failed: record the stderr in telemetry but keep going.
                    telemetry.set_exception(exception=error_describe_job_pod.decode("ascii"), fault_type=consts.Cluster_Diagnostic_Checks_Pod_Description_Save_Failed, summary="Failed to save cluster diagnostic checks pod description in the local machine")
    except OSError as e:
        # Errno 28 == disk full: drop the partial diagnostics folder entirely.
        if "[Errno 28]" in str(e):
            storage_space_available = False
            telemetry.set_exception(exception=e, fault_type=consts.No_Storage_Space_Available_Fault_Type, summary="No space left on device")
            shutil.rmtree(filepath_with_timestamp, ignore_errors=False, onerror=None)
        else:
            logger.warning("An exception has occured while saving the cluster diagnostic checks pod description in the local machine. Exception: {}".format(str(e)) + "\n")
            telemetry.set_exception(exception=e, fault_type=consts.Cluster_Diagnostic_Checks_Pod_Description_Save_Failed, summary="Error occured while saving the cluster diagnostic checks pod description in the local machine")
    # To handle any exception that may occur during the execution
    except Exception as e:
        logger.warning("An exception has occured while saving the cluster diagnostic checks pod description in the local machine. Exception: {}".format(str(e)) + "\n")
        telemetry.set_exception(exception=e, fault_type=consts.Cluster_Diagnostic_Checks_Pod_Description_Save_Failed, summary="Error occured while saving the cluster diagnostic checks pod description in the local machine")
def check_cluster_DNS(dns_check_log, filepath_with_timestamp, storage_space_available, diagnoser_output):
    """Evaluate the DNS-check log captured from the diagnostics job.

    :param dns_check_log: Raw DNS check output collected from the cluster.
    :param filepath_with_timestamp: Diagnostics folder where the result file is written.
    :param storage_space_available: Whether local disk writes are currently allowed.
    :param diagnoser_output: List accumulating user-facing diagnoser messages (mutated).
    :return: Tuple (check_result, storage_space_available) where check_result is one of
        consts.Diagnostic_Check_Passed / _Failed / _Incomplete.
    """
    try:
        # The expected marker string must be present, otherwise the check never ran.
        if consts.DNS_Check_Result_String not in dns_check_log:
            return consts.Diagnostic_Check_Incomplete, storage_space_available
        formatted_dns_log = dns_check_log.replace('\t', '')
        # Validating if DNS is working or not and displaying proper result
        if("NXDOMAIN" in formatted_dns_log or "connection timed out" in formatted_dns_log):
            logger.warning("Error: We found an issue with the DNS resolution on your cluster. For details about debugging DNS issues visit 'https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/'.\n")
            diagnoser_output.append("Error: We found an issue with the DNS resolution on your cluster. For details about debugging DNS issues visit 'https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/'.\n")
            if storage_space_available:
                dns_check_path = os.path.join(filepath_with_timestamp, consts.DNS_Check)
                with open(dns_check_path, 'w+') as dns:
                    dns.write(formatted_dns_log + "\nWe found an issue with the DNS resolution on your cluster.")
            telemetry.set_exception(exception='DNS resolution check failed in the cluster', fault_type=consts.DNS_Check_Failed, summary="DNS check failed in the cluster")
            return consts.Diagnostic_Check_Failed, storage_space_available
        else:
            if storage_space_available:
                dns_check_path = os.path.join(filepath_with_timestamp, consts.DNS_Check)
                with open(dns_check_path, 'w+') as dns:
                    dns.write(formatted_dns_log + "\nCluster DNS check passed successfully.")
            return consts.Diagnostic_Check_Passed, storage_space_available
    # For handling storage or OS exception that may occur during the execution
    except OSError as e:
        # Errno 28 == disk full: stop writing and remove the partial folder.
        if "[Errno 28]" in str(e):
            storage_space_available = False
            telemetry.set_exception(exception=e, fault_type=consts.No_Storage_Space_Available_Fault_Type, summary="No space left on device")
            shutil.rmtree(filepath_with_timestamp, ignore_errors=False, onerror=None)
        else:
            logger.warning("An exception has occured while performing the DNS check on the cluster. Exception: {}".format(str(e)) + "\n")
            telemetry.set_exception(exception=e, fault_type=consts.Cluster_DNS_Check_Fault_Type, summary="Error occured while performing cluster DNS check")
            diagnoser_output.append("An exception has occured while performing the DNS check on the cluster. Exception: {}".format(str(e)) + "\n")
    # To handle any exception that may occur during the execution
    except Exception as e:
        logger.warning("An exception has occured while performing the DNS check on the cluster. Exception: {}".format(str(e)) + "\n")
        telemetry.set_exception(exception=e, fault_type=consts.Cluster_DNS_Check_Fault_Type, summary="Error occured while performing cluster DNS check")
        diagnoser_output.append("An exception has occured while performing the DNS check on the cluster. Exception: {}".format(str(e)) + "\n")
    return consts.Diagnostic_Check_Incomplete, storage_space_available
def check_cluster_outbound_connectivity(outbound_connectivity_check_log, filepath_with_timestamp, storage_space_available, diagnoser_output, outbound_connectivity_check_for='pre-onboarding-inspector'):
    """Evaluate the outbound-connectivity log captured from the diagnostics job.

    Two modes, selected by *outbound_connectivity_check_for*:
      - 'pre-onboarding-inspector': validates both the cluster-connect precheck
        endpoint (optional, warning only) and the onboarding precheck endpoint.
      - 'troubleshoot': validates the single endpoint response at the end of the log.
    A response code of "000" denotes an unreachable endpoint.

    :param outbound_connectivity_check_log: Raw connectivity check output.
    :param filepath_with_timestamp: Diagnostics folder where result files are written.
    :param storage_space_available: Whether local disk writes are currently allowed.
    :param diagnoser_output: List accumulating user-facing diagnoser messages (mutated).
    :return: Tuple (check_result, storage_space_available) where check_result is one of
        consts.Diagnostic_Check_Passed / _Failed / _Incomplete.
    """
    try:
        if outbound_connectivity_check_for == 'pre-onboarding-inspector':
            if consts.Outbound_Connectivity_Check_Result_String not in outbound_connectivity_check_log:
                return consts.Diagnostic_Check_Incomplete, storage_space_available
            # NOTE(review): split(' ')[0] yields a token with no spaces, so the
            # subsequent split(" : ")[1]/[2] accesses look like they could raise
            # IndexError — confirm against the actual log format emitted by the job.
            Outbound_Connectivity_Log_For_Cluster_Connect = outbound_connectivity_check_log.split(' ')[0]
            # extracting the endpoints for cluster connect feature
            Cluster_Connect_Precheck_Endpoint_Url = Outbound_Connectivity_Log_For_Cluster_Connect.split(" : ")[1]
            # extracting the obo endpoint response code from outbound connectivity check
            Cluster_Connect_Precheck_Endpoint_response_code = Outbound_Connectivity_Log_For_Cluster_Connect.split(" : ")[2]
            if(Cluster_Connect_Precheck_Endpoint_response_code != "000"):
                if storage_space_available:
                    cluster_connect_outbound_connectivity_check_path = os.path.join(filepath_with_timestamp, consts.Outbound_Network_Connectivity_Check_for_cluster_connect)
                    with open(cluster_connect_outbound_connectivity_check_path, 'w+') as outbound:
                        outbound.write("Response code " + Cluster_Connect_Precheck_Endpoint_response_code + "\nOutbound network connectivity check to cluster connect precheck endpoints passed successfully.")
            else:
                # Cluster-connect endpoint is optional: warn and record, but do not fail the check.
                logger.warning("The outbound network connectivity check has failed for the endpoint - " + Cluster_Connect_Precheck_Endpoint_Url + "\nThis will affect the \"cluster-connect\" feature. If you are planning to use \"cluster-connect\" functionality , please ensure outbound connectivity to the above endpoint.\n")
                telemetry.set_exception(exception='Outbound network connectivity check failed for the Cluster Connect endpoint', fault_type=consts.Outbound_Connectivity_Check_Failed_For_Cluster_Connect, summary="Outbound network connectivity check failed for the Cluster Connect precheck endpoint")
                if storage_space_available:
                    cluster_connect_outbound_connectivity_check_path = os.path.join(filepath_with_timestamp, consts.Outbound_Network_Connectivity_Check_for_cluster_connect)
                    with open(cluster_connect_outbound_connectivity_check_path, 'w+') as outbound:
                        outbound.write("Response code " + Cluster_Connect_Precheck_Endpoint_response_code + "\nOutbound connectivity failed for the endpoint:" + Cluster_Connect_Precheck_Endpoint_Url + " ,this is an optional endpoint needed for cluster-connect feature.")
            # The onboarding response code is the last three characters of the log
            # (extracted via a reversed slice, then re-reversed).
            Onboarding_Precheck_Endpoint_outbound_connectivity_response = outbound_connectivity_check_log[-1:-4:-1]
            Onboarding_Precheck_Endpoint_outbound_connectivity_response = Onboarding_Precheck_Endpoint_outbound_connectivity_response[::-1]
            # Validating if outbound connectiivty is working or not and displaying proper result
            if(Onboarding_Precheck_Endpoint_outbound_connectivity_response != "000"):
                if storage_space_available:
                    outbound_connectivity_check_path = os.path.join(filepath_with_timestamp, consts.Outbound_Network_Connectivity_Check_for_onboarding)
                    with open(outbound_connectivity_check_path, 'w+') as outbound:
                        outbound.write("Response code " + Onboarding_Precheck_Endpoint_outbound_connectivity_response + "\nOutbound network connectivity check to the onboarding precheck endpoint passed successfully.")
                return consts.Diagnostic_Check_Passed, storage_space_available
            else:
                outbound_connectivity_failed_warning_message = "Error: We found an issue with outbound network connectivity from the cluster to the endpoints required for onboarding.\nPlease ensure to meet the following network requirements 'https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#meet-network-requirements' \nIf your cluster is behind an outbound proxy server, please ensure that you have passed proxy parameters during the onboarding of your cluster.\nFor more details visit 'https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#connect-using-an-outbound-proxy-server' \n"
                logger.warning(outbound_connectivity_failed_warning_message)
                diagnoser_output.append(outbound_connectivity_failed_warning_message)
                if storage_space_available:
                    outbound_connectivity_check_path = os.path.join(filepath_with_timestamp, consts.Outbound_Network_Connectivity_Check_for_onboarding)
                    with open(outbound_connectivity_check_path, 'w+') as outbound:
                        outbound.write("Response code " + Onboarding_Precheck_Endpoint_outbound_connectivity_response + "\nWe found an issue with Outbound network connectivity from the cluster required for onboarding.")
                telemetry.set_exception(exception='Outbound network connectivity check failed for onboarding', fault_type=consts.Outbound_Connectivity_Check_Failed_For_Onboarding, summary="Outbound network connectivity check for onboarding failed in the cluster")
                return consts.Diagnostic_Check_Failed, storage_space_available
        elif outbound_connectivity_check_for == 'troubleshoot':
            # Response code is again the last three characters of the log.
            outbound_connectivity_response = outbound_connectivity_check_log[-1:-4:-1]
            outbound_connectivity_response = outbound_connectivity_response[::-1]
            if consts.Outbound_Connectivity_Check_Result_String not in outbound_connectivity_check_log:
                return consts.Diagnostic_Check_Incomplete, storage_space_available
            if(outbound_connectivity_response != "000"):
                if storage_space_available:
                    outbound_connectivity_check_path = os.path.join(filepath_with_timestamp, consts.Outbound_Network_Connectivity_Check)
                    with open(outbound_connectivity_check_path, 'w+') as outbound:
                        outbound.write("Response code " + outbound_connectivity_response + "\nOutbound network connectivity check passed successfully.")
                return consts.Diagnostic_Check_Passed, storage_space_available
            else:
                outbound_connectivity_failed_warning_message = "Error: We found an issue with outbound network connectivity from the cluster.\nPlease ensure to meet the following network requirements 'https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#meet-network-requirements' \nIf your cluster is behind an outbound proxy server, please ensure that you have passed proxy parameters during the onboarding of your cluster.\nFor more details visit 'https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#connect-using-an-outbound-proxy-server' \n"
                logger.warning(outbound_connectivity_failed_warning_message)
                diagnoser_output.append(outbound_connectivity_failed_warning_message)
                if storage_space_available:
                    outbound_connectivity_check_path = os.path.join(filepath_with_timestamp, consts.Outbound_Network_Connectivity_Check)
                    with open(outbound_connectivity_check_path, 'w+') as outbound:
                        outbound.write("Response code " + outbound_connectivity_response + "\nWe found an issue with Outbound network connectivity from the cluster.")
                telemetry.set_exception(exception='Outbound network connectivity check failed', fault_type=consts.Outbound_Connectivity_Check_Failed, summary="Outbound network connectivity check failed in the cluster")
                return consts.Diagnostic_Check_Failed, storage_space_available
    # For handling storage or OS exception that may occur during the execution
    except OSError as e:
        # Errno 28 == disk full: stop writing and remove the partial folder.
        if "[Errno 28]" in str(e):
            storage_space_available = False
            telemetry.set_exception(exception=e, fault_type=consts.No_Storage_Space_Available_Fault_Type, summary="No space left on device")
            shutil.rmtree(filepath_with_timestamp, ignore_errors=False, onerror=None)
        else:
            logger.warning("An exception has occured while performing the outbound connectivity check on the cluster. Exception: {}".format(str(e)) + "\n")
            telemetry.set_exception(exception=e, fault_type=consts.Outbound_Connectivity_Check_Fault_Type, summary="Error occured while performing outbound connectivity check in the cluster")
            diagnoser_output.append("An exception has occured while performing the outbound connectivity check on the cluster. Exception: {}".format(str(e)) + "\n")
    # To handle any exception that may occur during the execution
    except Exception as e:
        logger.warning("An exception has occured while performing the outbound connectivity check on the cluster. Exception: {}".format(str(e)) + "\n")
        telemetry.set_exception(exception=e, fault_type=consts.Outbound_Connectivity_Check_Fault_Type, summary="Error occured while performing outbound connectivity check in the cluster")
        diagnoser_output.append("An exception has occured while performing the outbound connectivity check on the cluster. Exception: {}".format(str(e)) + "\n")
    return consts.Diagnostic_Check_Incomplete, storage_space_available
def create_folder_diagnosticlogs(time_stamp, folder_name):
    """Create '~/.azure/<folder_name>/<time_stamp>' for storing diagnostic logs.

    An existing folder with the same timestamp is removed first so a re-run does
    not mix results with a previous run.

    :param time_stamp: Timestamp string used as the sub-folder name.
    :param folder_name: Diagnostics folder name under ~/.azure.
    :return: Tuple (filepath_with_timestamp, True) on success, ("", False) on
        failure (the failure is recorded in telemetry, nothing is raised).
    """
    # Initialized up-front so the except blocks never hit an unbound name when
    # the failure happens before the timestamp folder path is computed.
    filepath_with_timestamp = ""
    try:
        # Fetching path to user directory to create the arc diagnostic folder
        home_dir = os.path.expanduser('~')
        filepath = os.path.join(home_dir, '.azure', folder_name)
        # Idempotently create the parent diagnostics folder.
        os.makedirs(filepath, exist_ok=True)
        filepath_with_timestamp = os.path.join(filepath, time_stamp)
        try:
            os.mkdir(filepath_with_timestamp)
        except FileExistsError:
            # Deleting the folder if present with the same timestamp to prevent
            # overriding in the same folder and then creating it again.
            shutil.rmtree(filepath_with_timestamp, ignore_errors=True)
            os.mkdir(filepath_with_timestamp)
        return filepath_with_timestamp, True
    # For handling storage or OS exception that may occur during the execution
    except OSError as e:
        # Errno 28 == disk full: remove whatever partial folder was created.
        if "[Errno 28]" in str(e):
            if filepath_with_timestamp:
                shutil.rmtree(filepath_with_timestamp, ignore_errors=True)
            telemetry.set_exception(exception=e, fault_type=consts.No_Storage_Space_Available_Fault_Type, summary="No space left on device")
            return "", False
        logger.warning("An exception has occured while creating the diagnostic logs folder in your local machine. Exception: {}".format(str(e)) + "\n")
        telemetry.set_exception(exception=e, fault_type=consts.Diagnostics_Folder_Creation_Failed_Fault_Type, summary="Error while trying to create diagnostic logs folder")
        return "", False
    # To handle any exception that may occur during the execution
    except Exception as e:
        logger.warning("An exception has occured while creating the diagnostic logs folder in your local machine. Exception: {}".format(str(e)) + "\n")
        telemetry.set_exception(exception=e, fault_type=consts.Diagnostics_Folder_Creation_Failed_Fault_Type, summary="Error while trying to create diagnostic logs folder")
        return "", False
def add_helm_repo(kube_config, kube_context, helm_client_location):
    """Register the helm repository named by the HELMREPONAME/HELMREPOURL env vars.

    Raises CLIInternalError (and records telemetry) when 'helm repo add' fails.
    """
    repo_name = os.getenv('HELMREPONAME')
    repo_url = os.getenv('HELMREPOURL')
    repo_cmd = [helm_client_location, "repo", "add", repo_name, repo_url]
    if kube_config:
        repo_cmd += ["--kubeconfig", kube_config]
    if kube_context:
        repo_cmd += ["--kube-context", kube_context]
    proc = Popen(repo_cmd, stdout=PIPE, stderr=PIPE)
    _, repo_error = proc.communicate()
    if proc.returncode != 0:
        error_text = repo_error.decode("ascii")
        telemetry.set_exception(exception=error_text, fault_type=consts.Add_HelmRepo_Fault_Type,
                                summary='Failed to add helm repository')
        raise CLIInternalError("Unable to add repository {} to helm: ".format(repo_url) + error_text)
def get_helm_registry(cmd, config_dp_endpoint, release_train_custom=None):
    """Resolve the helm chart registry path from the config dataplane endpoint.

    Queries the GetLatestHelmPackagePath API for the requested release train
    (release_train_custom > RELEASETRAIN env var > 'stable') and returns the
    'repositoryPath' field of the JSON response.
    Raises CLIInternalError (and records telemetry) on an empty or unparsable response.
    """
    # Setting uri
    api_version = "2019-11-01-preview"
    url_segment = "azure-arc-k8sagents/GetLatestHelmPackagePath?api-version={}".format(api_version)
    chart_location_url = "{}/{}".format(config_dp_endpoint, url_segment)
    # Precedence: explicit custom train, then env var, then 'stable'.
    release_train = release_train_custom or os.getenv('RELEASETRAIN') or 'stable'
    uri_parameters = ["releaseTrain={}".format(release_train)]
    resource = cmd.cli_ctx.cloud.endpoints.active_directory_resource_id
    headers = None
    access_token = os.getenv('AZURE_ACCESS_TOKEN')
    if access_token:
        headers = ["Authorization=Bearer {}".format(access_token)]
    # Sending request with retries
    r = send_request_with_retries(cmd.cli_ctx, 'post', chart_location_url, headers=headers,
                                  fault_type=consts.Get_HelmRegistery_Path_Fault_Type,
                                  summary='Error while fetching helm chart registry path',
                                  uri_parameters=uri_parameters, resource=resource)
    if not r.content:
        telemetry.set_exception(exception='No content in response', fault_type=consts.Get_HelmRegistery_Path_Fault_Type,
                                summary='No content in acr path response')
        raise CLIInternalError("No content was found in helm registry path response.")
    try:
        return r.json().get('repositoryPath')
    except Exception as e:
        telemetry.set_exception(exception=e, fault_type=consts.Get_HelmRegistery_Path_Fault_Type,
                                summary='Error while fetching helm chart registry path')
        raise CLIInternalError("Error while fetching helm chart registry path from JSON response: " + str(e))
def send_request_with_retries(cli_ctx, method, url, headers, fault_type, summary, uri_parameters=None, resource=None, retry_count=5, retry_delay=3):
    """Issue a raw ARM request, retrying up to *retry_count* times.

    Returns the response on first success; on the final failed attempt the
    exception is recorded in telemetry and re-raised as CLIInternalError.
    """
    for attempt in range(retry_count):
        try:
            return send_raw_request(cli_ctx, method, url, headers=headers, uri_parameters=uri_parameters, resource=resource)
        except Exception as e:
            if attempt + 1 == retry_count:
                telemetry.set_exception(exception=e, fault_type=fault_type, summary=summary)
                raise CLIInternalError("Error while fetching helm chart registry path: " + str(e))
            time.sleep(retry_delay)
def arm_exception_handler(ex, fault_type, summary, return_if_not_found=False):
    """Translate an exception raised during an ARM request into a CLI error.

    Records the exception in telemetry and re-raises it as the matching
    azclierror type. Checks are ordered by exception class; 4xx responses are
    marked as user faults, 5xx responses become AzureInternalError.

    :param ex: The caught exception.
    :param fault_type: Telemetry fault type string.
    :param summary: Telemetry summary string (also appended to the raised message).
    :param return_if_not_found: When True, a 404 (or ResourceNotFoundError)
        returns silently instead of raising — used by existence probes.
    """
    if isinstance(ex, AuthenticationError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise AzureResponseError("Authentication error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, TokenExpiredError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise AzureResponseError("Token expiration error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    # msrest-style HTTP error: status code lives on ex.response.
    if isinstance(ex, HttpOperationError):
        status_code = ex.response.status_code
        if status_code == 404 and return_if_not_found:
            return
        if status_code // 100 == 4:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        if status_code // 100 == 5:
            raise AzureInternalError("Http operation error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
        raise AzureResponseError("Http operation error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, MSRestValidationError):
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        raise AzureResponseError("Validation error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    # azure-core-style HTTP error: status code lives directly on the exception.
    if isinstance(ex, HttpResponseError):
        status_code = ex.status_code
        if status_code == 404 and return_if_not_found:
            return
        if status_code // 100 == 4:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
        if status_code // 100 == 5:
            raise AzureInternalError("Http response error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
        raise AzureResponseError("Http response error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
    if isinstance(ex, ResourceNotFoundError) and return_if_not_found:
        return
    # Fallback for any other exception type.
    telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
    raise ClientRequestError("Error occured while making ARM request: " + str(ex) + "\nSummary: {}".format(summary))
def kubernetes_exception_handler(ex, fault_type, summary, error_message='Error occured while connecting to the kubernetes cluster: ',
                                 message_for_unauthorized_request='The user does not have required privileges on the kubernetes cluster to deploy Azure Arc enabled Kubernetes agents. Please ensure you have cluster admin privileges on the cluster to onboard.',
                                 message_for_not_found='The requested kubernetes resource was not found.', raise_error=True):
    """Handle an exception from the kubernetes client, optionally raising a CLI error.

    Always marks the failure as a user fault in telemetry. For an ApiException,
    403/404 statuses log a targeted warning first; other statuses are logged at
    debug level. When *raise_error* is True the exception is recorded in
    telemetry and re-raised as ValidationError; otherwise it is only logged.

    :param ex: The caught exception.
    :param fault_type: Telemetry fault type string.
    :param summary: Telemetry summary string.
    :param error_message: Prefix for the raised ValidationError message.
    :param message_for_unauthorized_request: Warning shown on HTTP 403.
    :param message_for_not_found: Warning shown on HTTP 404.
    :param raise_error: Whether to raise ValidationError or just log.
    :raises ValidationError: when raise_error is True.
    """
    telemetry.set_user_fault()
    if isinstance(ex, ApiException):
        status_code = ex.status
        if status_code == 403:
            logger.warning(message_for_unauthorized_request)
        elif status_code == 404:
            logger.warning(message_for_not_found)
        else:
            logger.debug("Kubernetes Exception: " + str(ex))
        if raise_error:
            telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
            # ApiException carries the API server's response body; include it.
            raise ValidationError(error_message + "\nError Response: " + str(ex.body))
    else:
        if raise_error:
            telemetry.set_exception(exception=ex, fault_type=fault_type, summary=summary)
            raise ValidationError(error_message + "\nError: " + str(ex))
        else:
            logger.debug("Kubernetes Exception: " + str(ex))
def validate_infrastructure_type(infra):
    """Return the canonical infrastructure value matching *infra* (case-insensitive).

    Returns None when no match is found. The first enum value ("auto") is
    excluded from matching.
    """
    normalized = infra.lower()
    return next(
        (candidate for candidate in consts.Infrastructure_Enum_Values[1:] if candidate.lower() == normalized),
        None)
def get_values_file():
    """Return the helm values file path from the HELMVALUESPATH env var, or None.

    A single pair of surrounding quotes is trimmed (required on windows os).
    Returns None when the variable is unset or does not point to a file.
    """
    candidate = os.getenv('HELMVALUESPATH')
    if candidate is None or not os.path.isfile(candidate):
        return None
    logger.warning("Values files detected. Reading additional helm parameters from same.")
    # trimming required for windows os
    if candidate[:1] in ("'", '"'):
        candidate = candidate[1:]
    if candidate[-1:] in ("'", '"'):
        candidate = candidate[:-1]
    return candidate
def ensure_namespace_cleanup():
    """Wait (up to 180s) for the 'azure-arc' namespace to finish terminating.

    Polls the kubernetes API every 5 seconds until the namespace disappears.
    On timeout, warns the user to delete the namespace manually before
    re-onboarding; never raises.
    """
    api_instance = kube_client.CoreV1Api()
    timeout = time.time() + 180
    while True:
        if time.time() > timeout:
            # Namespace still terminating after 3 minutes: give up with guidance.
            telemetry.set_user_fault()
            logger.warning("Namespace 'azure-arc' still in terminating state. Please ensure that you delete the 'azure-arc' namespace before onboarding the cluster again.")
            return
        try:
            api_response = api_instance.list_namespace(field_selector='metadata.name=azure-arc')
            if not api_response.items:
                # Namespace is gone: cleanup complete.
                return
            time.sleep(5)
        except Exception as e:  # pylint: disable=broad-except
            logger.warning("Error while retrieving namespace information: " + str(e))
            kubernetes_exception_handler(e, consts.Get_Kubernetes_Namespace_Fault_Type, 'Unable to fetch kubernetes namespace',
                                         raise_error=False)
def delete_arc_agents(release_namespace, kube_config, kube_context, helm_client_location, is_arm64_cluster=False, no_hooks=False):
    """Delete the 'azure-arc' helm release and clean up Arc namespaces.

    Raises CLIInternalError (and records telemetry) when the helm deletion fails;
    known user-caused failures (forbidden, hook/timeout errors) are flagged as
    user faults.
    """
    delete_cmd = [helm_client_location, "delete", "azure-arc", "--namespace", release_namespace]
    if no_hooks:
        delete_cmd.append("--no-hooks")
    if is_arm64_cluster:
        delete_cmd += ["--timeout", "15m"]
    if kube_config:
        delete_cmd += ["--kubeconfig", kube_config]
    if kube_context:
        delete_cmd += ["--kube-context", kube_context]
    proc = Popen(delete_cmd, stdout=PIPE, stderr=PIPE)
    _, delete_error = proc.communicate()
    if proc.returncode != 0:
        error_text = delete_error.decode("ascii")
        user_fault_markers = ('forbidden', 'Error: warning: Hook pre-delete', 'Error: timed out waiting for the condition')
        if any(marker in error_text for marker in user_fault_markers):
            telemetry.set_user_fault()
        telemetry.set_exception(exception=error_text, fault_type=consts.Delete_HelmRelease_Fault_Type,
                                summary='Unable to delete helm release')
        raise CLIInternalError("Error occured while cleaning up arc agents. " +
                               "Helm release deletion failed: " + error_text +
                               " Please run 'helm delete azure-arc --namespace {}' to ensure that the release is deleted.".format(release_namespace))
    ensure_namespace_cleanup()
    # Cleanup azure-arc-release NS if present (created during helm installation)
    cleanup_release_install_namespace_if_exists()
def cleanup_release_install_namespace_if_exists():
    """Delete the helm release-install namespace if it exists.

    Reads consts.Release_Install_Namespace; a 404 on the read means there is
    nothing to clean up. Any other failure (read or delete) is routed through
    kubernetes_exception_handler, which raises ValidationError.
    """
    api_instance = kube_client.CoreV1Api()
    try:
        api_instance.read_namespace(consts.Release_Install_Namespace)
    except Exception as ex:
        # Bug fix: use getattr — a non-ApiException failure (e.g. a connection
        # error) has no 'status' attribute and the original 'ex.status' access
        # raised AttributeError instead of reporting the real problem.
        if getattr(ex, 'status', None) == 404:
            # Nothing to delete, exiting here
            return
        kubernetes_exception_handler(ex, consts.Get_Kubernetes_Helm_Release_Namespace_Fault_Type, error_message='Unable to fetch details about existense of kubernetes namespace: {}'.format(consts.Release_Install_Namespace), summary='Unable to fetch kubernetes namespace: {}'.format(consts.Release_Install_Namespace))
    # If namespace exists, delete it
    try:
        api_instance.delete_namespace(consts.Release_Install_Namespace)
    except Exception as ex:
        kubernetes_exception_handler(ex, consts.Delete_Kubernetes_Helm_Release_Namespace_Fault_Type, error_message='Unable to clean-up kubernetes namespace: {}'.format(consts.Release_Install_Namespace), summary='Unable to delete kubernetes namespace: {}'.format(consts.Release_Install_Namespace))
# DO NOT use this method for re-put scenarios. This method involves new NS creation for helm release. For re-put scenarios, brownfield scenario needs to be handled where helm release still stays in default NS
def helm_install_release(resource_manager, chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name,
                         cluster_name, location, onboarding_tenant_id, http_proxy, https_proxy, no_proxy, proxy_cert, private_key_pem,
                         kube_config, kube_context, no_wait, values_file, cloud_name, disable_auto_upgrade, enable_custom_locations,
                         custom_locations_oid, helm_client_location, enable_private_link, arm_metadata, onboarding_timeout="600",
                         container_log_path=None):
    """Install (or upgrade) the 'azure-arc' helm release on the target cluster.

    Builds a `helm upgrade --install` command from the onboarding parameters,
    optionally layering in ARM dataplane endpoint overrides, custom-locations
    settings, proxy configuration and a user values file, then runs the helm
    client and raises CLIInternalError on a non-zero exit code.
    """
    # Base command: chart, identity/location of the connected cluster, and the
    # dedicated release-install namespace (created if absent).
    cmd_helm_install = [helm_client_location, "upgrade", "--install", "azure-arc", chart_path,
                        "--set", "global.subscriptionId={}".format(subscription_id),
                        "--set", "global.kubernetesDistro={}".format(kubernetes_distro),
                        "--set", "global.kubernetesInfra={}".format(kubernetes_infra),
                        "--set", "global.resourceGroupName={}".format(resource_group_name),
                        "--set", "global.resourceName={}".format(cluster_name),
                        "--set", "global.location={}".format(location),
                        "--set", "global.tenantId={}".format(onboarding_tenant_id),
                        "--set", "global.onboardingPrivateKey={}".format(private_key_pem),
                        "--set", "systemDefaultValues.spnOnboarding=false",
                        "--set", "global.azureEnvironment={}".format(cloud_name),
                        "--set", "systemDefaultValues.clusterconnect-agent.enabled=true",
                        "--namespace", "{}".format(consts.Release_Install_Namespace),
                        "--create-namespace",
                        "--output", "json"]
    # Special configurations from 2022-09-01 ARM metadata.
    if "dataplaneEndpoints" in arm_metadata:
        notification_endpoint = arm_metadata["dataplaneEndpoints"]["arcGlobalNotificationServiceEndpoint"]
        config_endpoint = arm_metadata["dataplaneEndpoints"]["arcConfigEndpoint"]
        his_endpoint = arm_metadata["dataplaneEndpoints"]["arcHybridIdentityServiceEndpoint"]
        # The HIS discovery URL requires a trailing slash before the query string.
        if his_endpoint[-1] != "/":
            his_endpoint = his_endpoint + "/"
        his_endpoint = his_endpoint + f"discovery?location={location}&api-version=1.0-preview"
        relay_endpoint = arm_metadata["suffixes"]["relayEndpointSuffix"]
        active_directory = arm_metadata["authentication"]["loginEndpoint"]
        cmd_helm_install.extend(
            [
                "--set", "systemDefaultValues.azureResourceManagerEndpoint={}".format(resource_manager),
                "--set", "systemDefaultValues.azureArcAgents.config_dp_endpoint_override={}".format(config_endpoint),
                "--set", "systemDefaultValues.clusterconnect-agent.notification_dp_endpoint_override={}".format(notification_endpoint),
                "--set", "systemDefaultValues.clusterconnect-agent.relay_endpoint_suffix_override={}".format(relay_endpoint),
                "--set", "systemDefaultValues.clusteridentityoperator.his_endpoint_override={}".format(his_endpoint),
                "--set", "systemDefaultValues.activeDirectoryEndpoint={}".format(active_directory)
            ]
        )
    # Add custom-locations related params
    if enable_custom_locations and not enable_private_link:
        cmd_helm_install.extend(["--set", "systemDefaultValues.customLocations.enabled=true"])
        cmd_helm_install.extend(["--set", "systemDefaultValues.customLocations.oid={}".format(custom_locations_oid)])
    # Disable cluster connect if private link is enabled
    if enable_private_link is True:
        cmd_helm_install.extend(["--set", "systemDefaultValues.clusterconnect-agent.enabled=false"])
    # To set some other helm parameters through file
    if values_file:
        cmd_helm_install.extend(["-f", values_file])
    if disable_auto_upgrade:
        cmd_helm_install.extend(["--set", "systemDefaultValues.azureArcAgents.autoUpdate={}".format("false")])
    # Proxy configuration: per-protocol proxies plus an optional trusted cert.
    if https_proxy:
        cmd_helm_install.extend(["--set", "global.httpsProxy={}".format(https_proxy)])
    if http_proxy:
        cmd_helm_install.extend(["--set", "global.httpProxy={}".format(http_proxy)])
    if no_proxy:
        cmd_helm_install.extend(["--set", "global.noProxy={}".format(no_proxy)])
    if proxy_cert:
        cmd_helm_install.extend(["--set-file", "global.proxyCert={}".format(proxy_cert)])
        cmd_helm_install.extend(["--set", "global.isCustomCert={}".format(True)])
    if https_proxy or http_proxy or no_proxy:
        cmd_helm_install.extend(["--set", "global.isProxyEnabled={}".format(True)])
    if container_log_path is not None:
        cmd_helm_install.extend(["--set", "systemDefaultValues.fluent-bit.containerLogPath={}".format(container_log_path)])
    if kube_config:
        cmd_helm_install.extend(["--kubeconfig", kube_config])
    if kube_context:
        cmd_helm_install.extend(["--kube-context", kube_context])
    if not no_wait:
        # Change --timeout format for helm client to understand
        onboarding_timeout = onboarding_timeout + "s"
        cmd_helm_install.extend(["--wait", "--timeout", "{}".format(onboarding_timeout)])
    # Run helm; only stderr is inspected, stdout (json) is discarded here.
    response_helm_install = Popen(cmd_helm_install, stdout=PIPE, stderr=PIPE)
    _, error_helm_install = response_helm_install.communicate()
    if response_helm_install.returncode != 0:
        helm_install_error_message = error_helm_install.decode("ascii")
        # Known user-fault messages are flagged as such for telemetry.
        if any(message in helm_install_error_message for message in consts.Helm_Install_Release_Userfault_Messages):
            telemetry.set_user_fault()
        telemetry.set_exception(exception=helm_install_error_message, fault_type=consts.Install_HelmRelease_Fault_Type,
                                summary='Unable to install helm release')
        logger.warning("Please check if the azure-arc namespace was deployed and run 'kubectl get pods -n azure-arc' to check if all the pods are in running state. A possible cause for pods stuck in pending state could be insufficient resources on the kubernetes cluster to onboard to arc.")
        raise CLIInternalError("Unable to install helm release: " + error_helm_install.decode("ascii"))
def get_release_namespace(kube_config, kube_context, helm_client_location, release_name='azure-arc'):
    """Return the namespace of the given helm release, or None when not found."""
    list_cmd = [helm_client_location, "list", "-a", "--all-namespaces", "--output", "json"]
    if kube_config:
        list_cmd.extend(["--kubeconfig", kube_config])
    if kube_context:
        list_cmd.extend(["--kube-context", kube_context])
    proc = Popen(list_cmd, stdout=PIPE, stderr=PIPE)
    stdout_data, stderr_data = proc.communicate()
    if proc.returncode != 0:
        error_text = stderr_data.decode("ascii")
        # Authorization / connectivity problems are user faults, not tool bugs.
        if 'forbidden' in error_text or "Kubernetes cluster unreachable" in error_text:
            telemetry.set_user_fault()
        telemetry.set_exception(exception=error_text, fault_type=consts.List_HelmRelease_Fault_Type,
                                summary='Unable to list helm release')
        raise CLIInternalError("Helm list release failed: " + error_text)
    try:
        releases = json.loads(stdout_data.decode("ascii"))
    except json.decoder.JSONDecodeError:
        return None
    # Find the first release matching the requested name.
    for release in releases:
        if release['name'] == release_name:
            return release['namespace']
    return None
def flatten(dd, separator='.', prefix=''):
    """Recursively flatten a nested dict into a single-level dict.

    Nested keys are joined with `separator`, e.g. ``{'a': {'b': 1}}`` becomes
    ``{'a.b': 1}``. A non-dict value is returned as ``{prefix: value}``.

    Raises:
        CLIInternalError: if anything goes wrong while flattening; the original
            exception is preserved via chaining (previously it was discarded).
    """
    try:
        if not isinstance(dd, dict):
            return {prefix: dd}
        return {
            prefix + separator + k if prefix else k: v
            for kk, vv in dd.items()
            for k, v in flatten(vv, separator, kk).items()
        }
    except Exception as e:
        telemetry.set_exception(exception=e, fault_type=consts.Error_Flattening_User_Supplied_Value_Dict,
                                summary='Error while flattening the user supplied helm values dict')
        # Chain the original exception so the root cause is not lost.
        raise CLIInternalError("Error while flattening the user supplied helm values dict") from e
def check_features_to_update(features_to_update):
    """Return which of the three known features appear in `features_to_update`.

    Returns a tuple of booleans: (cluster-connect, azure-rbac, custom-locations).
    """
    update_cluster_connect = "cluster-connect" in features_to_update
    update_azure_rbac = "azure-rbac" in features_to_update
    update_cl = "custom-locations" in features_to_update
    return update_cluster_connect, update_azure_rbac, update_cl
def user_confirmation(message, yes=False):
    """Ask the user to confirm `message`; a truthy `yes` skips the prompt.

    Raises ManualInterrupt when the user declines, CLIInternalError when no
    tty is available to prompt on.
    """
    if yes:
        return
    try:
        confirmed = prompt_y_n(message)
    except NoTTYException:
        raise CLIInternalError('Unable to prompt for confirmation as no tty available. Use --yes.')
    if not confirmed:
        raise ManualInterrupt('Operation cancelled.')
def is_guid(guid):
    """Return True when the given string parses as a UUID/GUID."""
    import uuid
    try:
        uuid.UUID(guid)
    except ValueError:
        return False
    return True
def try_list_node_fix():
    """Monkey-patch the kubernetes client's V1ContainerImage.names setter.

    Replaces the property setter with one that stores the value without
    validation; presumably this works around deserialization failures in
    list_node() for nodes reporting container images without names —
    NOTE(review): confirm against the kubernetes-client issue tracker.
    Failures are logged at debug level and otherwise ignored.
    """
    try:
        from kubernetes.client.models.v1_container_image import V1ContainerImage
        # Replacement setter: assign the raw value with no validation.
        def names(self, names):
            self._names = names
        V1ContainerImage.names = V1ContainerImage.names.setter(names)
    except Exception as ex:
        logger.debug("Error while trying to monkey patch the fix for list_node(): {}".format(str(ex)))
def check_provider_registrations(cli_ctx, subscription_id):
    """Verify required resource providers are registered on the subscription.

    Microsoft.Kubernetes must be registered (raises ValidationError otherwise);
    an unregistered Microsoft.KubernetesConfiguration only logs a warning.
    Any other failure while checking is logged and swallowed (best effort).
    """
    try:
        rp_client = resource_providers_client(cli_ctx, subscription_id)
        cc_registration_state = rp_client.get(consts.Connected_Cluster_Provider_Namespace).registration_state
        if cc_registration_state != "Registered":
            telemetry.set_exception(exception="{} provider is not registered".format(consts.Connected_Cluster_Provider_Namespace), fault_type=consts.CC_Provider_Namespace_Not_Registered_Fault_Type,
                                    summary="{} provider is not registered".format(consts.Connected_Cluster_Provider_Namespace))
            raise ValidationError("{} provider is not registered. Please register it using 'az provider register -n 'Microsoft.Kubernetes' before running the connect command.".format(consts.Connected_Cluster_Provider_Namespace))
        kc_registration_state = rp_client.get(consts.Kubernetes_Configuration_Provider_Namespace).registration_state
        if kc_registration_state != "Registered":
            telemetry.set_user_fault()
            logger.warning("{} provider is not registered".format(consts.Kubernetes_Configuration_Provider_Namespace))
    except ValidationError as e:
        # Re-raise the hard failure raised above; everything else is best-effort.
        raise e
    except Exception as ex:
        logger.warning("Couldn't check the required provider's registration status. Error: {}".format(str(ex)))
def can_create_clusterrolebindings():
    """Check via SelfSubjectAccessReview whether the caller may create clusterrolebindings.

    Returns True/False from the API server, or the string "Unknown" when the
    check itself fails.
    """
    try:
        auth_api = kube_client.AuthorizationV1Api()
        review = kube_client.V1SelfSubjectAccessReview(spec={
            "resourceAttributes": {
                "verb": "create",
                "resource": "clusterrolebindings",
                "group": "rbac.authorization.k8s.io"
            }
        })
        result = auth_api.create_self_subject_access_review(review)
        return result.status.allowed
    except Exception as e:
        logger.warning("Couldn't check for the permission to create clusterrolebindings on this k8s cluster. Error: {}".format(str(e)))
        return "Unknown"
def validate_node_api_response(api_instance, node_api_response):
    """Return the cached node list if provided; otherwise fetch it, or None on failure."""
    if node_api_response is not None:
        return node_api_response
    try:
        return api_instance.list_node()
    except Exception as e:
        logger.debug("Error occcured while listing nodes on this kubernetes cluster: {}".format(str(e)))
        return None
def az_cli(args_str):
    """Invoke an az CLI command in-process, discarding its console output.

    Returns the command's result object when present, True when the command
    produced no result, and raises when the CLI reported an error.
    """
    args = args_str.split()
    cli = get_default_cli()
    # BUG FIX: the devnull handle was previously opened and never closed
    # (a file-descriptor leak); a context manager guarantees closure.
    with open(os.devnull, 'w') as devnull:
        cli.invoke(args, out_file=devnull)
    if cli.result.result:
        return cli.result.result
    elif cli.result.error:
        raise Exception(cli.result.error)
    return True
# def is_cli_using_msal_auth():
# response_cli_version = az_cli("version --output json")
# try:
# cli_version = response_cli_version['azure-cli']
# except Exception as ex:
# raise CLIInternalError("Unable to decode the az cli version installed: {}".format(str(ex)))
# if version.parse(cli_version) >= version.parse(consts.AZ_CLI_ADAL_TO_MSAL_MIGRATE_VERSION):
# return True
# else:
# return False
def is_cli_using_msal_auth():
    """Return True when the installed az CLI version is at or past the ADAL->MSAL migration version.

    Raises:
        CLIInternalError: when the installed CLI version cannot be read.
    """
    response_cli_version = az_cli("version --output json")
    try:
        cli_version = response_cli_version['azure-cli']
    except Exception as ex:
        raise CLIInternalError("Unable to decode the az cli version installed: {}".format(str(ex)))
    installed = [int(part) for part in cli_version.split(".")]
    migration = [int(part) for part in consts.AZ_CLI_ADAL_TO_MSAL_MIGRATE_VERSION.split(".")]
    # BUG FIX: pad the shorter version with zeros so e.g. "2.30" compares equal
    # to "2.30.0". The previous implementation compared component counts after
    # an equal zipped prefix, wrongly treating such versions as different.
    width = max(len(installed), len(migration))
    installed += [0] * (width - len(installed))
    migration += [0] * (width - len(migration))
    return installed >= migration
def get_metadata(arm_endpoint, api_version="2022-09-01"):
    """Fetch and return the ARM metadata document from `arm_endpoint`.

    On any failure the error is printed to stderr and handed to
    arm_exception_handler.
    """
    metadata_endpoint = None
    try:
        import requests
        session = requests.Session()
        metadata_endpoint = arm_endpoint + f"/metadata/endpoints?api-version={api_version}"
        print(f"Retrieving ARM metadata from: {metadata_endpoint}")
        response = session.get(metadata_endpoint)
        if response.status_code != 200:
            raise HttpResponseError(f"ARM metadata endpoint '{metadata_endpoint}' returned status code {response.status_code}.")
        return response.json()
    except Exception as err:
        msg = f"Failed to request ARM metadata {metadata_endpoint}."
        print(msg, file=sys.stderr)
        print(f"Please ensure you have network connection. Error: {str(err)}", file=sys.stderr)
        arm_exception_handler(err, msg)
|
def fizz_buzz(n):
# Write your code here
p = ""
for i in range(1, n + 1):
if i % 3 == 0 and i % 5 == 0:
p = "FizzBuzz"
elif i % 3 == 0:
p = "Fizz"
elif i % 5 == 0:
p = "Buzz"
else:
p = i
print(f'{p}.')
if __name__ == "__main__":
fizz_buzz(15)
|
from tests.modules.FlaskModule.API.user.BaseUserAPITest import BaseUserAPITest
from opentera.db.models.TeraDevice import TeraDevice
class UserQueryDeviceSubTypesTest(BaseUserAPITest):
    """API tests for /api/user/devicesubtypes: auth requirements, access rules,
    and CRUD behavior for device subtypes."""

    test_endpoint = '/api/user/devicesubtypes'

    def test_no_auth(self):
        """GET without credentials must return 401."""
        with self._flask_app.app_context():
            response = self.test_client.get(self.test_endpoint)
            self.assertEqual(401, response.status_code)

    def test_post_no_auth(self):
        """POST without credentials must return 401."""
        with self._flask_app.app_context():
            response = self.test_client.post(self.test_endpoint)
            self.assertEqual(401, response.status_code)

    def test_delete_no_auth(self):
        """DELETE without credentials must return 401."""
        with self._flask_app.app_context():
            response = self.test_client.delete(self.test_endpoint)
            self.assertEqual(response.status_code, 401)

    def test_query_no_params_as_admin(self):
        """GET with no filters succeeds for an admin."""
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin')
        self.assertEqual(response.status_code, 200)

    def _checkJson(self, json_data, minimal=False):
        """Assert every returned subtype carries the expected fields."""
        for js in json_data:
            self.assertGreater(len(js), 0)
            self.assertTrue(js.__contains__('device_subtype_name'))
            self.assertTrue(js.__contains__('id_device_subtype'))
            self.assertTrue(js.__contains__('id_device_type'))
            self.assertTrue(js.__contains__('device_subtype_parent'))

    def test_query_get_as_admin(self):
        """GET with various filters: invalid ids are 403, valid ids return data."""
        params = {'id_device_type': 0, 'list': False}
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin', params=params)
        self.assertEqual(response.status_code, 403)
        params = {'id_device_subtype': 1, 'list': False}
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin', params=params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        json_data = response.json
        self.assertEqual(len(json_data), 1)
        self._checkJson(json_data=json_data)
        params = {'id_device_subtype': 2, 'list': True}
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin', params=params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        json_data = response.json
        self.assertEqual(len(json_data), 1)
        self._checkJson(json_data=json_data)
        params = {'id_device_subtype': 5, 'list': False}
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin', params=params)
        self.assertEqual(response.status_code, 403)
        params = {'id_device_type': 4, 'list': True}
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin', params=params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        json_data = response.json
        self._checkJson(json_data=json_data)

    def test_query_post_as_admin(self):
        """POST as admin: create, duplicate-name handling, bad ids, updates, cleanup."""
        params = {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0, 'id_device_type': 2}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 400, msg='Missing device_subtype')
        new_id = []
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0,
                                     'id_device_type': 2}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 200)
        new_id.append(response.json[0]['id_device_subtype'])
        self._checkJson(json_data=response.json)
        # Create same name but different id_device_type - Pass expected
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0,
                                     'id_device_type': 3}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 200)
        new_id.append(response.json[0]['id_device_subtype'])
        self._checkJson(json_data=response.json)
        # Create id_device_type wrong - 500 expected
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0,
                                     'id_device_type': 10}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 500)
        # update name without id_device_type, accepted
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype_2', 'id_device_subtype': new_id[0]}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 200)
        # update the name - Pass expected
        params = {'device_subtype': {'id_device_subtype': new_id[0], 'id_device_type': 2,
                                     'device_subtype_name': 'New_Device_Subtype_2'}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 200)
        self._checkJson(json_data=response.json)
        # Update the ID of an unexisting device
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': new_id[1]+1,
                                     'id_device_type': 3}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                  json=params)
        self.assertEqual(response.status_code, 400)
        # Delete the objects created by the test
        for id_to_del in new_id:
            response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                        params={'id': id_to_del})
            self.assertEqual(response.status_code, 200)

    def test_query_post_as_user(self):
        """POST is forbidden (403) for all non-superadmin users."""
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0,
                                     'id_device_type': 2}}
        response = self._post_with_user_http_auth(self.test_client, username='user4', password='user4', json=params)
        self.assertEqual(response.status_code, 403)
        response = self._post_with_user_http_auth(self.test_client, username='siteadmin', password='siteadmin',
                                                  json=params)
        self.assertEqual(response.status_code, 403)
        response = self._post_with_user_http_auth(self.test_client, username='user3', password='user3', json=params)
        self.assertEqual(response.status_code, 403)

    def test_query_delete_as_admin(self):
        """DELETE as admin: missing param is 400; a subtype in use returns 500; free subtype deletes."""
        with self._flask_app.app_context():
            params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0,
                                         'id_device_type': 2}}
            response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin', json=params)
            self.assertEqual(response.status_code, 200)
            new_id = response.json[0]['id_device_subtype']
            self._checkJson(json_data=response.json)
            # Delete without param
            response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin')
            self.assertEqual(response.status_code, 400)
            # Create a new device of that subtype
            json_device = {
                'id_device': 0,
                'id_device_subtype': new_id,
                'id_device_type': 1,
                'device_name': 'Test Device'
            }
            device = TeraDevice()
            device.from_json(json_device)
            TeraDevice.insert(device)
            # Deleting the new device type
            response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                        params={'id': new_id})
            self.assertEqual(response.status_code, 500, msg='Device of that subtype exists')
            TeraDevice.delete(device.id_device)
            response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                        params={'id': new_id})
            self.assertEqual(response.status_code, 200)

    def test_query_delete_as_user(self):
        """DELETE is forbidden (403) for non-superadmin users; admin cleans up."""
        params = {'device_subtype': {'device_subtype_name': 'New_Device_Subtype', 'id_device_subtype': 0,
                                     'id_device_type': 2}}
        response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin', json=params)
        self.assertEqual(response.status_code, 200)
        new_id = response.json[0]['id_device_subtype']
        self._checkJson(json_data=response.json)
        response = self._delete_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                    params={'id': new_id})
        self.assertEqual(response.status_code, 403)
        response = self._delete_with_user_http_auth(self.test_client, username='siteadmin', password='siteadmin',
                                                    params={'id': new_id})
        self.assertEqual(response.status_code, 403)
        response = self._delete_with_user_http_auth(self.test_client, username='user3', password='user3',
                                                    params={'id': new_id})
        self.assertEqual(response.status_code, 403)
        # Deleting the new device type
        response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                    params={'id': new_id})
        self.assertEqual(response.status_code, 200)
|
# ImageNet dataset
# http://image-net.org/download-images
from Constants import *
from Functions import *
from fastText import *
import string
import glob
import os
import re
def get_image_folders(path=IMAGENET_IMAGES_DIR):
    """Return the names of all entries in the ImageNet images directory."""
    return os.listdir(r"{}".format(path))
def get_image_files(path):
    """Return the paths of all .JPEG files directly inside `path`."""
    return glob.glob(r"{}".format(path) + "/*.JPEG")
def read_index():
    """Parse the ImageNet index file into a {folder_id: description} dict.

    Each line is split once on whitespace: the first token is the folder id,
    the remainder is the description.
    """
    with open(IMAGENET_INDEX_DIR) as f:
        return dict(line.rstrip().split(None, 1) for line in f)
def updateIndex():
    """Return the index restricted to folders actually present on disk."""
    index = read_index()
    return {folder: index[folder] for folder in get_image_folders() if folder in index}
def searchTerm(index, term):
    """Find index entries whose description contains `term` as a whole word.

    Args:
        index: mapping of folder id -> description string.
        term: word to look for (punctuation around words is stripped).

    Returns:
        (numbers, icon_list): parallel lists of matching folder ids and their
        full description strings.
    """
    icon_list = []
    numbers = []
    for number, words in index.items():
        # Split the description into words with surrounding punctuation removed.
        # (Previously this list shadowed the builtin `list`.)
        tokens = [word.strip(string.punctuation) for word in words.split()]
        if term in tokens:
            icon_list.append(words)
            numbers.append(number)
    return numbers, icon_list
# semi-automatic search - user choice
def semi(term, icon_type):
    """Search the local ImageNet index for `term` and let the user pick the icon.

    Returns (image_paths, term, icon_type) for the chosen icon, or None when
    nothing matches.
    """
    # Index of words/terms present in the local dataset.
    index = updateIndex()
    # Folder numbers and icon descriptions for entries matching the term.
    numbers, icon_list = searchTerm(index, term)
    if not icon_list:
        return None
    print('Search results for icons with the term', term, ':')
    jprint(icon_list)
    # Ask the user to choose one of the matched icons by name.
    icon_name = input('Type name of chosen icon: ')
    number = numbers[icon_list.index(icon_name)]
    source = IMAGENET_IMAGES_DIR + number + '/images'
    return get_image_files(source), term, icon_type
# automatic search using fastText
def auto(term, type, icon_type):
    """Search the local ImageNet index for `term`, choosing the icon via fastText.

    When `type` is 'opposite', the least similar (or two most similar,
    depending on `icon_type`) icon is selected; otherwise the most similar.
    Returns (image_paths, term, icon_type), or None when nothing matches.
    """
    # Index of words/terms present in the local dataset.
    index = updateIndex()
    # Folder numbers and icon descriptions for entries matching the term.
    numbers, icon_list = searchTerm(index, term)
    if not icon_list:
        return None
    jprint(icon_list)
    if type == 'opposite':
        if icon_type == 'least':
            icon_name = getLeastSimilar(term, icon_list)
        else:
            icon_name = get2MostSimilar(term, icon_list)
    else:
        icon_name = getMostSimilar(term, icon_list)
    print('Icon chosen by fastText: ', icon_name)
    number = numbers[icon_list.index(icon_name)]
    source = IMAGENET_IMAGES_DIR + number + '/images'
    return get_image_files(source), term, icon_type
def searchImageNet(term, type, icon_type):
    """Dispatch an ImageNet search: 'semi' is user-driven, anything else is automatic."""
    if type == 'semi':
        return semi(term, icon_type)
    # 'auto' and 'opposite' both go through the automatic path.
    return auto(term, type, icon_type)
from pathlib import Path
import cv2
import numpy as np
import torch
import torchvision
from PIL import Image
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
import os
if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, 'oxford_robotcar'))):
from data_loader.oxford_robotcar.interpolate_poses import interpolate_poses
from utils import map_fn
class TUMRGBDDataset(Dataset):
    """Dataset implementation for TUM RGBD sequences.

    Yields, per index, a keyframe with its pose/intrinsics, the surrounding
    frames (spaced by `dilation`), and the keyframe's inverse depth map.
    """

    # Alternative calibration kept for reference (presumably a different
    # TUM sequence set — TODO confirm which one):
    # _intrinsics = torch.tensor(
    #     [[517.3, 0, 318.6, 0],
    #      [0, 516.5, 255.3, 0],
    #      [0, 0, 1, 0],
    #      [0, 0, 0, 1]
    #     ], dtype=torch.float32)

    # Pinhole intrinsics as a 4x4 homogeneous matrix.
    _intrinsics = torch.tensor(
        [[535.4, 0, 320.1, 0],
         [0, 539.2, 247.6, 0],
         [0, 0, 1, 0],
         [0, 0, 0, 1]
        ], dtype=torch.float32)
    # TUM depth PNGs store depth * 5000; the 1.035 factor is presumably a
    # calibration correction — TODO confirm.
    _depth_scale = 1.035 / 5000.
    # Axis permutation (swaps y and z); it is its own inverse.
    _swapaxes = torch.tensor([[[1, 0, 0, 0],
                               [0, 0, 1, 0],
                               [0, 1, 0, 0],
                               [0, 0, 0, 1]]], dtype=torch.float32)
    _swapaxes_ = torch.inverse(_swapaxes)

    def __init__(self, dataset_dir, frame_count=2, target_image_size=(480, 640), dilation=1):
        """
        Dataset implementation for TUM RGBD.

        Args:
            dataset_dir: sequence directory containing rgb.txt, depth.txt, groundtruth.txt.
            frame_count: number of non-keyframe frames per sample.
            target_image_size: (height, width) of the images.
            dilation: index spacing between consecutive frames of a sample.
        """
        self.dataset_dir = Path(dataset_dir)
        self.frame_count = frame_count
        self.dilation = dilation
        self.target_image_size = target_image_size
        (rgb_times, self._rgb_paths) = self.load_file_times(self.dataset_dir / "rgb.txt")
        (pose_times, self._raw_poses) = self.load_pose_times(self.dataset_dir / "groundtruth.txt")
        (depth_times, self._depth_paths) = self.load_file_times(self.dataset_dir / "depth.txt")
        # For every rgb timestamp, the nearest pose and depth indices.
        self._image_index = self.build_image_index(rgb_times, pose_times, depth_times)
        # Poses interpolated to the rgb timestamps.
        self._poses = self.build_pose(pose_times, self._raw_poses, rgb_times)
        self._offset = (frame_count // 2) * self.dilation
        self._length = self._image_index.shape[0] - frame_count * dilation

    def __getitem__(self, index: int):
        """Return (data_dict, keyframe_depth) for the sample starting at `index`."""
        frame_count = self.frame_count
        offset = self._offset
        keyframe_intrinsics = self._intrinsics
        keyframe = self.open_image(index + offset)
        keyframe_pose = self._poses[index + offset]
        keyframe_depth = self.open_depth(index + offset)
        # Surrounding frames, skipping the keyframe position.
        frames = [self.open_image(index + i) for i in range(0, (frame_count + 1) * self.dilation, self.dilation) if i != offset]
        intrinsics = [self._intrinsics for _ in range(frame_count)]
        poses = [self._poses[index + i] for i in range(0, (frame_count + 1) * self.dilation, self.dilation) if i != offset]
        data = {
            "keyframe": keyframe,
            "keyframe_pose": keyframe_pose,
            "keyframe_intrinsics": keyframe_intrinsics,
            "frames": frames,
            "poses": poses,
            "intrinsics": intrinsics,
            "sequence": torch.tensor([0]),
            "image_id": torch.tensor([index + offset])
        }
        return data, keyframe_depth

    def __len__(self) -> int:
        return self._length

    def build_pose(self, pose_times, poses, rgb_times):
        """Interpolate ground-truth poses to the rgb timestamps."""
        return torch.tensor(np.array(interpolate_poses(pose_times.tolist(), list(poses), rgb_times.tolist(), rgb_times[0])), dtype=torch.float32)

    def build_image_index(self, rgb_times, pose_times, depth_times):
        """For each rgb timestamp, find the nearest pose and depth indices.

        Exploits that all three timestamp arrays are sorted: the pose/depth
        cursors only ever move forward.
        """
        curr_pose_i = 0
        curr_depth_i = 0
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # exact equivalent of the old alias.
        image_index = np.zeros((rgb_times.shape[0], 2), dtype=int)
        for i, timestamp in enumerate(rgb_times):
            while (curr_pose_i + 1 < pose_times.shape[0]) and abs(timestamp - pose_times[curr_pose_i]) > abs(timestamp - pose_times[curr_pose_i + 1]):
                curr_pose_i += 1
            while (curr_depth_i + 1 < depth_times.shape[0]) and abs(timestamp - depth_times[curr_depth_i]) > abs(timestamp - depth_times[curr_depth_i + 1]):
                curr_depth_i += 1
            image_index[i, 0] = curr_pose_i
            image_index[i, 1] = curr_depth_i
        return image_index

    def load_file_times(self, file):
        """Parse a TUM file list (rgb.txt / depth.txt) into (timestamps, paths)."""
        with open(file, "r") as f:
            lines = f.readlines()
        # The first three lines are comments.
        lines = lines[3:]
        pairs = [l.split(" ") for l in lines]
        times = np.array([float(p[0]) for p in pairs])
        # Strip the trailing newline from each path.
        paths = [p[1][:-1] for p in pairs]
        return times, paths

    def load_pose_times(self, file):
        """Parse groundtruth.txt into (timestamps, 4x4 pose matrices).

        Each line is: time tx ty tz qx qy qz qw; quaternions are reordered to
        (qw, qx, qy, qz) before conversion to rotation matrices.
        """
        with open(file, "r") as f:
            lines = f.readlines()
        # The first three lines are comments.
        lines = lines[3:]
        data = np.genfromtxt(lines, dtype=np.float64)
        times = data[:, 0]
        ts = torch.tensor(data[:, 1:4])
        qs = torch.tensor(data[:, [7, 4, 5, 6]])
        rs = torch.eye(4).unsqueeze(0).repeat(qs.shape[0], 1, 1)
        rs[:, :3, :3] = torch.tensor(Rotation.from_quat(qs).as_matrix())
        rs[:, :3, 3] = ts
        poses = rs.to(torch.float32)
        poses[:, :3, 3] = ts
        poses[:, 3, 3] = 1
        return times, poses

    def open_image(self, index):
        """Load the rgb image at `index` as a (3, H, W) float tensor in [-0.5, 0.5]."""
        i = torch.tensor(np.asarray(Image.open(self.dataset_dir / self._rgb_paths[index])), dtype=torch.float32)
        i = i / 255 - .5
        i = i.permute(2, 0, 1)
        return i

    def open_depth(self, index):
        """Load the depth map nearest to rgb frame `index` as inverse depth (1, H, W).

        Pixels with no depth measurement stay 0.
        """
        d = torch.tensor(np.asarray(Image.open(self.dataset_dir / self._depth_paths[self._image_index[index, 1]])), dtype=torch.float32)
        invalid = d == 0
        d = 1 / (d * self._depth_scale)
        d[invalid] = 0
        return d.unsqueeze(0)
from unittest import TestCase
from pprint import pprint
from constants import Constants
from fortifyapi import FortifySSCClient, Query
class TestArtifacts(TestCase):
    """Integration tests for Fortify SSC artifact upload/listing via fortifyapi.

    Requires a reachable SSC instance configured through Constants.
    """

    c = Constants()

    def test_version_artifact(self):
        """Upload an FPR to a fresh project version and verify it appears in the artifact list."""
        client = FortifySSCClient(self.c.url, self.c.token)
        self.c.setup_proxy(client)
        pname = 'Unit Test Python - Artifact'
        pv = client.projects.upsert(pname, 'default')
        self.assertIsNotNone(pv)
        self.assertTrue(pv['committed'])
        # A freshly created version should start with no artifacts.
        artifacts = list(pv.artifacts.list())
        self.assertEqual(0, len(artifacts), 'We actually had artifacts?')
        a = pv.upload_artifact('tests/resources/scan_20.1.fpr')
        self.assertIsNotNone(a)
        artifacts = list(pv.artifacts.list())
        self.assertEqual(1, len(artifacts))
        a = artifacts[0]
        pprint(a)
        # clean up
        pv = list(client.projects.list(q=Query().query('name', pname)))
        for e in pv:
            e.delete()
|
# Read the price of each component as an integer.
x = int(input("введите стоимость монитора "))
y = int(input("введите стоимость сисьтемного блока "))
z = int(input("введите стоимость клавиатуры "))
# BUG FIX: the total previously added an undefined variable `n`, which raised
# a NameError at runtime. Three computers cost three times one full set.
print("стоимость трех компьютеров равна", (x + y + z) * 3)
|
def START():
    """Entry state: read one event from stdin and transition on it."""
    event = input()
    if event == "a":
        # Event "a" leads to state A; anything else to state B.
        return stateA()
    else:
        return stateB()

def stateA():
    """Terminal state reached on event "a"."""
    print("State A")

def stateB():
    """Terminal state reached on any other event."""
    print("State B")

# Run the state machine once.
START()
|
import numpy as np
from abc import ABCMeta, abstractmethod
from .optimization import gd, cd
class BaseLinearModel(metaclass=ABCMeta):
    """Abstract base class for linear models fitted on column-normalized features."""

    def __init__(self, n_iters=1000, tol=.0001, debug=False):
        # Learned coefficients and the per-column normalization factors.
        self._coef = None
        self._norm = None
        self._n_iters = n_iters
        self._debug = debug
        self._tol = tol

    def fit(self, X, y):
        """Fit the model on X, y; returns the cost history in debug mode, else None."""
        # Prepend an all-ones intercept column, then scale columns to unit norm.
        design = np.insert(X, 0, [1], axis=1)
        design, self._norm = self._normalize(design)
        self._coef, cost = self._solver(design, y)
        # Undo the normalization so coefficients apply to raw feature scale.
        self._coef = self._coef / self._norm.reshape(self._coef.shape)
        return cost if self._debug else None

    def predict(self, X):
        """Predict targets for X; raises when the model has not been fitted."""
        if self._coef is None:
            raise Exception('Model isn\'t fitted')
        design = np.insert(X, 0, [1], axis=1)
        return self._decision_function(design, self._coef)

    @staticmethod
    def _decision_function(X, coef):
        # Plain linear response: X @ coef.
        return np.dot(X, coef)

    @staticmethod
    def _normalize(X):
        # Scale each column to unit Euclidean norm; also return the norms.
        norm = np.sqrt(np.sum(X**2, axis=0))
        return X / norm, norm

    @abstractmethod
    def _loss(self, X, y, coef):
        pass

    @abstractmethod
    def _solver(self, X, y):
        pass
class LinearRegression(BaseLinearModel):
    """Ordinary least squares linear regression fitted with gradient descent."""

    def __init__(self, alpha=.01, n_iters=1000, tol=.0001, debug=False):
        # Gradient descent learning rate.
        self._alpha = alpha
        super(LinearRegression, self).__init__(
            n_iters=n_iters,
            tol=tol,
            debug=debug
        )

    def _solver(self, X, y):
        # Delegate to the shared gradient descent routine.
        return gd(
            X,
            y,
            gradient_f=self._gradient_f,
            cost_f=self._cost_f,
            alpha=self._alpha,
            n_iters=self._n_iters,
            tol=self._tol,
            debug=self._debug
        )

    def _loss(self, X, y, coef):
        # Residuals: y - X @ coef.
        return y - self._decision_function(X, coef)

    def _cost_f(self, X, y, coef):
        # Half mean squared error: sum(r^2) / (2m).
        residuals = self._loss(X, y, coef)
        return np.sum(residuals ** 2) / (2 * X.shape[0])

    def _gradient_f(self, X, y, coef):
        # Gradient of the half-MSE cost: -X^T r / m.
        residuals = self._loss(X, y, coef)
        return -np.dot(X.T, residuals) / X.shape[0]
class RidgeRegression(LinearRegression):
    """Ridge regression (Linear regression with L2 regularization).

    The intercept (first coefficient) is not regularized.
    """

    def __init__(self, alpha=.01, l2_penalty=.1, n_iters=1000, tol=.0001, debug=False):
        # Strength of the L2 penalty.
        self._l2_penalty = l2_penalty
        super(RidgeRegression, self).__init__(
            alpha=alpha,
            n_iters=n_iters,
            tol=tol,
            debug=debug
        )

    def _cost_f(self, X, y, coef):
        """Half-MSE plus the L2 penalty over all non-intercept coefficients."""
        loss = self._loss(X, y, coef)
        m = X.shape[0]
        penalty = self._l2_penalty * np.sum(np.dot(coef[1:].T, coef[1:]))
        return (np.dot(loss.T, loss).flatten() + penalty) / (2 * m)

    def _gradient_f(self, X, y, coef):
        """Gradient of the ridge cost.

        BUG FIX: the derivative of the L2 term is element-wise
        (l2_penalty * coef_j for every non-intercept coefficient); previously a
        single scalar sum(coef[1:]) was added to every gradient component,
        which does not match the cost function above.
        """
        loss = self._loss(X, y, coef)
        m = X.shape[0]
        penalty = self._l2_penalty * coef  # new array; coef is not mutated
        penalty[0] = 0  # the intercept is not regularized
        return (-np.dot(X.T, loss) + penalty) / m
class Lasso(BaseLinearModel):
    """LASSO (Least Absolute Shrinkage and Selection Operator): linear
    regression with an L1 penalty, fitted via coordinate descent."""

    def __init__(self, l1_penalty=.1, n_iters=1000, tol=.0001, debug=False):
        # Strength of the L1 penalty.
        self._l1_penalty = l1_penalty
        super(Lasso, self).__init__(
            n_iters=n_iters,
            tol=tol,
            debug=debug
        )

    def _solver(self, X, y):
        # Delegate to the shared coordinate descent routine.
        return cd(
            X,
            y.flatten(),
            optimize_f=self._optimize_f,
            n_iters=self._n_iters,
            tol=self._tol,
            debug=self._debug
        )

    def _loss(self, X, y, coef):
        # Residuals: y - X @ coef.
        return y - self._decision_function(X, coef)

    def _optimize_f(self, X, y, coef, j):
        """Coordinate update for coefficient j (rho, soft-thresholded unless intercept)."""
        residuals = self._loss(X, y, coef)
        # rho is greek ρ, the partial correlation of feature j with the residual.
        rho = np.sum(np.dot(X[:, j].T, residuals))
        return rho if j == 0 else self._soft_threshold(rho)

    def _soft_threshold(self, ro):
        """Soft-thresholding operator with threshold l1_penalty / 2."""
        half = self._l1_penalty / 2
        if ro < -half:
            return ro + half
        if ro > half:
            return ro - half
        return 0
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import post_save
from django.dispatch import receiver
from . import models
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_related_models_for_new_user(sender, instance, created, **kwargs):
    """Whenever a user is created, also create any related models."""
    # Only act on the initial save, not on later updates.
    if not created:
        return
    details = models.UserDetails(user=instance)
    details.save()
|
from gym.envs.registration import register

# Register the custom quadrotor environments with Gym so they can be
# instantiated via gym.make() using these IDs.
register(
    id='Quadrotor-v0',
    entry_point='gym_Quadrotor.envs:QuadrotorEnv',
)
# Harder variant of the same environment.
register(
    id='Quadrotor-extrahard-v0',
    entry_point='gym_Quadrotor.envs:QuadrotorExtraHardEnv',
)
|
import os
import pytest
# IMPORTANT keep this above all other borg imports to avoid inconsistent values
# for `from borg.constants import PBKDF2_ITERATIONS` (or star import) usages before
# this is executed
from borg import constants
# no fixture-based monkey-patching since star-imports are used for the constants module
# Speed up the test suite: one KDF iteration is enough when security is irrelevant.
constants.PBKDF2_ITERATIONS = 1
# needed to get pretty assertion failures in unit tests:
if hasattr(pytest, 'register_assert_rewrite'):
    pytest.register_assert_rewrite('borg.testsuite')
import borg.cache
from borg.logger import setup_logging
# Ensure that the loggers exist for all tests
setup_logging()
from borg.testsuite import has_lchflags, has_llfuse
from borg.testsuite import are_symlinks_supported, are_hardlinks_supported, is_utime_fully_supported
from borg.testsuite.platform import fakeroot_detected, are_acls_working
from borg import xattr
@pytest.fixture(autouse=True)
def clean_env(tmpdir_factory, monkeypatch):
    """Isolate every test from the user's real config/cache dirs and BORG_* env vars."""
    # avoid that we access / modify the user's normal .config / .cache directory:
    monkeypatch.setenv('XDG_CONFIG_HOME', str(tmpdir_factory.mktemp('xdg-config-home')))
    monkeypatch.setenv('XDG_CACHE_HOME', str(tmpdir_factory.mktemp('xdg-cache-home')))
    # also avoid to use anything from the outside environment:
    keys = [key for key in os.environ if key.startswith('BORG_')]
    for key in keys:
        monkeypatch.delenv(key, raising=False)
def pytest_report_header(config, startdir):
    """Report which platform-dependent test capabilities are enabled/disabled."""
    checks = {
        "BSD flags": has_lchflags,
        "fuse": has_llfuse,
        "root": not fakeroot_detected(),
        "symlinks": are_symlinks_supported(),
        "hardlinks": are_hardlinks_supported(),
        "atime/mtime": is_utime_fully_supported(),
        "modes": "BORG_TESTS_IGNORE_MODES" not in os.environ
    }
    # Dicts preserve insertion order, so the report order matches the table above.
    enabled = [name for name, available in checks.items() if available]
    disabled = [name for name, available in checks.items() if not available]
    return ("Tests enabled: " + ", ".join(enabled) + "\n" +
            "Tests disabled: " + ", ".join(disabled))
class DefaultPatches:
    """Guards applied to every test unless the test explicitly opts out."""

    def __init__(self, request):
        # Remember the real implementation so it can be restored afterwards.
        self.org_cache_wipe_cache = borg.cache.LocalCache.wipe_cache

        def wipe_should_not_be_called(*a, **kw):
            raise AssertionError("Cache wipe was triggered, if this is part of the test add @pytest.mark.allow_cache_wipe")
        # Unless the test carries the allow_cache_wipe marker, any cache wipe
        # during the test is treated as a bug.
        if 'allow_cache_wipe' not in request.keywords:
            borg.cache.LocalCache.wipe_cache = wipe_should_not_be_called
        request.addfinalizer(self.undo)

    def undo(self):
        # Restore the original wipe_cache once the test finishes.
        borg.cache.LocalCache.wipe_cache = self.org_cache_wipe_cache
@pytest.fixture(autouse=True)
def default_patches(request):
    # Apply DefaultPatches to every test; it undoes itself via a finalizer.
    return DefaultPatches(request)
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit tests for the sitewide_helpers module."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from proto import project_pb2
from services import service_manager
from sitewide import sitewide_helpers
from testing import fake
# Test user IDs.
REGULAR_USER_ID = 111
ADMIN_USER_ID = 222
OTHER_USER_ID = 333
# Test project IDs
# Projects where the regular user is an owner, one per lifecycle state.
REGULAR_OWNER_LIVE = 1001
REGULAR_OWNER_ARCHIVED = 1002
REGULAR_OWNER_DELETABLE = 1003
# Projects where the regular user is a committer, one per lifecycle state.
REGULAR_COMMITTER_LIVE = 2001
REGULAR_COMMITTER_ARCHIVED = 2002
REGULAR_COMMITTER_DELETABLE = 2003
# Projects owned by the "other" user.
OTHER_OWNER_LIVE = 3001
OTHER_OWNER_ARCHIVED = 3002
OTHER_OWNER_DELETABLE = 3003
OTHER_COMMITTER_LIVE = 4001
# A members-only project owned by the regular user.
MEMBERS_ONLY = 5001
class HelperFunctionsTest(unittest.TestCase):
    """Tests for sitewide_helpers project-visibility and starring helpers."""

    def setUp(self):
        # Fake services backing all project/user/star lookups in these tests.
        self.services = service_manager.Services(
            project=fake.ProjectService(),
            user=fake.UserService(),
            project_star=fake.ProjectStarService())
        self.cnxn = 'fake cnxn'
        for user_id in (ADMIN_USER_ID, REGULAR_USER_ID, OTHER_USER_ID):
            self.services.user.TestAddUser('ignored_%s@gmail.com' % user_id, user_id)
        # One project per (role, lifecycle state) combination for the regular user.
        self.regular_owner_live = self.services.project.TestAddProject(
            'regular-owner-live', state=project_pb2.ProjectState.LIVE,
            owner_ids=[REGULAR_USER_ID], project_id=REGULAR_OWNER_LIVE)
        self.regular_owner_archived = self.services.project.TestAddProject(
            'regular-owner-archived', state=project_pb2.ProjectState.ARCHIVED,
            owner_ids=[REGULAR_USER_ID], project_id=REGULAR_OWNER_ARCHIVED)
        self.regular_owner_deletable = self.services.project.TestAddProject(
            'regular-owner-deletable', state=project_pb2.ProjectState.DELETABLE,
            owner_ids=[REGULAR_USER_ID], project_id=REGULAR_OWNER_DELETABLE)
        self.regular_committer_live = self.services.project.TestAddProject(
            'regular-committer-live', state=project_pb2.ProjectState.LIVE,
            committer_ids=[REGULAR_USER_ID], project_id=REGULAR_COMMITTER_LIVE)
        self.regular_committer_archived = self.services.project.TestAddProject(
            'regular-committer-archived', state=project_pb2.ProjectState.ARCHIVED,
            committer_ids=[REGULAR_USER_ID], project_id=REGULAR_COMMITTER_ARCHIVED)
        self.regular_committer_deletable = self.services.project.TestAddProject(
            'regular-committer-deletable', state=project_pb2.ProjectState.DELETABLE,
            committer_ids=[REGULAR_USER_ID], project_id=REGULAR_COMMITTER_DELETABLE)
        # Projects belonging to the unrelated "other" user.
        self.other_owner_live = self.services.project.TestAddProject(
            'other-owner-live', state=project_pb2.ProjectState.LIVE,
            owner_ids=[OTHER_USER_ID], project_id=OTHER_OWNER_LIVE)
        self.other_owner_archived = self.services.project.TestAddProject(
            'other-owner-archived', state=project_pb2.ProjectState.ARCHIVED,
            owner_ids=[OTHER_USER_ID], project_id=OTHER_OWNER_ARCHIVED)
        self.other_owner_deletable = self.services.project.TestAddProject(
            'other-owner-deletable', state=project_pb2.ProjectState.DELETABLE,
            owner_ids=[OTHER_USER_ID], project_id=OTHER_OWNER_DELETABLE)
        self.other_committer_live = self.services.project.TestAddProject(
            'other-committer-live', state=project_pb2.ProjectState.LIVE,
            committer_ids=[OTHER_USER_ID], project_id=OTHER_COMMITTER_LIVE)
        self.regular_user = self.services.user.GetUser(self.cnxn, REGULAR_USER_ID)
        self.admin_user = self.services.user.TestAddUser(
            'administrator@chromium.org', ADMIN_USER_ID)
        self.admin_user.is_site_admin = True
        self.other_user = self.services.user.GetUser(self.cnxn, OTHER_USER_ID)
        # A members-only project that only the regular user belongs to.
        self.members_only_project = self.services.project.TestAddProject(
            'members-only', owner_ids=[REGULAR_USER_ID], project_id=MEMBERS_ONLY)
        self.members_only_project.access = project_pb2.ProjectAccess.MEMBERS_ONLY

    def assertProjectsAnyOrder(self, actual_projects, *expected_projects):
        """Assert the two project collections match, ignoring order."""
        # Check names rather than Project objects so that output is easier to read.
        actual_names = [p.project_name for p in actual_projects]
        expected_names = [p.project_name for p in expected_projects]
        # NOTE(review): assertItemsEqual exists only on Python 2; under
        # Python 3 this would need assertCountEqual — confirm the runtime.
        self.assertItemsEqual(expected_names, actual_names)

    def testFilterViewableProjects_CantViewArchived(self):
        # Archived and deletable projects are filtered out even for members.
        projects = list(sitewide_helpers.FilterViewableProjects(
            list(self.services.project.test_projects.values()),
            self.regular_user, {REGULAR_USER_ID}))
        self.assertProjectsAnyOrder(
            projects, self.regular_owner_live, self.regular_committer_live,
            self.other_owner_live, self.other_committer_live,
            self.members_only_project)

    def testFilterViewableProjects_NonMemberCantViewMembersOnly(self):
        # The other user is not a member, so the members-only project is hidden.
        projects = list(sitewide_helpers.FilterViewableProjects(
            list(self.services.project.test_projects.values()),
            self.other_user, {OTHER_USER_ID}))
        self.assertProjectsAnyOrder(
            projects, self.regular_owner_live, self.regular_committer_live,
            self.other_owner_live, self.other_committer_live)

    def testFilterViewableProjects_AdminCanViewAny(self):
        # Site admins can view members-only projects they don't belong to.
        projects = list(sitewide_helpers.FilterViewableProjects(
            list(self.services.project.test_projects.values()),
            self.admin_user, {ADMIN_USER_ID}))
        self.assertProjectsAnyOrder(
            projects, self.regular_owner_live, self.regular_committer_live,
            self.other_owner_live, self.other_committer_live,
            self.members_only_project)

    def testGetStarredProjects_OnlyViewableLiveStarred(self):
        viewed_user_id = 123
        for p in self.services.project.test_projects.values():
            # We go straight to the services layer because this is a test set up
            # rather than an actual user request.
            self.services.project_star.SetStar(
                self.cnxn, p.project_id, viewed_user_id, True)
        # Even though every project is starred, only live viewable ones show up.
        self.assertProjectsAnyOrder(
            sitewide_helpers.GetViewableStarredProjects(
                self.cnxn, self.services, viewed_user_id,
                {REGULAR_USER_ID}, self.regular_user),
            self.regular_owner_live, self.regular_committer_live,
            self.other_owner_live, self.other_committer_live,
            self.members_only_project)

    def testGetStarredProjects_MembersOnly(self):
        # Both users were able to star the project in the past. The stars do not
        # go away even if access to the project changes.
        self.services.project_star.SetStar(
            self.cnxn, self.members_only_project.project_id, REGULAR_USER_ID, True)
        self.services.project_star.SetStar(
            self.cnxn, self.members_only_project.project_id, OTHER_USER_ID, True)
        # But now, only one of them is currently a member, so only regular_user
        # can see the starred project in the lists.
        self.assertProjectsAnyOrder(
            sitewide_helpers.GetViewableStarredProjects(
                self.cnxn, self.services, REGULAR_USER_ID, {REGULAR_USER_ID},
                self.regular_user),
            self.members_only_project)
        self.assertProjectsAnyOrder(
            sitewide_helpers.GetViewableStarredProjects(
                self.cnxn, self.services, OTHER_USER_ID, {REGULAR_USER_ID},
                self.regular_user),
            self.members_only_project)
        # The other user cannot see the project, so he does not see it in either
        # list of starred projects.
        self.assertProjectsAnyOrder(
            sitewide_helpers.GetViewableStarredProjects(
                self.cnxn, self.services, REGULAR_USER_ID, {OTHER_USER_ID},
                self.other_user))  # No expected projects listed.
        self.assertProjectsAnyOrder(
            sitewide_helpers.GetViewableStarredProjects(
                self.cnxn, self.services, OTHER_USER_ID, {OTHER_USER_ID},
                self.other_user))  # No expected projects listed.
|
#!/usr/bin/env python
"""
simple script to rename bugzill aliases on hosts that have been renamed, if
they already exist.
Usage: rename_host_bugs old-new-short-names-file
Where: old-new-short-names-file is output of "map_hosts --short"
Note: you need to manually enter your bugzilla username & password if you
want to make changes. Otherwise, update attempts will fail.
ToDo:
- add proper arguements
- add some decent help
- read as csv file? (not sure, as it will "fail safe")
"""
import logging
import pprint
import sys
import bzrest.client
from bzrest.errors import BugNotFound
username = None
password = None
logger = logging.getLogger(__name__)
def rename_tracker_bug(bz, old_name, new_name):
    """Move a host's problem-tracking bug alias from old_name to new_name.

    Four cases:
      - only the old bug exists: move its alias (and summary) to the new name;
      - only the new bug exists: repair its summary if it is wrong;
      - neither exists: just log it;
      - both exist: log an error for manual resolution.
    """
    if old_name == new_name:
        # nothing to do
        return
    try:
        old_resp = bz.get_bug(old_name)
        has_old_bug = True
    except BugNotFound:
        has_old_bug = False
    try:
        new_resp = bz.get_bug(new_name)
        has_new_bug = True
    except BugNotFound:
        has_new_bug = False
    summary = "%s problem tracking" % (new_name,)
    if has_old_bug and not has_new_bug:
        logger.info("Updating old (%s) to new (%s)", old_name, new_name)
        # Return value intentionally ignored (was bound to an unused variable).
        bz.update_bug(old_name, {'alias': new_name, 'summary': summary})
    elif has_new_bug and not has_old_bug:
        # check for alias as part of summary
        if not new_resp['summary'] == summary:
            # logger.warn() is a deprecated alias; use warning().
            logger.warning("fixing bad summary on %s", new_name)
            bz.update_bug(new_name, {'summary': summary})
    elif not has_old_bug:
        # Neither bug exists.
        logger.info("No old bug (%s)", old_name)
    elif has_new_bug:
        # Both exist -- ambiguous; report both creation times for triage.
        logger.error("Both old (%s) and new (%s) exist", old_name, new_name)
        logger.error("  old opened %s", old_resp['creation_time'])
        logger.error("  new opened %s", new_resp['creation_time'])
def main():
    """Read old,new host-name pairs from argv[1] and rename each tracker bug."""
    bz = bzrest.client.BugzillaClient()
    bz.configure('https://bugzilla.mozilla.org/rest', username, password)
    with open(sys.argv[1], 'r') as renames:
        # Iterate the file lazily instead of materialising readlines().
        for line in renames:
            old_name, new_name = line.strip().split(',')
            rename_tracker_bug(bz, old_name, new_name)
    return 0
if __name__ == '__main__':
    # WARNING level keeps routine info quiet; timestamps prefix each message.
    logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(message)s')
    raise SystemExit(main())
|
from terrabot.events.events import Events
class Packet2Parser(object):
    """Parser for packet type 2: raises a Blocked event with the server's message."""

    def parse(self, world, player, data, ev_man):
        # The payload after the first two bytes is a UTF-8 encoded string.
        message = str(data[2:], "utf-8")
        ev_man.raise_event(Events.Blocked, message)
|
from events.watcher import Watcher
def handler1(previous_state, current_state):
    """First demo change handler: log the state transition."""
    print(f'Handler 1 : {previous_state} to {current_state}')
def handler2(previous_state, current_state):
    """Second demo change handler: log the state transition."""
    print(f'Handler 2 : {previous_state} to {current_state}')
def handler3(previous_state, current_state):
    """Third demo handler: log the transition and stop the watcher at state '5'."""
    print(f'Handler 3 : {previous_state} to {current_state}')
    if current_state == '5':
        file_watcher.stop_watching()
# Path of the file whose contents are watched for changes.
file_path = 'test.txt'
def get_file_contents(_file_path):
    """Return the full text of the file at ``_file_path``.

    Uses a context manager so the handle is closed promptly; the previous
    version opened the file and never closed it.
    """
    with open(_file_path, "r") as file_obj:
        return file_obj.read()
# Poll the file every 2 seconds; on every content change the handlers fire
# in order with the (previous_state, current_state) values.
file_watcher = Watcher(state_func=get_file_contents, change_handlers=[
    handler1, handler2, handler3], initial_state=get_file_contents(file_path), state_check_interval=2, _file_path=file_path)
file_watcher.start_watching()
|
import json
import re
from json import JSONEncoder
class Register:
    """One REGISTER entry parsed from the Modbus gateway INI file."""

    def __init__(self, start_addr, word_cnt, Eui64, Tsapid, ObjId, AttrId, Idx1, Idx2, MethId, status):
        self.start_addr = start_addr
        self.word_cnt = word_cnt
        self.Eui64 = Eui64
        self.Tsapid = Tsapid
        self.ObjId = ObjId
        self.AttrId = AttrId
        self.Idx1 = Idx1
        self.Idx2 = Idx2
        self.MethId = MethId
        self.status = status


class RegisterEncoder(JSONEncoder):
    """JSON encoder that serializes Register objects via their attribute dict."""

    def default(self, o):
        return o.__dict__


def runGw(filename="Resources/modbus_gw.ini",
          out_path="static/Modbus_Gw_File_Parsed.json"):
    """Parse REGISTER lines from the gateway INI and dump them to a JSON file.

    Args:
        filename: INI file to parse.  New parameter; defaults to the path
            that was previously hard-coded, so existing callers still work.
        out_path: destination JSON file; likewise defaults to the old path.

    Improvements over the previous version: the line is split once instead
    of ten times, both files are managed with ``with``, and the JSON output
    is written once after parsing instead of being rewritten for every
    non-comment line (the final file content is the same).
    """
    registers = []
    with open(filename, 'r') as file:
        for line in file:
            # list of registers
            if line.startswith('REGISTER ='):
                fields = re.split(',|\n|=', line)
                registers.append(Register(
                    fields[1],    # start_addr
                    fields[2],    # word_cnt
                    fields[3],    # EUI64
                    fields[4],    # TSAPID
                    fields[5],    # ObjId
                    fields[6],    # AttrId
                    fields[7],    # Idx1
                    fields[9],    # Idx2 -- NOTE(review): field 9 before 8
                    fields[8],    # MethId  mirrors the original mapping;
                    fields[10]))  # status  confirm the INI field order.
    dataJSON = json.dumps(registers, indent=4, cls=RegisterEncoder)
    with open(out_path, "w") as out_file:
        out_file.write(dataJSON)
    print("Gateway Parsed")
# Entry point: parse the gateway INI and emit the JSON file on import/run.
runGw()
import unittest
from app import create_app
class ApiTestCase(unittest.TestCase):
    """This class represents the api test case"""

    def setUp(self):
        """Define test variables and initialize app."""
        # Use the "testing" config so tests never touch production settings.
        self.app = create_app(config_name="testing")
        # Keep the factory; each test builds a fresh client via self.client().
        self.client = self.app.test_client

    # cpf
    def test_api(self):
        # The /base_2 endpoint should accept a CPF query parameter and answer 200.
        res = self.client().get('/base_2?cpf=12345678910')
        self.assertEqual(res.status_code, 200)


if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.IncomeDistributionTransInInfo import IncomeDistributionTransInInfo
class AnttechBlockchainFinanceDistributionRuleCreateModel(object):
    """Request model for creating a blockchain-finance income distribution rule.

    Generated-style Alipay SDK data holder: private fields exposed through
    properties, plus to_alipay_dict / from_alipay_dict serialization helpers.
    """

    def __init__(self):
        # All fields default to None and are set through the properties below.
        self._distribution_pro_no = None
        self._request_no = None
        self._trans_in_info = None  # list of IncomeDistributionTransInInfo
        self._trans_out_account_no = None
        self._trans_out_account_type = None
        self._trans_out_cert_no = None
        self._trans_out_cert_type = None
        self._trans_out_name = None

    @property
    def distribution_pro_no(self):
        return self._distribution_pro_no

    @distribution_pro_no.setter
    def distribution_pro_no(self, value):
        self._distribution_pro_no = value

    @property
    def request_no(self):
        return self._request_no

    @request_no.setter
    def request_no(self, value):
        self._request_no = value

    @property
    def trans_in_info(self):
        return self._trans_in_info

    @trans_in_info.setter
    def trans_in_info(self, value):
        # Only list values are accepted; dict elements are converted to
        # IncomeDistributionTransInInfo instances.
        # NOTE(review): non-list values are silently ignored, keeping the
        # previous value — this mirrors the generated SDK convention.
        if isinstance(value, list):
            self._trans_in_info = list()
            for i in value:
                if isinstance(i, IncomeDistributionTransInInfo):
                    self._trans_in_info.append(i)
                else:
                    self._trans_in_info.append(IncomeDistributionTransInInfo.from_alipay_dict(i))

    @property
    def trans_out_account_no(self):
        return self._trans_out_account_no

    @trans_out_account_no.setter
    def trans_out_account_no(self, value):
        self._trans_out_account_no = value

    @property
    def trans_out_account_type(self):
        return self._trans_out_account_type

    @trans_out_account_type.setter
    def trans_out_account_type(self, value):
        self._trans_out_account_type = value

    @property
    def trans_out_cert_no(self):
        return self._trans_out_cert_no

    @trans_out_cert_no.setter
    def trans_out_cert_no(self, value):
        self._trans_out_cert_no = value

    @property
    def trans_out_cert_type(self):
        return self._trans_out_cert_type

    @trans_out_cert_type.setter
    def trans_out_cert_type(self, value):
        self._trans_out_cert_type = value

    @property
    def trans_out_name(self):
        return self._trans_out_name

    @trans_out_name.setter
    def trans_out_name(self, value):
        self._trans_out_name = value

    def to_alipay_dict(self):
        """Serialize to the wire-format dict, recursing into nested models."""
        params = dict()
        if self.distribution_pro_no:
            if hasattr(self.distribution_pro_no, 'to_alipay_dict'):
                params['distribution_pro_no'] = self.distribution_pro_no.to_alipay_dict()
            else:
                params['distribution_pro_no'] = self.distribution_pro_no
        if self.request_no:
            if hasattr(self.request_no, 'to_alipay_dict'):
                params['request_no'] = self.request_no.to_alipay_dict()
            else:
                params['request_no'] = self.request_no
        if self.trans_in_info:
            # Convert each list element in place before emitting the list.
            if isinstance(self.trans_in_info, list):
                for i in range(0, len(self.trans_in_info)):
                    element = self.trans_in_info[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.trans_in_info[i] = element.to_alipay_dict()
            if hasattr(self.trans_in_info, 'to_alipay_dict'):
                params['trans_in_info'] = self.trans_in_info.to_alipay_dict()
            else:
                params['trans_in_info'] = self.trans_in_info
        if self.trans_out_account_no:
            if hasattr(self.trans_out_account_no, 'to_alipay_dict'):
                params['trans_out_account_no'] = self.trans_out_account_no.to_alipay_dict()
            else:
                params['trans_out_account_no'] = self.trans_out_account_no
        if self.trans_out_account_type:
            if hasattr(self.trans_out_account_type, 'to_alipay_dict'):
                params['trans_out_account_type'] = self.trans_out_account_type.to_alipay_dict()
            else:
                params['trans_out_account_type'] = self.trans_out_account_type
        if self.trans_out_cert_no:
            if hasattr(self.trans_out_cert_no, 'to_alipay_dict'):
                params['trans_out_cert_no'] = self.trans_out_cert_no.to_alipay_dict()
            else:
                params['trans_out_cert_no'] = self.trans_out_cert_no
        if self.trans_out_cert_type:
            if hasattr(self.trans_out_cert_type, 'to_alipay_dict'):
                params['trans_out_cert_type'] = self.trans_out_cert_type.to_alipay_dict()
            else:
                params['trans_out_cert_type'] = self.trans_out_cert_type
        if self.trans_out_name:
            if hasattr(self.trans_out_name, 'to_alipay_dict'):
                params['trans_out_name'] = self.trans_out_name.to_alipay_dict()
            else:
                params['trans_out_name'] = self.trans_out_name
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a dict; returns None for falsy input."""
        if not d:
            return None
        o = AnttechBlockchainFinanceDistributionRuleCreateModel()
        if 'distribution_pro_no' in d:
            o.distribution_pro_no = d['distribution_pro_no']
        if 'request_no' in d:
            o.request_no = d['request_no']
        if 'trans_in_info' in d:
            o.trans_in_info = d['trans_in_info']
        if 'trans_out_account_no' in d:
            o.trans_out_account_no = d['trans_out_account_no']
        if 'trans_out_account_type' in d:
            o.trans_out_account_type = d['trans_out_account_type']
        if 'trans_out_cert_no' in d:
            o.trans_out_cert_no = d['trans_out_cert_no']
        if 'trans_out_cert_type' in d:
            o.trans_out_cert_type = d['trans_out_cert_type']
        if 'trans_out_name' in d:
            o.trans_out_name = d['trans_out_name']
        return o
|
# -*- coding: utf-8 -*-
from intercom.api_operations.find import Find
from intercom.api_operations.delete import Delete
from intercom.api_operations.find_all import FindAll
from intercom.api_operations.save import Save
from intercom.traits.api_resource import Resource
class Subscription(Resource, Find, FindAll, Save, Delete):
    """Intercom subscription resource.

    All behavior comes from the mixed-in CRUD operation classes
    (Find, FindAll, Save, Delete) layered over the base Resource traits.
    """
    pass
|
def FindWalk(
        walks,
        current_walk,
        current_x,
        current_y,
        side,
        pathLength,
        visited):
    """Recursively enumerate self-avoiding walks of ``pathLength`` steps.

    Appends every complete walk (a list of [x, y] points) to ``walks``.
    ``visited`` marks occupied grid cells and is restored on backtracking.

    BUG FIXES versus the previous version:
      - the recursive call was ``return FindWalk(...)``, which aborted the
        search after the first viable direction and made the backtracking
        cleanup below it unreachable;
      - ``current_walk`` was appended by reference and then mutated, so the
        stored walks were corrupted — a snapshot copy is stored instead;
      - the bounds checks allowed x == side / y == side, which indexes past
        the end of ``visited`` (valid indices are 0 .. side-1);
      - the debug ``print(walks)`` on every completed walk was removed.
    """
    # If we have visited every position, then this is a complete walk.
    if len(current_walk) == pathLength + 1:
        walks.append(list(current_walk))
        return
    next_points = [
        [current_x - 1, current_y],
        [current_x + 1, current_y],
        [current_x, current_y - 1],
        [current_x, current_y + 1]
    ]
    for point in next_points:
        x, y = point
        # Stay inside the side x side grid.
        if not (0 <= x < side and 0 <= y < side):
            continue
        if visited[x][y]:
            continue
        # Try visiting this point.
        visited[x][y] = True
        current_walk.append(point)
        FindWalk(walks, current_walk, x, y, side, pathLength, visited)
        # We're done visiting this point.
        visited[x][y] = False
        current_walk.pop()


def initWalker(pathLength):
    """Return all self-avoiding walks of ``pathLength`` steps from (0, 0).

    NOTE: runtime grows exponentially with pathLength.
    """
    # Start the walk at (0, 0).
    current_walk = [[0, 0]]
    walks = []
    side = pathLength + 1
    # Build independent rows: ``[[False] * side] * side`` aliases one row
    # object ``side`` times, so marking (x, y) would mark (anything, y).
    visited = [[False] * side for _ in range(side)]
    visited[0][0] = True
    FindWalk(walks, current_walk, 0, 0, side, pathLength, visited)
    return walks
# Demo run.  NOTE(review): an exhaustive enumeration of all self-avoiding
# walks of 15 steps is a very large computation — confirm this size is intended.
p1 = initWalker(15)
|
from extract_emails.browsers import ChromeBrowser
from extract_emails import EmailExtractor

# Previously scraped candidate sites, kept for quick re-runs:
# url = "http://www.adcottawa.com/"
# url = "https://dentistryonking.net/"
# url = "https://conklindental.ca/"
# url = "http://elliotlakedentalcentre.com/"
# url = "http://www.sudburysmiles.ca/"
# url = "https://www.downtowndentistry.com/contact-us"
url = "http://www.downtowndental.ca//"
# Path to the local chromedriver binary used by the browser wrapper.
chrome_driver = "/Users/zachyamaoka/Documents/extract-emails/user/chromedriver"
with ChromeBrowser(executable_path=chrome_driver) as browser:
    # Crawl up to 2 links deep from the start URL and collect emails.
    email_extractor = EmailExtractor(url, browser, depth=2, link_filter=1)
    emails = email_extractor.get_emails()
    for email in emails:
        print(email)
        print(email.as_dict())
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os

from datadog_checks.dev import get_docker_hostname

# Shared connection settings for the Redis test containers.
PORT = '6379'
PASSWORD = 'devops-best-friend'
# Ports of the master and replica containers used in replication tests.
MASTER_PORT = '6382'
REPLICA_PORT = '6380'
UNHEALTHY_REPLICA_PORT = '6381'
HOST = get_docker_hostname()
# Redis image tag to test against; override with the REDIS_VERSION env var.
REDIS_VERSION = os.getenv('REDIS_VERSION', 'latest')
|
def solution(arr):
    """Collapse runs of consecutive duplicates, keeping one element per run."""
    deduped = []
    for value in arr:
        # Append only when the value starts a new run.
        if not deduped or deduped[-1] != value:
            deduped.append(value)
    return deduped
import os
import pprint
def main():
    """Solve GCJ 'Counting Sheep': for each N, find the first multiple of N
    after which all ten digits 0-9 have appeared, or report INSOMNIA for 0."""
    output = '';
    [inputCount, numList] = readFile()
    for i in range(0, int(inputCount)):
        num = int(numList[i])
        checklist = set()  # digits seen so far across num*1, num*2, ...
        find = False
        if num != 0:
            # 10**10 is just a practical upper bound on the multiplier.
            for j in range(1, 10**10):
                checklist = checklist.union(set(list(str(num*j))))
                if sorted(checklist) == ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    p_str = 'Case #{0}: {1}\n'.format((i+1), num*j)
                    output += p_str
                    print(p_str)
                    find = True
                    break
        # num == 0 never yields new digits, so the count never completes.
        if find == False:
            p_str = 'Case #{0}: INSOMNIA\n'.format((i+1))
            output += p_str
            print(p_str)
    writeFile(output)
def readFile():
    """Read 'A-large.in': first line is the case count, the rest the numbers."""
    with open('A-large.in') as f:
        s = f.read()
    s = s.split('\n')
    inputCount = s.pop(0);
    # Drop the empty string produced by the trailing newline.
    s.pop(-1)
    return [inputCount, s]
def writeFile(text):
    """Write ``text`` to the fixed output file 'A-large.out'.

    The parameter was renamed from ``str``, which shadowed the builtin;
    positional callers are unaffected.  The file is now closed via ``with``.
    """
    with open('A-large.out', 'w') as f:
        f.write(text)
# Script entry point.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# coding: utf-8
# Jupyter-exported analysis of the Boston housing dataset: EDA, then
# linear-regression and random-forest models predicting MEDV.

# In[17]:
import pandas as pd
# The raw file is whitespace-separated and has no header row.
data = pd.read_csv(r"C:\Users\win 10\Downloads\housing.csv", header=None, sep='\s+')
column_list = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data.columns = column_list
data.head()

# In[18]:
# Check for missing values.
data.isnull().sum()

# In[19]:
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# In[20]:
# Scatter plots of every feature against the target MEDV.
x_vars = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
y_vars = ['MEDV']
g = sns.PairGrid(data, x_vars=x_vars, y_vars=y_vars)
g.fig.set_size_inches(25, 3)
g.map(sns.scatterplot)
g.add_legend()

# In[21]:
# Correlation heatmap across all columns.
plt.figure(figsize=(20, 10))
sns.heatmap(data.corr(), annot=True)

# In[22]:
from sklearn.model_selection import train_test_split
# Keep a subset of features for modeling.
boston = data[['INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'LSTAT', 'PTRATIO', 'MEDV']]
features = boston.drop('MEDV', axis=1)
labels = boston['MEDV']
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=43)
X_train.shape, X_test.shape, y_train.shape

# In[23]:
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
model = LinearRegression()
model.fit(X_train, y_train)
y_pred_linear = model.predict(X_test)
print('MAE:', mean_absolute_error(y_pred_linear, y_test))
print('MSE:', mean_squared_error(y_pred_linear, y_test))
print('R2_score:', r2_score(y_pred_linear, y_test))

# In[24]:
# Predicted vs. actual MEDV for the linear model.
sns.regplot(x=y_pred_linear, y=y_test)
plt.xlabel('predict MEDV')
plt.ylabel('MEDV')

# In[25]:
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(n_estimators=100, random_state=54, max_depth=10)
regr.fit(X_train, y_train)
y_pred_rnd = regr.predict(X_test)
print('MAE:', mean_absolute_error(y_pred_rnd, y_test))
print('MSE:', mean_squared_error(y_pred_rnd, y_test))
print('R2_score:', r2_score(y_pred_rnd, y_test))

# In[26]:
# Rank features by the forest's importance scores.
feat_importances = pd.DataFrame(regr.feature_importances_, index=X_train.columns, columns=["Importance"])
feat_importances.sort_values(by='Importance', ascending=False, inplace=True)
feat_importances.plot(kind='bar', figsize=(8,6))

# In[27]:
# Predicted vs. actual MEDV for the random forest.
sns.regplot(x=y_pred_rnd, y=y_test)
plt.xlabel('predict MEDV')
plt.ylabel('MEDV')
plt.xlim(5, 50)

# In[28]:
# Per-sample residuals for inspection.
df = pd.DataFrame({'prediction': y_pred_rnd, 'test data': y_test, 'error': y_pred_rnd - y_test})
df.head()

# In[29]:
# Show the worst predictions (absolute error of 5 or more).
df[df['error'].abs() >= 5]

# In[ ]:
|
# Parse lines like "0: 3" into [layer_depth, scanner_range] pairs.
# (Appears to be Advent of Code 2017 day 13, part 2.)
securityLevels = []
with open('day13input') as f:
    for line in f:
        line = line.replace(':', '').split()
        securityLevels.append([int(line[0]), int(line[1])])

# A scanner with range r returns to position 0 every r * 2 - 2 picoseconds.
scannerList = []
for item in securityLevels:
    scannerList.append((item[0], item[1] * 2 - 2))
print(scannerList)

# Find the smallest delay such that no layer catches the packet, i.e.
# (depth + delay) % period != 0 for every scanner; restart on any catch.
letshopeLOL = 0
caught = True
while caught:
    caught = False
    for item in scannerList:
        #print('this modulo:', item[0], '+', letshopeLOL, '%', item[1], '=', item[0] + letshopeLOL % item[1])
        if (item[0] + letshopeLOL) % item[1] == 0:
            letshopeLOL += 1
            caught = True
            break
print(letshopeLOL)
from enum import Enum
import numpy as np
import pandas as pd
from WeatherDataCSV import WeatherDataCSV
class WWOData(WeatherDataCSV):
    """Loader/cleaner for World Weather Online hourly CSV exports.

    Reads the relevant columns, merges date+time into a datetime index,
    maps the textual weather condition onto a numeric rank, resamples to
    15-minute resolution with linear interpolation, and min-max normalizes.
    """

    class Columns(Enum):
        # Column identifiers used after renaming in format().
        DATE = 'date'  # dd/MM/yyyy format
        TIME = 'time'  # int format (0, 100, 2300)
        DATE_TIME = 'datetime'  # merged date+time, used as the index
        TEMP_C = 'tempC'
        TEMP_F = 'tempF'
        WINDSPEED_MILES = 'windspeedMiles'
        WINDSPEED_KMPH = 'windspeedKmph'
        WINDDIR_DEGREE = 'winddirDegree'
        # WINDDIR_16POINT = 'winddir16Point'
        WEATHER_COND = 'cond'
        PRECIP = 'precipMM'
        HUMIDITY = 'humidity'
        VISIBILITY = 'visibility'
        PRESSURE = 'pressure'
        CLOUDCOVER = 'cloudcover'
        HEATINDEX_C = 'heatIndexC'
        HEATINDEX_F = 'heatIndexF'
        DEWPOINT_C = 'dewPointC'
        DEWPOINT_F = 'dewPointF'
        WINDCHILL_C = 'windChillC'
        WINDCHILL_F = 'windChillF'
        WINDGUST_MILES = 'windGustMiles'
        WINDGUST_KMPH = 'windGustKmph'
        FEELSLIKE_C = 'feelsLikeC'
        FEELSLIKE_F = 'feelsLikeF'

    # Alphabetical list of condition labels; in interpolate() each label is
    # replaced by index / (len - 1), i.e. a value in [0, 1].
    WEATHER_COND_RANKED_LIST = ['Clear',
                                'Cloudy',
                                'Heavy rain',
                                'Heavy rain at times',
                                'Light drizzle',
                                'Light rain',
                                'Light rain shower',
                                'Mist',
                                'Moderate or heavy rain shower',
                                'Moderate rain',
                                'Moderate rain at times',
                                'Overcast',
                                'Partly cloudy',
                                'Patchy light drizzle',
                                'Patchy light rain',
                                'Patchy light rain with thunder',
                                'Patchy rain possible',
                                'Sunny',
                                'Thundery outbreaks possible',
                                'Torrential rain shower']

    def read_csv(self, csv_path):
        """Read the raw WWO CSV, keeping the date column and hourly measurements."""
        cols_to_read = [2]
        cols_to_read.extend(range(14, 39))
        # Exclude weather code and URL value
        cols_to_read.remove(20)
        cols_to_read.remove(21)
        cols_to_read.remove(22)
        # NOTE(review): delimiter=',' overrides sep here, so the sep pattern
        # is effectively inert — confirm which was intended.
        csv_df = pd.read_csv(csv_path, sep=',\s,', delimiter=',', skipinitialspace=True, usecols=cols_to_read)
        return csv_df

    def format(self):
        """Rename columns, merge date+time into a datetime, and set it as index."""
        # Rename columns
        self.df.columns = [
            self.Columns.DATE.value,
            self.Columns.TIME.value,
            self.Columns.TEMP_C.value,
            self.Columns.TEMP_F.value,
            self.Columns.WINDSPEED_MILES.value,
            self.Columns.WINDSPEED_KMPH.value,
            self.Columns.WINDDIR_DEGREE.value,
            # self.Columns.WINDDIR_16POINT.value,
            self.Columns.WEATHER_COND.value,
            self.Columns.PRECIP.value,
            self.Columns.HUMIDITY.value,
            self.Columns.VISIBILITY.value,
            self.Columns.PRESSURE.value,
            self.Columns.CLOUDCOVER.value,
            self.Columns.HEATINDEX_C.value,
            self.Columns.HEATINDEX_F.value,
            self.Columns.DEWPOINT_C.value,
            self.Columns.DEWPOINT_F.value,
            self.Columns.WINDCHILL_C.value,
            self.Columns.WINDCHILL_F.value,
            self.Columns.WINDGUST_MILES.value,
            self.Columns.WINDGUST_KMPH.value,
            self.Columns.FEELSLIKE_C.value,
            self.Columns.FEELSLIKE_F.value]
        # Merge date and time; the time column is zero-padded to HHMM first.
        self.df[self.Columns.DATE_TIME.value] = pd.to_datetime(self.df[self.Columns.DATE.value].apply(str) + ' ' +
                                                               self.df[self.Columns.TIME.value].apply(str).apply(lambda x: x.zfill(4)),
                                                               format='%d/%m/%Y %H%M')
        # Remove date and time column
        self.df.drop([self.Columns.DATE.value, self.Columns.TIME.value], axis=1, inplace=True)
        # Set datetime as index
        self.df.set_index([self.Columns.DATE_TIME.value], inplace=True)

    def interpolate(self):
        """Rank the condition labels numerically and resample to 15-minute bins."""
        # print(self.df.head().to_string())
        # NOTE(review): Index.get_duplicates() was removed in newer pandas;
        # this prints duplicate timestamps for debugging — confirm the
        # pandas version in use.
        print(self.df.index.get_level_values(self.Columns.DATE_TIME.value).get_duplicates())
        # Convert labels to ranked value
        for i, weather_cond in enumerate(self.WEATHER_COND_RANKED_LIST):
            self.df[self.Columns.WEATHER_COND.value] = np.where(
                self.df[self.Columns.WEATHER_COND.value] == weather_cond,
                str(i / (len(self.WEATHER_COND_RANKED_LIST) - 1)), self.df[self.Columns.WEATHER_COND.value])
        self.df[self.Columns.WEATHER_COND.value] = self.df[self.Columns.WEATHER_COND.value].astype(float)
        print(len(self.df))
        # '15T' = 15-minute bins; gaps are filled by linear interpolation.
        self.df = self.df.resample('15T')
        self.df = self.df.interpolate(method='linear')
        self.df = self.df[:-1]
        print(len(self.df))
        # print(self.df.head(96).to_string())

    def normalize(self):
        """Min-max scale every column into [0, 1]."""
        # Get all unique weather condition values
        # weather_cond_col_list = self.df.time_weather.unique()
        # for weather_cond_col in weather_cond_col_list:
        #     self.df[weather_cond_col] = np.where(
        #         self.df[self.Columns.WEATHER_CONDITION.value] == weather_cond_col, 1, 0)
        self.df = (self.df - self.df.min()) / (self.df.max() - self.df.min())
        print(self.df.head(10).to_string())
|
"""
The bike costs K dollars. At the start of every day I save up N dollars
and at the start of every 10 days I spend M dollars.
Output the days it'll take to save up for the bike.
If I cannot buy the bike (I spend more than I earn), output "NO BIKE FOR YOU".
ex input:
100
3.50
8
ex output:
36
ex input 2:
100
3
35
ex output 2:
NO BIKE FOR YOU
"""
# 100/100, tried with Decimal rather than float for more precision, but the results are the same.
# Annotation corrected: the function prints and exits, it never returns an int.
def get_day(days_passed: int, savings: float, bike_price: float, daily_wage: float) -> None:
    """
    This function is called when we have enough money for a bike. Due to the nature of our program, most likely
    we will have a surplus of money and will need to calculate backwards to pinpoint the exact moment we saved up
    exactly enough for the bike. This function calculates backwards and outputs the specific day to the console.
    After which, closes the program because that is all that is required from this exam problem.
    """
    # Undo one day's wage at a time while the bike would still be affordable
    # without it; what remains is the first day the target was reached.
    while savings - daily_wage >= bike_price:
        savings -= daily_wage
        days_passed -= 1
    print(days_passed)
    # Terminate the whole program here by design (see docstring).
    exit()
def main():
    """Read the bike price, daily savings and 10-day spend from stdin and
    report how many days it takes to afford the bike, "NO BIKE FOR YOU" if it
    is unaffordable, or "INVALID INPUT" for malformed/negative input.
    """
    try:
        bike_price = float(input())
        daily_saved = float(input())    # dollars saved at the start of every day
        dollars_spent = float(input())  # dollars spent at the start of every 10th day
        if bike_price < 0 or daily_saved < 0 or dollars_spent < 0:
            # Negative money amounts make no sense for this problem.
            # ValueError is also what float() raises on non-numeric input,
            # so one narrow handler below covers both invalid cases
            # (the original raised/caught the overly broad Exception).
            raise ValueError("amounts must be non-negative")
        days_passed = 0
        savings = 0.0
        # 1. Let the first 9 days pass (no spending occurs before day 10).
        savings += daily_saved * 9
        days_passed += 9
        if savings >= bike_price:
            get_day(days_passed, savings, bike_price, daily_saved)
        # 2. If a full 10-day cycle gains no money, the target is unreachable.
        if (daily_saved * 10) - dollars_spent <= 0.0:
            print("NO BIKE FOR YOU")
            exit()
        # 3. Each 10-day cycle now has a strictly positive surplus, so this
        #    loop is guaranteed to terminate via get_day() (which exits).
        while True:
            savings += (daily_saved * 10) - dollars_spent
            days_passed += 10
            if savings >= bike_price:
                get_day(days_passed, savings, bike_price, daily_saved)
    except ValueError:
        print("INVALID INPUT")


if __name__ == '__main__':
    main()
|
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import pickle
import importlib

# "import 07_vizuelizacija_matrice_konfuzije" is a SyntaxError: a module name
# starting with a digit cannot appear in an import statement, so the module
# must be loaded dynamically instead.
_viz = importlib.import_module("07_vizuelizacija_matrice_konfuzije")


def main():
    """Fit a 5-nearest-neighbours classifier, persist it, and report its
    confusion matrix and accuracy on the test set.

    NOTE(review): X_balanced, y_balanced, X_test and y_test are not defined
    anywhere in this file -- confirm where they are supposed to come from
    before running.
    """
    knn = KNeighborsClassifier(n_neighbors=5, metric='minkowski', algorithm='brute')
    knn.fit(X_balanced, y_balanced)
    # Persist the fitted model; a context manager closes the file handle
    # (the original passed an anonymous open() and leaked it).
    with open('knn_k_5_minkowski_brute.sav', 'wb') as model_file:
        pickle.dump(knn, model_file)
    y_predicted_knn = knn.predict(X_test)
    cm_knn = confusion_matrix(y_test, y_predicted_knn)
    print(cm_knn)
    # Confusion matrix observed on a previous run:
    # [[1251  326   25   43]
    #  [  31  324   13    0]
    #  [   8   50  319   13]
    #  [ 330   59  249  984]]
    print("Preciznost modela dobijenog metodom k najblizih suseda:", knn.score(X_test, y_test))
    # Previously observed accuracy: 0.7150310559006211
    # Fix: the title used to say "k = 4" although the model above uses k = 5.
    _viz.plot_confusion_matrix(cm_knn, normalize=True,
                               target_names=['cetvrta', 'druga', 'prva', 'treca'],
                               title="Confusion Matrix - kNN (k = 5; metric = 'minkowski', algorithm = 'brute')")


if __name__ == "__main__":
    main()
|
import sys, os, re, glob
from scripts.search_files import *
from scripts.ilapfuncs import *
import argparse
from argparse import RawTextHelpFormatter
from six.moves.configparser import RawConfigParser
from time import process_time
import tarfile
import shutil
from scripts.report import *
from zipfile import ZipFile
from tarfile import TarFile
parser = argparse.ArgumentParser(description='ALEAPP: Android Logs, Events, and Protobuf Parser.')
parser.add_argument('-o', choices=['fs','tar', 'zip'], required=True, action="store",help="Directory path, TAR, or ZIP filename and path(required).")
parser.add_argument('pathtodir',help='Path to directory')
# if len(sys.argv[1:])==0:
# parser.logfunc_help()
# parser.exit()
start = process_time()
args = parser.parse_args()
pathto = args.pathtodir
extracttype = args.o
start = process_time()
tosearch = {'wellbeing': '*/com.google.android.apps.wellbeing/databases/*',
'wellbeingaccount':'*/com.google.android.apps.wellbeing/files/AccountData.pb',
'usagestats':'*/usagestats/*',
'recentactivity':'*/system_ce/*'}
'''
tosearch = {'redditusers':'*Data/Application/*/Documents/*/accounts/*',
'redditchats':'*Data/Application/*/Documents/*/accountData/*/chat/*/chat.sqlite'}
'''
os.makedirs(reportfolderbase)
os.makedirs(reportfolderbase+'Script Logs')
logfunc('\n--------------------------------------------------------------------------------------')
logfunc('ALEAPP: Android Logs, Events, and Protobuf Parser')
logfunc('Objective: Triage iOS Full System Extractions.')
logfunc('By: Alexis Brignoni | @AlexisBrignoni | abrignoni.com')
# Dispatch on the extraction container type: loose filesystem directory,
# TAR archive, or ZIP archive.  Each branch searches for every artifact
# pattern and calls the parser function whose name matches the dict key.
if extracttype == 'fs':
    logfunc(f'Artifact categories to parse: {str(len(tosearch))}')
    logfunc(f'File/Directory selected: {pathto}')
    logfunc('\n--------------------------------------------------------------------------------------')
    logfunc( )
    log = open(reportfolderbase+'Script Logs/ProcessedFilesLog.html', 'w+', encoding='utf8')
    # NOTE(review): nl is assigned but never used in this branch.
    nl = '\n' #literal in order to have new lines in fstrings that create text files
    log.write(f'Extraction/Path selected: {pathto}<br><br>')

    # Search for the files per the arguments
    for key, val in tosearch.items():
        filefound = search(pathto, val)
        if not filefound:
            logfunc()
            logfunc(f'No files found for {key} -> {val}.')
            log.write(f'No files found for {key} -> {val}.<br>')
        else:
            logfunc()
            # Dispatch by name: each tosearch key must be an artifact-parser
            # function brought in by the scripts.* star imports.
            globals()[key](filefound)
            for pathh in filefound:
                log.write(f'Files for {val} located at {pathh}.<br>')
    log.close()
elif extracttype == 'tar':
    logfunc(f'Artifact categories to parse: {str(len(tosearch))}')
    logfunc(f'File/Directory selected: {pathto}')
    logfunc('\n--------------------------------------------------------------------------------------')
    log = open(reportfolderbase+'Script Logs/ProcessedFilesLog.html', 'w+', encoding='utf8')
    nl = '\n' #literal in order to have new lines in fstrings that create text files
    log.write(f'Extraction/Path selected: {pathto}<br><br>') # tar searches and function calls
    t = TarFile(pathto)
    for key, val in tosearch.items():
        filefound = searchtar(t, val, reportfolderbase)
        if not filefound:
            logfunc()
            logfunc(f'No files found for {key} -> {val}.')
            log.write(f'No files found for {key} -> {val}.<br>')
        else:
            logfunc()
            globals()[key](filefound)
            for pathh in filefound:
                log.write(f'Files for {val} located at {pathh}.<br>')
    log.close()
elif extracttype == 'zip':
    logfunc(f'Artifact categories to parse: {str(len(tosearch))}')
    logfunc(f'File/Directory selected: {pathto}')
    logfunc('\n--------------------------------------------------------------------------------------')
    logfunc('')
    log = open(reportfolderbase+'Script Logs/ProcessedFilesLog.html', 'w+', encoding='utf8')
    log.write(f'Extraction/Path selected: {pathto}<br><br>') # tar searches and function calls
    z = ZipFile(pathto)
    name_list = z.namelist()
    for key, val in tosearch.items():
        filefound = searchzip(z, name_list, val, reportfolderbase)
        if not filefound:
            logfunc('')
            logfunc(f'No files found for {key} -> {val}.')
            log.write(f'No files found for {key} -> {val}.<br>')
        else:
            logfunc('')
            globals()[key](filefound)
            for pathh in filefound:
                log.write(f'Files for {val} located at {pathh}.<br>')
    log.close()
    z.close()
else:
    # argparse's choices= should make this unreachable, but keep a guard.
    logfunc('Error on argument -o')
'''
if os.path.exists(reportfolderbase+'temp/'):
    shutil.rmtree(reportfolderbase+'temp/')
#call reporting script
'''
#logfunc(f'iOS version: {versionf} ')
logfunc('')
logfunc('Processes completed.')
end = process_time()
# NOTE(review): start - end is negative; abs() compensates in the log lines
# below, but the raw negative value is what gets passed to report() -- confirm
# that report() expects/handles a negative elapsed time.
time = start - end
logfunc("Processing time: " + str(abs(time)) )

# Append the elapsed time to the processed-files log created earlier.
log = open(reportfolderbase+'Script Logs/ProcessedFilesLog.html', 'a', encoding='utf8')
log.write(f'Processing time in secs: {str(abs(time))}')
log.close()

logfunc('')
logfunc('Report generation started.')
report(reportfolderbase, time, extracttype, pathto)
logfunc('Report generation Completed.')
logfunc('')
logfunc(f'Report name: {reportfolderbase}')
|
# tableau_db_connection.py
# =================================================
# Establishes connections to the Tableau PostgreSQL
# database.
#
# Database parameters are defined in
# moniteur_settings.py.
#
# =================================================
# =================================================
# imports
import psycopg2
import psycopg2.extras
import moniteur_settings
import logging
# =================================================
# Error logging
logging.basicConfig(filename='moniteur.log', level=logging.DEBUG)
# =================================================
# TABLEAU CONNECTION DECORATOR
# -------------------------------------------------
def tableau_db(func):
    """
    Decorator wrapping *func* in a transaction against the Tableau
    'workgroup' PostgreSQL database.

    The wrapped function receives a RealDictCursor as its first argument
    (rows come back as lists of dicts keyed by column name); any other
    arguments are passed through unchanged.  The transaction is committed on
    success, rolled back on error, and the connection is always closed
    (the original never committed and leaked the connection).
    """
    from functools import wraps

    @wraps(func)  # preserves __name__/__doc__ without manual copying
    def new_func(*args, **kwargs):
        conn = psycopg2.connect(database=moniteur_settings.TABLEAU_DB["dbname"],
                                user=moniteur_settings.TABLEAU_DB["user"],
                                password=moniteur_settings.TABLEAU_DB["password"],
                                host=moniteur_settings.TABLEAU_DB["host"],
                                port=moniteur_settings.TABLEAU_DB["port"])
        try:
            # RealDictCursor returns lists of dictionaries, each dictionary
            # containing a row of data.
            cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            try:
                retval = func(cursor, *args, **kwargs)
            except Exception:
                logging.info('Error connecting to the Tableau Postgres workgroup database.')
                conn.rollback()  # undo any partial work before re-raising
                raise
            finally:
                cursor.close()
            conn.commit()  # make the wrapped function's writes durable
            return retval
        finally:
            conn.close()  # fix: the connection was previously never closed
    return new_func
|
from django.conf.urls.defaults import patterns, url, include
from django.contrib import admin
from django_roa_client.views import home
# Populate the admin registry from the installed apps.
admin.autodiscover()

# URL routes: the Django admin plus the ROA client home page.
# NOTE(review): django.conf.urls.defaults and patterns() were removed in
# Django 1.6/1.8 -- this file targets a legacy Django release.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', home),
)
|
import logging
import pytest
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import tier1, skipif_no_kms
from ocs_ci.framework.testlib import MCGTest
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources import pod
logger = logging.getLogger(__name__)
@skipif_no_kms
class TestNoobaaKMS(MCGTest):
    """
    Test KMS integration with NooBaa
    """

    @tier1
    @pytest.mark.polarion_id("OCS-2485")
    def test_noobaa_kms_validation(self):
        """
        Validate from logs that there is successfully used NooBaa with KMS integration.
        """
        logger.info("Getting the noobaa-operator pod and it's relevant metadata")
        noobaa_operator = pod.get_pods_having_label(
            label=constants.NOOBAA_OPERATOR_POD_LABEL,
            namespace=config.ENV_DATA["cluster_namespace"],
        )[0]
        pod_name = noobaa_operator["metadata"]["name"]
        restarts = noobaa_operator["status"]["containerStatuses"][0]["restartCount"]

        logger.info("Looking for evidence of KMS integration in the logs of the pod")
        expected_entry = "setKMSConditionType " + config.ENV_DATA["KMS_PROVIDER"]
        found = expected_entry in pod.get_pod_logs(pod_name=pod_name)

        if not found and restarts > 0:
            # The evidence may predate the latest container restart, so fall
            # back to the previous container's logs.
            logger.info("Checking the logs before the last pod restart")
            found = expected_entry in pod.get_pod_logs(pod_name=pod_name, previous=True)

        assert found, "No records were found of the integration of NooBaa and KMS"
|
from pynetest.expectations import expect
from pynetest.lib.matchers.matches_list_matcher import MatchesListMatcher
from pynetest.matchers import about
def test__matches_list_matcher__can_match():
    # Equal contents in equal order should match.
    expect([1, 2, 3, "banana"]).to_be(MatchesListMatcher([1, 2, 3, "banana"]))


def test__matches_list_matcher__when_lists_have_different_lengths__does_not_match():
    # Length mismatch fails in either direction.
    expect([1, 2, 3, 4]).not_to_be(MatchesListMatcher([1, 2, 3, 4, 4]))
    expect([1, 2, 3, 4, 4]).not_to_be(MatchesListMatcher([1, 2, 3, 4]))


def test__matches_list_matcher__when_lists_contain_different_items__does_not_match():
    expect([1, 2, "banana"]).not_to_be(MatchesListMatcher([1, 3, "banana"]))


def test__matches_list_matcher__when_list_is_the_same_instance__does_not_match():
    # The matcher demands a distinct-but-equal list, so identity is rejected.
    some_list = [1, 2, 3, 4]
    expect(some_list).not_to_be(MatchesListMatcher(some_list))


def test__matches_list_matcher__when_comparing_empty_tuples__matches():
    # Empty tuples are interned by CPython, yet they should still match.
    expect(()).to_be(MatchesListMatcher(()))


def test__matches_list_matcher__when_list_is_the_same_instance__explains_why_not():
    some_list = [1, 2, 3, 4]
    matcher = MatchesListMatcher(some_list)
    matcher.matches(some_list)
    expect(matcher.reason()).to_contain("it was the exact same instance")


def test__matches_list_matcher__supports_matchers_in_the_list():
    # Elements of the expected list may themselves be matchers (e.g. about()).
    expect([1]).to_be(MatchesListMatcher([about(1)]))
|
import comm
import config
import os
import sys
import xbmc
import xbmcgui
import xbmcplugin
from aussieaddonscommon import utils
# Handle for this plugin invocation, passed by Kodi as the second argv entry.
pluginhandle = int(sys.argv[1])


def play(params):
    """Resolve and play the stream identified by params['video_id'].

    Builds a Kodi ListItem configured for HLS playback through the
    inputstream.adaptive add-on and hands it back via setResolvedUrl.
    Errors are reported through the add-on's common error handler.
    """
    try:
        # NOTE(review): 'success' is assigned but never used.
        success = True
        stream = comm.get_stream(params['video_id'])
        utils.log('Attempting to play: {0} {1}'.format(stream['name'],
                                                      stream['url']))
        item = xbmcgui.ListItem(label=stream['name'],
                                path=stream['url'])
        # Configure adaptive HLS playback.
        item.setProperty('inputstreamaddon', 'inputstream.adaptive')
        item.setProperty('inputstream.adaptive.manifest_type', 'hls')
        item.setMimeType('application/vnd.apple.mpegurl')
        item.setContentLookup(False)
        xbmcplugin.setResolvedUrl(pluginhandle, True, listitem=item)
    except Exception:
        utils.handle_error('Unable to play video')
|
from nose.tools import assert_equal
class Solution:
    """Interleaving-string check (LeetCode 97)."""

    # @return a boolean
    def isInterleave(self, s1, s2, s3):
        # The DP variant is the one actually used; the recursive variant
        # is kept for reference but is too slow (TLE).
        return self._check_dp(s1, s2, s3)

    def _check_dp(self, s1, s2, s3):
        """O(len(s1) * len(s2)) dynamic programme over prefix pairs."""
        len1, len2 = len(s1), len(s2)
        if len1 + len2 != len(s3):
            return False
        # reachable[i][j]: s3[:i+j] is an interleaving of s1[:i] and s2[:j].
        reachable = [[False] * (len2 + 1) for _ in range(len1 + 1)]
        reachable[0][0] = True
        # First column: s3 built from s1 alone.
        for i in range(1, len1 + 1):
            if s1[i - 1] != s3[i - 1]:
                break
            reachable[i][0] = True
        # First row: s3 built from s2 alone.
        for j in range(1, len2 + 1):
            if s2[j - 1] != s3[j - 1]:
                break
            reachable[0][j] = True
        for i in range(1, len1 + 1):
            for j in range(1, len2 + 1):
                from_s1 = reachable[i - 1][j] and s1[i - 1] == s3[i + j - 1]
                from_s2 = reachable[i][j - 1] and s2[j - 1] == s3[i + j - 1]
                reachable[i][j] = from_s1 or from_s2
        return reachable[len1][len2]

    def _check_recursive_tle(self, s1, s2, s3):
        """Plain recursion with a letter-count pre-check; exponential (TLE)."""
        if not s1:
            return s2 == s3
        if not s2:
            return s1 == s3
        if len(s1) + len(s2) != len(s3):
            return False
        # Quick reject: s3 must use exactly the letters of s1 + s2
        # (assumes lowercase ASCII input).
        counts = [0] * 26
        base = ord('a')
        for ch in s1:
            counts[ord(ch) - base] += 1
        for ch in s2:
            counts[ord(ch) - base] += 1
        for ch in s3:
            counts[ord(ch) - base] -= 1
        if any(counts):
            return False
        if s1[0] == s2[0]:
            # Ambiguous head: both sources could supply s3[0]; try each.
            if s1[0] != s3[0]:
                return False
            return (self._check_recursive_tle(s1[1:], s2, s3[1:])
                    or self._check_recursive_tle(s1, s2[1:], s3[1:]))
        if s1[0] == s3[0]:
            return self._check_recursive_tle(s1[1:], s2, s3[1:])
        if s2[0] == s3[0]:
            return self._check_recursive_tle(s1, s2[1:], s3[1:])
        return False
class TestSolution(object):
    """nose-style tests for Solution.isInterleave."""

    def test_simple(self):
        # Placeholder; no trivial cases exercised yet.
        pass

    def test_example(self):
        # LeetCode's published examples plus two adversarial cases.
        s = Solution()
        assert_equal(s.isInterleave("aabcc", "dbbca", "aadbbcbcac"), True)
        assert_equal(s.isInterleave("aabcc", "dbbca", "aadbbbaccc"), False)
        assert_equal(s.isInterleave("a", "b", "a"), False)
        assert_equal(s.isInterleave("aacaac", "aacaaeaac", "aacaaeaaeaacaac"), False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.