code
stringlengths 1
199k
|
|---|
import shutil
import tempfile
import sys
import libtorrent as lt
from time import sleep
import string
def magnet2torrent(magnet):
    """Resolve a magnet link to the list of files it contains.

    Downloads only the torrent metadata via libtorrent (never the payload),
    then returns the basenames of the files in the torrent.

    :param magnet: magnet URI string.
    :return: list of file names, or False if metadata could not be fetched.
    """
    tempdir = tempfile.mkdtemp()
    ses = lt.session()
    params = {
        'save_path': tempdir,
        # NOTE: this key was listed twice in the original dict literal.
        'duplicate_is_error': True,
        'storage_mode': lt.storage_mode_t(2),
        'paused': False,
        'auto_managed': True,
    }
    handle = lt.add_magnet_uri(ses, magnet, params)
    # Wait for metadata; restart a stalled download, but give up after
    # three restarts instead of waiting forever.
    count = 0
    while not handle.has_metadata():
        try:
            sleep(1)
            s = handle.status()
            if s.active_time > 600:
                count += 1
                # Download stalled: drop the torrent and re-add it.
                ses.pause()
                ses.remove_torrent(handle)
                ses.resume()
                handle = lt.add_magnet_uri(ses, magnet, params)
                if count > 2:
                    break
        except KeyboardInterrupt:
            # Abort cleanly: stop the session and remove the temp dir.
            ses.pause()
            shutil.rmtree(tempdir, ignore_errors=True)
            sys.exit(0)
    ses.pause()
    filelist = []
    if handle.has_metadata():
        torinfo = handle.get_torrent_info()
        for f in torinfo.files():
            # Keep only the last path component (assumes backslash-separated
            # paths, as in the original code — TODO confirm on this platform).
            filelist.append(f.path.split('\\')[-1])
    ses.remove_torrent(handle)
    # Bounded cleanup retry: the original looped forever on a bare except,
    # which could hang the process if the directory stayed locked.
    for _ in range(10):
        try:
            shutil.rmtree(tempdir)
            break
        except OSError:
            sleep(0.5)
    else:
        # Last resort: best-effort removal, never raise from cleanup.
        shutil.rmtree(tempdir, ignore_errors=True)
    if not filelist:
        return False
    return filelist
|
from distutils.core import setup
# Packaging metadata for the DMS distribution (distutils-style setup.py).
setup(name='DMS',
version='1.0.8',
description='DMS Master System',
author='Matthew Grant',
author_email='matt@mattgrant.net.nz',
url='http://mattgrant.net.nz/software/dms',
packages=['dms', 'dms.app', 'dms.database'])
|
"""
This is a modified version of django-avatar.
This is just a copy of the django-avatar app from https://github.com/grantmcconnaughey/django-avatar
but modified to work when avatars are linked to contacts, not a user model at all and I'm loathe
to shoehorn the Contact model into a User model definition just for the sake of getting the app to
work so I'm overwriting it on the whole.
"""
__version__ = '2.2.1'
|
import re
import os
import requests
import xlrd
import urllib
import time
from bs4 import BeautifulSoup
import zipfile
import getpass
save_url = "http://tkkc.hfut.edu.cn/student/exam/manageExam.do?1479131327464&method=saveAnswer"
index = 1
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/41.0",
"Host": "tkkc.hfut.edu.cn",
"X-Requested-With": "XMLHttpRequest",
# 'Content-Type': 'application/json,text/javascript,*/*'
}
ses = requests.session()
ID = input("请输入学号\n")
Pwd = getpass.getpass("请输入密码\n")
login_url = "http://tkkc.hfut.edu.cn/login.do?"
main_url = "http://tkkc.hfut.edu.cn"
main1_url = "http://tkkc.hfut.edu.cn/student/index.do"
def getcode():
    """Fetch the login captcha image and send it to the solver service.

    Returns the solver's HTTP response; its ``.text`` is the decoded code.
    """
    captcha = ses.get("http://tkkc.hfut.edu.cn/getRandomImage.do")
    encoded = urllib.parse.quote_from_bytes(captcha.content)
    return ses.post('http://api.hfutoyj.cn/codeapi', data={'image': encoded})
def get_new_data():
    """Scrape the dynamic 'announce' field name from the login page."""
    page = ses.get(login_url).text
    return re.findall(r'name="(.*?)" value="announce"', page)[0]
# Login loop: rlt counts down from 10; stop trying once it reaches 3
# (i.e. at most 7 attempts), re-prompting on bad credentials.
rlt = 10
times = 1
while rlt > 3:
announce = get_new_data()
code = getcode().text
print("Trying " + ' ' + code)
logInfo = {
announce: 'announce',
'loginMethod': '{}button'.format(announce),
"logname": ID,
"password": Pwd,
"randomCode": code
}
res = ses.post(login_url, data=logInfo, headers=headers)
# print(res.text)
time.sleep(0.01)
times += 1
rlt -= 1
# The server reports failures as Chinese phrases embedded in the page.
if res.text.find("验证码错误") != -1:
print("Wrong verify code, Trying again ...")
continue
elif res.text.find("身份验证服务器未建立连接") != -1:
print("Wrong student number, Check and reinput please ...")
ID = input("请重新输入学号\n")
continue
elif res.text.find("密码不正确") != -1:
print("Wrong password, Check and reinput please ...")
Pwd = getpass.getpass("请输入密码\n")
continue
else:
print('Login Success !')
break
else:
# while/else: ran out of attempts without breaking.
print("Maybe you typed wrong password")
# Dict mapping question text -> answer, loaded from the Excel bank below.
result = dict()
def craw(url, retries=2):
    """Fetch one exercise page and extract its question title.

    Retries up to `retries` more times on any error; returns '' when
    every attempt has failed.
    """
    try:
        resp = ses.post(url, headers=headers)
        resp.encoding = 'utf-8'
        return re.findall(r' (.*?)","', resp.text, re.S)[0]
    except Exception as err:
        print(err)
        if retries > 0:
            return craw(url, retries=retries - 1)
        print("get failed", index)
        return ''
def answer_func(title):
    """Look up the stored answer for a question title ('Not Found' if absent)."""
    if title in result:
        return result[title]
    return "Not Found"
def submit(ans, id, id2, id3, id4, index, retries=2):
    """POST one answer to the exam save endpoint.

    ans -- answer string, e.g. 'A', 'ABD', or 正确/错误 for true/false;
    id -- exerciseId; id2 -- examStudentExerciseId; id3 -- examReplyId;
    id4 -- examId; index -- running question number (logging only).
    Retries up to `retries` more times on any error; returns '' if all fail.
    """
    dx = ["false", "false", "false", "false", "false"]
    try:
        # Multiple choice: one boolean flag per option letter.
        for pos, letter in enumerate("ABCDE"):
            if ans.find(letter) != -1:
                dx[pos] = "true"
        # True/false questions arrive as 正确/错误; map to A/B.
        if ans.find('正确') != -1:
            ans = "A"
        if ans.find('错误') != -1:
            ans = "B"
        payload = {"examReplyId": id3,
                   "examStudentExerciseId": id2,
                   "exerciseId": id,
                   "examId": id4,
                   "DXanswer": ans,
                   "duoxAnswer": ans,
                   "PDanswer": ans,
                   "DuoXanswerA": dx[0],
                   "DuoXanswerB": dx[1],
                   "DuoXanswerC": dx[2],
                   "DuoXanswerD": dx[3],
                   "DuoXanswerE": dx[4],
                   # Some question banks take five separate fields, others
                   # a single combined one, so both styles are sent.
                   "DuoXanswer": ans}
        body = ses.post(save_url, data=payload, headers=headers)
        print(body.text, index)
    except Exception as err:
        print(err)
        if retries > 0:
            return submit(ans, id, id2, id3, id4, index, retries=retries - 1)
        print("get failed", index)
        return ''
# Main driver. finished == 2: pick a course and post two discussion
# replies; finished == 0: download the question bank and auto-answer.
finished = 2
while (finished == 2) or (finished == 0):
if finished == 2:
# List available course ids and let the user pick one.
html = ses.get(main1_url)
html = html.text
s = re.findall(r'courseId=(\d+)', html, re.M)
print("获取课程ID成功")
ss = list(set(s))
ss.sort(key=s.index)
print(ss)
courseId = input("请输入上一句打印出来的课程Id来进行自动化讨论和做题(如全部刷完请输入0退出):")
temp = courseId
course_url = main_url + "/student/teachingTask/coursehomepage.do?courseId=" + courseId
course_url = ses.get(course_url).text
# print("获取TaskId成功")
s1 = re.findall(r'teachingTaskId=(\d+)', course_url, re.M)
TaskId_url = main_url + "/student/resource/index.do?teachingTaskId=" + s1[0]
taskhomepage_url = main_url + "/student/teachingTask/taskhomepage.do?&teachingTaskId=" + s1[0]
taskhomepage_url = ses.get(taskhomepage_url).text
TaskId_url = ses.get(TaskId_url).text
faq_url = main_url + "/student/bbs/index.do?teachingTaskId=" + s1[0]
faq_url = ses.get(faq_url).text
s4 = re.findall(r'discussId=(\d+)', faq_url, re.M)
s5 = re.findall(r'forumId=(\d+)', faq_url, re.M)
# First discussion: copy the last <p> of the thread and repost it.
discuss_url = main_url + "/student/bbs/manageDiscuss.do?&method=view&teachingTaskId=" + s1[0] + "&discussId=" + \
s4[0]
discuss_url = ses.get(discuss_url).text
post_url = main_url + "/student/bbs/manageDiscuss.do?method=reply"
soup = BeautifulSoup(discuss_url, "lxml")
content = soup.find_all("p")
if len(content):
content = content[-1]
post_data = {'discussId': s4[0], 'forumId': s5[0], 'type': 1, 'teachingTaskId': s1[0], 'content': content}
else:
# Empty thread: fall back to the third discussion id.
discuss_url = main_url + "/student/bbs/manageDiscuss.do?&method=view&teachingTaskId=" + s1[
0] + "&discussId=" + s4[2]
discuss_url = ses.get(discuss_url).text
soup = BeautifulSoup(discuss_url, "lxml")
content = soup.find_all("p")
content = content[-1]
post_data = {'discussId': s4[2], 'forumId': s5[0], 'type': 1, 'teachingTaskId': s1[0], 'content': content}
# print (content)
p = ses.post(post_url, data=post_data, headers=headers)
# print ("参加讨论成功一次")
# Second discussion, same approach with a different id.
discuss_url1 = main_url + "/student/bbs/manageDiscuss.do?&method=view&teachingTaskId=" + s1[0] + "&discussId=" + \
s4[1]
discuss_url1 = ses.get(discuss_url1).text
soup = BeautifulSoup(discuss_url1, "lxml")
content1 = soup.find_all("p")
if len(content1):
content1 = content1[-1]
post_data1 = {'discussId': s4[1], 'forumId': s5[0], 'type': 1, 'teachingTaskId': s1[0], 'content': content1}
else:
discuss_url1 = main_url + "/student/bbs/manageDiscuss.do?&method=view&teachingTaskId=" + s1[
0] + "&discussId=" + s4[3]
discuss_url1 = ses.get(discuss_url1).text
soup = BeautifulSoup(discuss_url1, "lxml")
content1 = soup.find_all("p")
content1 = content1[-1]
post_data1 = {'discussId': s4[3], 'forumId': s5[0], 'type': 1, 'teachingTaskId': s1[0], 'content': content1}
# content1 = content1[-1]
# print (content1)
p1 = ses.post(post_url, data=post_data1, headers=headers)
print("参加讨论成功两次")
finished = 0
if finished == 0:
# Phase 2: fetch the question-bank zip and answer the exercises.
html = ses.get(main1_url).text
s = re.findall(r'courseId=(\d+)', html, re.M)
# print("获取课程ID成功")
ss = list(set(s))
ss.sort(key=s.index)
# print (ss)
courseId = temp
# print("获取TaskId成功")
s1 = re.findall(r'teachingTaskId=(\d+)', course_url, re.M)
s2 = re.findall(r'"id":(\d+)', TaskId_url, re.M)
down_url = main_url + "/filePreviewServlet?indirect=true&resourceId=" + s2[0]
# print("获取下载链接成功")
d = ses.get(down_url)
with open("excel.zip", "wb") as code:
code.write(d.content)
# Unpack every zip in the working directory, then delete the archives.
file_list = os.listdir(r'.')
for file_name in file_list:
if os.path.splitext(file_name)[1] == '.zip':
print("下载题库文件并解压完成")
file_zip = zipfile.ZipFile(file_name, 'r')
for file in file_zip.namelist():
file_zip.extract(file, r'.')
file_zip.close()
os.remove(file_name)
s3 = re.findall(r'assignmentId=(\d+)', taskhomepage_url, re.M)
# print (s3)
s4 = re.findall(r'examId=(\d+)', taskhomepage_url, re.M)
# exam_url = main_url+"/student/exam/manageExam.do?&method=doExam&examId="+s4[0]
# Build one URL per assignment found on the task home page.
test_url = main_url + "/student/assignment/manageAssignment.do?method=doAssignment&assignmentId=" + s3[0]
test_url2 = main_url + "/student/assignment/manageAssignment.do?method=doAssignment&assignmentId=" + s3[1]
test_url3 = main_url + "/student/assignment/manageAssignment.do?method=doAssignment&assignmentId=" + s3[2]
if len(s3) == 3:
start_url_list = [test_url, test_url2, test_url3]
elif len(s3) == 4:
test_url4 = main_url + "/student/assignment/manageAssignment.do?method=doAssignment&assignmentId=" + s3[3]
start_url_list = [test_url, test_url2, test_url3, test_url4]
else:
# NOTE(review): this branch reads s3[4] and test_url4 without the
# len(s3) == 4 branch having run — verify it is reachable safely.
test_url5 = main_url + "/student/assignment/manageAssignment.do?method=doAssignment&assignmentId=" + s3[4]
start_url_list = [test_url, test_url2, test_url3, test_url4, test_url5]
# print("获取练习题目链接成功")
# print (test_url,"\n",test_url2,'\n',test_url3)
# start_url_list = [test_url,test_url2,test_url3,test_url4]
for start_url in start_url_list:
# print (exam_url)
# start_url = input("请输入练习题目链接(就在上面↑)\n")
myfile = xlrd.open_workbook('exercise.xls')
lenOfXls = len(myfile.sheets())
# List of sheet names in the question-bank workbook.
sheet_names = myfile.sheet_names()
# Question-bank Excel layout codes:
# 3: single-choice / multi-choice / true-false sheets
# 2: single-choice / multi-choice
# 1: single-choice / true-false (original comment labelled this "3")
if len(sheet_names) == 3:
excel_type = 3
elif '多选题' in sheet_names:
excel_type = 2
else:
excel_type = 1
# Read questions and answers from the workbook into `result`.
# (Kept inside the loop so the user can swap in another course's
# Excel file without restarting the program.)
for x in range(0, lenOfXls):
xls = myfile.sheets()[x]
for i in range(1, xls.nrows):
title = xls.cell(i, 0).value.strip()
if x == 1 and lenOfXls == 2:
if excel_type == 2:
answer = xls.cell(i, 7).value
else:
answer = xls.cell(i, 2).value
elif x == 1 and lenOfXls == 3:
answer = xls.cell(i, 7).value
elif x == 2 and lenOfXls == 3:
answer = xls.cell(i, 2).value
else:
answer = xls.cell(i, 7).value
result[title] = answer
body = ses.get(start_url, headers=headers)
body.encoding = 'utf-8'
wb_data = body.text
# print(wb_data)
# Pull the exercise ids embedded in the page's eval(...) javascript.
eval = re.findall(r'eval(.*?)]\);', wb_data, re.S)[0]
bs = BeautifulSoup(wb_data, 'lxml')
val = bs.form.input
examReplyId = val['value']
examId = re.findall(r'<input type="hidden" name="examId" id="examId" value="(.*?)" />', wb_data, re.S)[0]
exerciseId = re.findall(r'exerciseId":(.*?),', eval, re.S)
examSEId = re.findall(r'examStudentExerciseId":(.*?),', eval, re.S)
examStudentExerciseId = re.findall(r'"examStudentExerciseId":(.*?),"exerciseId"',
wb_data, re.S)[0]
examStudentExerciseId = int(examStudentExerciseId)
# id maps to exerciseId; id2 maps to examStudentExerciseId.
for id in exerciseId:
next_url = r"http://tkkc.hfut.edu.cn/student/exam/manageExam.do?method=getExerciseInfo&examReplyId=%s&exerciseId=%s&examStudentExerciseId=%d" % (
examReplyId, id, examStudentExerciseId)
title = craw(next_url).strip()
ans = answer_func(title)
submit(ans, id, examStudentExerciseId, examReplyId, examId, index)
# time.sleep(1)
index += 1
examStudentExerciseId = examStudentExerciseId + 1
# input() returns strings, hence the int() conversion above.
finished = 2
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# Schema migration: change Extract.money to a FloatField labelled 'Money'.
dependencies = [
('core', '0002_auto_20170223_1850'),
]
operations = [
migrations.AlterField(
model_name='extract',
name='money',
field=models.FloatField(verbose_name='Money'),
),
]
|
import sys
# Py2/Py3 compatibility: on Python 3, alias the builtins removed from Py2.
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
from core import tmdb
from core import httptools
from core.item import Item
from core import servertools
from core import scrapertools
from bs4 import BeautifulSoup
from channelselector import get_thumb
from platformcode import config, logger
from channels import filtertools, autoplay
# Channel configuration: language/quality filters, known hosters, site root.
list_language = list()
list_quality = []
list_servers = ['supervideo', "vidcloud", "myvy"]
host = "https://mycinedesiempre.blogspot.com/"
def mainlist(item):
    """Build the channel's root menu (latest, labels, genres, search)."""
    logger.info()
    itemlist = list()
    autoplay.init(item.channel, list_servers, list_quality)
    # (title, url, thumb key) for the plain listing entries, in menu order.
    listings = [
        ("Ultimas", host, 'last'),
        ("Cine Español", host + "search/label/España", 'españolas'),
        ("Cine Latino", host + "search/label/Latino", 'latino'),
        ("Asiaticas", host + "search/label/Asiático", 'asiaticas'),
    ]
    for title, url, thumb in listings:
        itemlist.append(Item(channel=item.channel, title=title, url=url,
                             action="list_all",
                             thumbnail=get_thumb(thumb, auto=True)))
    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
                         thumbnail=get_thumb('genres', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Buscar...", url=host + 'search?q=',
                         action="search", thumbnail=get_thumb('search', auto=True)))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def create_soup(url, referer=None, unescape=False):
    """Download a page and parse it with BeautifulSoup.

    referer  -- optional Referer header to send with the request.
    unescape -- when True, HTML-unescape the payload before parsing.
    """
    logger.info()
    kwargs = {'headers': {'Referer': referer}} if referer else {}
    data = httptools.downloadpage(url, **kwargs).data
    if unescape:
        data = scrapertools.unescape(data)
    return BeautifulSoup(data, "html5lib", from_encoding="utf-8")
def list_all(item):
# List the movies on one index page and add a "next page" entry.
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find("div", class_="main section").find_all("div", class_="post bar hentry")
for elem in matches:
url = elem.h2.a["href"]
# Strip trailing "- ..." / "(...)" decorations from the title.
title = re.sub(r'-.*|\(.*', '', elem.h2.text).strip()
thumb = elem.img["src"]
try:
year = elem.find("dd", itemprop="datePublished").text
except:
# Year is optional on the page; "-" is the channel's placeholder.
year = "-"
itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos',
thumbnail=thumb, contentTitle=title, infoLabels={'year': year}))
# Enrich items with TMDB metadata before returning them.
tmdb.set_infoLabels_itemlist(itemlist, True)
try:
url_next_page = soup.find("a", class_="blog-pager-older-link")["href"]
if url_next_page and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
except:
# No pager on the last page; silently omit the "next" entry.
pass
return itemlist
def section(item):
# Build the genre submenu from the tag links on the home page.
# NOTE(review): returns None implicitly when item.title != "Generos";
# currently only called with that title — confirm before reuse.
logger.info()
itemlist = list()
listed = list()
soup = create_soup(host)
if item.title == "Generos":
matches = soup.find_all("a", href=re.compile(r"%ssearch.*?" % host), rel="tag")
for elem in matches:
url = elem["href"]
title = elem.text
# Deduplicate: the same tag URL can appear several times on the page.
if url not in listed:
itemlist.append(Item(channel=item.channel, title=title, action="list_all", url=url))
listed.append(url)
return sorted(itemlist, key=lambda x: x.title)
def findvideos(item):
# Collect the playable links of one movie page.
logger.info()
itemlist = list()
soup = create_soup(item.url).find("div", class_="post bar hentry")
red_links = soup.find_all("a", target="_blank")
for link in red_links:
url = link["href"]
# Skip info-only links (filmaffinity is not a video host).
if 'filmaffinity' in url:
continue
language = get_lang(link.text)
itemlist.append(Item(channel=item.channel, title='%s', action='play', url=url,
infoLabels=item.infoLabels, language=language))
try:
# Embedded player, if the page has one.
player_src = (soup.find("iframe")["src"])
itemlist.append(Item(channel=item.channel, title='%s', action='play', url=player_src, infoLabels=item.infoLabels))
except:
pass
itemlist = servertools.get_servers_itemlist(itemlist)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
    """Global-search entry point: query the site for `texto`."""
    logger.info()
    try:
        query = texto.replace(" ", "+")
        item.url = item.url + query
        if query == '':
            return []
        return list_all(item)
    # Swallow errors so one broken channel can't kill the global search.
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def get_lang(title):
    """Map a link label to the channel's language code ('' when unknown)."""
    dict_lang = {'castellano': 'CAST', 'spanish': 'CAST',
                 'latino': 'LATINO', 'vose': 'VOSE',
                 'subtitulado': 'VOSE'
                 }
    lowered = title.lower()
    # First matching keyword wins, in the dict's insertion order.
    for keyword, code in dict_lang.items():
        if keyword in lowered:
            return code
    return ''
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import push_basedir
__all__ = ['Playbook']
class Playbook:
# Represents one playbook file: an ordered list of plays and/or includes.
def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = os.getcwd()
self._loader = loader
@staticmethod
def load(file_name, variable_manager=None, loader=None):
# Factory: build a Playbook and parse file_name into it.
pb = Playbook(loader=loader)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
def _load_playbook_data(self, file_name, variable_manager):
# Anchor relative playbook paths on the current basedir.
if os.path.isabs(file_name):
self._basedir = os.path.dirname(file_name)
else:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
self._loader.set_basedir(self._basedir)
# also add the basedir to the list of module directories
push_basedir(self._basedir)
ds = self._loader.load_from_file(os.path.basename(file_name))
if not isinstance(ds, list):
raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
# Parse the playbook entries. For plays, we simply parse them
# using the Play() object, and includes are parsed using the
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
if 'include' in entry:
entry_obj = PlaybookInclude.load(entry, variable_manager=variable_manager, loader=self._loader)
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
self._entries.append(entry_obj)
def get_loader(self):
# Accessor for the DataLoader this playbook was built with.
return self._loader
def get_entries(self):
# Return a shallow copy so callers cannot mutate our entry list.
return self._entries[:]
|
class Hand(object):
    """A hand of cards, each card stored as a two-character pair."""

    def __init__(self):
        self.hand = []

    def add(self, card):
        """Append one card to the hand."""
        self.hand.append(card)

    def create(self, typed_hand):
        """Fill the hand from a typed string of character pairs.

        e.g. "hJdA" -> [('h', 'J'), ('d', 'A')].
        Raises IndexError if the string has odd length.
        """
        for pos in range(0, len(typed_hand), 2):
            card = (typed_hand[pos], typed_hand[pos + 1])
            self.add(card)

    def status(self):
        """Return the underlying card list."""
        return self.hand
if __name__ == '__main__':
# Quick demo: build a hand from a typed string and print it.
hand = Hand()
hand.create("hJdA")
print(hand.status())
|
"""Unit tests for the :mod:`iris.fileformats` package."""
import iris.tests as tests
class TestField(tests.IrisTest):
# Shared helper assertions for fileformats field-conversion tests.
def _test_for_coord(self, field, convert, coord_predicate, expected_points,
expected_bounds):
# Run the conversion and unpack its metadata tuple.
(factories, references, standard_name, long_name, units,
attributes, cell_methods, dim_coords_and_dims,
aux_coords_and_dims) = convert(field)
# Check for one and only one matching coordinate.
coords_and_dims = dim_coords_and_dims + aux_coords_and_dims
matching_coords = [coord for coord, _ in coords_and_dims if
coord_predicate(coord)]
self.assertEqual(len(matching_coords), 1, str(matching_coords))
coord = matching_coords[0]
# Check points and bounds.
if expected_points is not None:
self.assertArrayEqual(coord.points, expected_points)
if expected_bounds is None:
self.assertIsNone(coord.bounds)
else:
self.assertArrayEqual(coord.bounds, expected_bounds)
def assertCoordsAndDimsListsMatch(self, coords_and_dims_got,
coords_and_dims_expected):
"""
Check that coords_and_dims lists are equivalent.
The arguments are lists of pairs of (coordinate, dimensions).
The elements are compared one-to-one, by coordinate name (so the order
of the lists is _not_ significant).
"""
def sorted_by_coordname(list):
# Canonical order: sort each list by coordinate name.
return sorted(list, key=lambda item: item[0].name())
coords_and_dims_got = sorted_by_coordname(coords_and_dims_got)
coords_and_dims_expected = sorted_by_coordname(
coords_and_dims_expected)
self.assertEqual(coords_and_dims_got, coords_and_dims_expected)
|
import requests
import sys

# LibGuides A-Z API endpoints (EU instance).
base_url = 'http://lgapi-eu.libapps.com/1.2/az'
auth_url = 'http://lgapi-eu.libapps.com/1.2/oauth/token'
# NOTE(security): client credentials are hard-coded; move them to an
# environment variable or a config file before publishing this script.
auth_credentials = {'client_id': '267',
                    'client_secret': '47c133be1eff42f213051f55865bd59b',
                    'grant_type': 'client_credentials'}
try:
    # OAuth2 client-credentials grant; the JSON response carries
    # 'access_token' (plus expires_in/scope/token_type).
    r = requests.post(auth_url, data=auth_credentials)
    token = r.json()['access_token']
except Exception:
    # Was a bare except with a Python-2 print statement (a syntax error
    # under Python 3); fixed to the print() function and narrowed.
    print('Unable to authenticate')
    sys.exit(0)

# Create a test A-Z entry using the bearer token.
data = {"name": "This is a test", "description": "This test uses the API", "enable_new": "1",
        "owner_id": 134336}
headers = {"Authorization": "Bearer %s" % token}
r = requests.post(base_url, data=data, headers=headers)
|
import click
import glob
import os
import logging
import pytz
import re
from datetime import datetime
from podgen import Podcast, Episode, Media, Person, Category
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError
@click.command()
@click.option('--name', required=True, help='the name of the podcast')
@click.option('--description', required=True,
help='the description of the podcast')
@click.option('--website', required=True,
help='the url of the website of the podcast')
@click.option('--explicit/--no-explicit', default=False,
help='is the podcast explicit?')
@click.option('--author_name', required=True,
help='the authors of the podcast')
@click.option('--author_email', help='the email of the podcast')
@click.option('--image', help='the url of the cover image for the podcast \
(minimun 1400x1400px, jpg or png)')
@click.option('--feed_path', default='',
help='the path of the podcast on website')
@click.option('--copyright', help='copyright informations')
@click.option('--language', default='en-EN',
help='podcast language in ISO-639')
@click.option('--category', default='Music',
help='podcast category')
@click.option('--blog/--no-blog', default=False,
help='try to guess episode blog post')
@click.option('--blog_path', default='',
help='path to blog posts')
@click.option('--verbose/--no-verbose', default=False,
help='debug mode')
@click.argument('folder')
def generate(name, description, website, explicit, image, author_name,
author_email, feed_path, copyright, language, category,
blog, blog_path, verbose, folder):
"""Generate a podcast from mp3 files located in the provided FOLDER"""
if verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# locals() snapshots all CLI parameters; the keys not understood by
# podgen's Podcast(**attrs) are deleted below before construction.
attrs = locals()
logging.debug('Processing input: %s' % (attrs))
del attrs['folder']
del attrs['author_name']
del attrs['author_email']
del attrs['verbose']
del attrs['feed_path']
del attrs['blog']
del attrs['blog_path']
attrs['authors'] = [Person(author_name, author_email)]
attrs['owner'] = attrs['authors'][0]
attrs['category'] = Category(category)
feed_name = name.lower().replace(' ', '_') + '.rss'
feed_base = '%s/%s' % (website, feed_path)
feed_url = '%s/%s' % (feed_base, feed_name)
attrs['feed_url'] = feed_url
logging.info('Creating podcast %s, feed %s' % (name, feed_url))
p = Podcast(**attrs)
# One episode per mp3 file, in sorted (filename) order.
for fpath in sorted(glob.glob('%s*.mp3' % (folder))):
logging.info('Adding episode %s' % (fpath))
fname = os.path.basename(fpath)
size = os.path.getsize(fpath)
logging.debug('Filename: %s, size %i' % (fname, size))
try:
tag = ID3(fpath)
except ID3NoHeaderError:
logging.error('%s is not a valid mp3 file, ignoring it' % (fpath))
continue
logging.debug('Read tag: %s' % (tag))
e = Episode()
# ID3 frames used: TPE1 = artist, TIT2 = title, COMM::eng = comment,
# TDRC = recording date; fall back to podcast-level values when absent.
if 'TPE1' in tag:
e.authors = [Person(tag['TPE1'][0])]
else:
e.authors = attrs['authors']
e.title = tag['TIT2'][0]
e.subtitle = e.title
if 'COMM::eng' in tag:
e.summary = tag['COMM::eng'][0]
else:
e.summary = description
episode_url = '%s/%s' % (feed_base, fname)
logging.debug('Episode url: %s' % (episode_url))
e.media = Media(episode_url, size, type='audio/mpeg')
e.media.populate_duration_from(fpath)
pubdate = datetime.strptime(tag['TDRC'][0].text[:10], '%Y-%m-%d')
pubdate = pubdate.replace(tzinfo=pytz.utc)
e.publication_date = pubdate
if blog:
# Guess the blog-post slug from the file name pattern
# "<prefix>_-_<slug>[#<n>]" -> "<slug><n>.html".
blog_post = ''
short_name = re.search('[a-z]*_-_([a-z_]*[#0-9]*)', fname)
if short_name:
blog_post = short_name.group(1).replace('_', '-').\
replace('#', '') + '.html'
e.link = '%s/%s/%s' % (website, blog_path, blog_post)
p.episodes.append(e)
feed_local_path = '%s%s' % (folder, feed_name)
logging.info('Generating feed in %s' % (feed_local_path))
p.rss_file(feed_local_path, minimize=False)
if __name__ == '__main__':
generate()
|
from random import random
from opc.utils.prof import timefunc
# Feedback tap positions for maximal-length LFSRs, keyed by register width.
poly = {
2: (2, 1),
3: (3, 2),
4: (4, 3),
5: (5, 3),
6: (6, 5),
7: (7, 6),
8: (8, 6, 5, 4),
9: (9, 5),
10: (10, 7),
11: (11, 9),
# NOTE(review): tap 14 exceeds the 12-bit width, so lfsr() would ask
# bit() for a negative position — verify this entry against a tap table.
12: (12, 11, 14),
13: (13, 12, 11, 8),
14: (14, 13, 12, 2),
15: (15, 14),
16: (16, 14, 13, 11),
}
def bit(value, bit):
    """Return bit number `bit` (0 = least significant) of `value`."""
    return 1 if value & (1 << bit) else 0
@timefunc
def lfsr(seed=None, bits=8, base=0):
    """
    Generate one full cycle of a `bits`-wide LFSR, each value offset by
    `base`. seed=None uses `bits` itself as the seed; seed=0 asks for a
    randomly chosen seed. After the cycle completes, `base` alone is
    yielded once (the state the register itself never reaches).
    """
    if seed is None:
        seed = bits
    elif seed == 0:
        seed = int(random() * (2 ** bits - 1))
    try:
        toggles = poly[bits]
    except:
        raise Exception("%d bit LFSRs aren't supported" % bits)
    state = seed
    first = True
    while first or state != seed:
        yield base + state
        first = False
        # XOR together the tapped bits to form the feedback bit.
        feedback = 0
        for toggle in toggles:
            feedback ^= bit(state, bits - toggle)
        state = (state >> 1) | (feedback << (bits - 1))
    yield base
class LfsrBucket(object):
    """Round-robin pool of LFSRs that together cover `slots` values once.

    `slots` is decomposed into power-of-two chunks, each served by one
    LFSR generator; any remainder becomes a nested LfsrBucket.
    """

    def __init__(self, slots, base=0):
        self.content = []  # mix of lfsr generators and sub-buckets
        self.ptr = 0       # round-robin cursor into self.content
        # figure out our base power. If this is first time
        # in, and there is enough elbow room, then start with
        # a slightly smaller chunk (to improve distribution)
        power = self._invPow2(slots)
        if base == 0 and power > 2:
            power -= 1
        while slots >= 2**power:
            self._addLfsr(power, base)
            slots -= 2**power
            base += 2**power
        # if there's anything left over, deal with it as a
        # sub-bucket
        if slots > 0:
            self._addBucket(slots, base)

    def _invPow2(self, value):
        """
        This is a brute force approach to get the closest power of two
        that is greater than the value presented. But it will give
        adequate performance, given our application.
        """
        power = 0
        while 2**power <= value:
            power += 1
        return power-1

    def _addLfsr(self, bits, base):
        # One LFSR covering 2**bits values starting at `base`.
        self.content.append(lfsr(seed=0, bits=bits, base=base))

    def _addBucket(self, slots, base):
        # Non-power-of-two remainder: recurse into a sub-bucket.
        self.content.append(LfsrBucket(slots, base))

    def buckets(self):
        """Number of direct members (LFSRs plus sub-buckets)."""
        return len(self.content)

    @timefunc
    def get(self, level=0):
        """
        Get the next value from the bucket. A candidate can either
        be a LFSR, or it can be a sub-bucket. We'll need to check
        for all and handle exceptions appropriately.
        Returns None when the selected member is exhausted.
        """
        candidate = self.content[self.ptr]
        self.ptr = (self.ptr+1) % len(self.content)
        try:
            return candidate.get(level+1)
        except AttributeError:
            try:
                # Fixed: was candidate.next(), a Python-2-only generator
                # method; the next() builtin works on Python 2.6+ and 3.
                return next(candidate)
            except StopIteration:
                return None
@timefunc
def compoundLfsr(slots):
"""
Combine a number of LFSRs to support an arbitary range of
once-visit values, most of the heavy lifting is done in a
class.
figure out largest power of two that is smaller than places
add as many of these to the pool that fit, this is a bucket.
take the remainder and repeat until there is nothing left.
while generating visit each bucket in round-robin sequence,
until all of the buckets report empty.
"""
pool = LfsrBucket(slots)
while True:
failures = 0
while True:
# pool.get() returns None when the member it polled is exhausted.
value = pool.get()
if value is not None:
yield value
break
failures += 1
if failures == pool.buckets():
# none of the buckets have anything left, the
# supply of values is exhausted
return
|
'''Descriptor classes defined in this file are "intermediary" classes that
gather, from the user application, information about found gen- or workflow-
classes.'''
import types, copy
import appy.gen as gen
from . import po
from .model import ModelClass
from .utils import produceNiceMessage, getClassName
TABS = 4 # Number of blanks in a Python indentation.
class Descriptor: # Abstract
    '''Abstract base holding what the generator knows about one class.'''

    def __init__(self, klass, orderedAttributes, generator):
        # The corresponding Python class
        self.klass = klass
        # The names of the static appy-compliant attributes declared in
        # self.klass
        self.orderedAttributes = orderedAttributes
        # A reference to the code generator.
        self.generator = generator

    def __repr__(self):
        className = self.klass.__name__
        return '<Class %s>' % className
class ClassDescriptor(Descriptor):
'''This class gives information about an Appy class.'''
def __init__(self, klass, orderedAttributes, generator):
# Base class stores klass, orderedAttributes and generator.
Descriptor.__init__(self, klass, orderedAttributes, generator)
self.methods = '' # Needed method definitions will be generated here
# Generated (application-prefixed) name for this class.
self.name = getClassName(self.klass, generator.applicationName)
self.predefined = False
self.customized = False
# Phase and page names will be calculated later, when first required.
self.phases = None
self.pages = None
def getOrderedAppyAttributes(self, condition=None):
'''Returns the appy types for all attributes of this class and parent
class(es). If a p_condition is specified, ony Appy types matching
the condition will be returned. p_condition must be a string
containing an expression that will be evaluated with, in its context,
"self" being this ClassDescriptor and "attrValue" being the current
Type instance.
Order of returned attributes already takes into account type's
"move" attributes.'''
attrs = []
# First, get the attributes for the current class
for attrName in self.orderedAttributes:
try:
attrValue = getattr(self.klass, attrName)
hookClass = self.klass
except AttributeError:
# Fall back to the model class (presumably set by a subclass or
# elsewhere in the generator — not visible here, verify).
attrValue = getattr(self.modelClass, attrName)
hookClass = self.modelClass
if isinstance(attrValue, gen.Field):
# NOTE(security): p_condition is eval'ed as trusted generator
# input; never pass user-controlled strings here.
if not condition or eval(condition):
attrs.append( (attrName, attrValue, hookClass) )
# Then, add attributes from parent classes
for baseClass in self.klass.__bases__:
# Find the classDescr that corresponds to baseClass
baseClassDescr = None
for classDescr in self.generator.classes:
if classDescr.klass == baseClass:
baseClassDescr = classDescr
break
if baseClassDescr:
# Parent attributes come first.
attrs = baseClassDescr.getOrderedAppyAttributes() + attrs
# Modify attributes order by using "move" attributes
res = []
for name, appyType, klass in attrs:
if appyType.move:
# Negative-capable offset: shift the field back by |move| slots.
newPosition = len(res) - abs(appyType.move)
if newPosition <= 0:
newPosition = 0
res.insert(newPosition, (name, appyType, klass))
else:
res.append((name, appyType, klass))
return res
def getChildren(self):
    '''Returns the descriptors, among those known to the generator, whose
       class inherits from p_self's class (excluding p_self itself).'''
    return [descr for descr in self.generator.classes
            if (descr.klass != self.klass) and
               issubclass(descr.klass, self.klass)]
def getPhases(self):
    '''Lazy-gets the phases defined on fields of this class.'''
    if not hasattr(self, 'phases') or (self.phases == None):
        # Collect distinct phases, preserving field order.
        self.phases = []
        for fieldName, appyType, klass in self.getOrderedAppyAttributes():
            phase = appyType.page.phase
            if phase not in self.phases:
                self.phases.append(phase)
    return self.phases
def getPages(self):
    '''Lazy-gets the page names defined on fields of this class.'''
    if getattr(self, 'pages', None) is None:
        names = []
        for name, field, klass in self.getOrderedAppyAttributes():
            pageName = field.page.name
            # Keep first-seen order, without duplicates.
            if pageName not in names:
                names.append(pageName)
        self.pages = names
    return self.pages
def getParents(self, allClasses):
    '''Returns a tuple (parentWrapper, parentClass) for this class.

       p_allClasses is the list of class descriptors known by the
       generator. The wrapper parent defaults to "AbstractWrapper"; when
       the first base class of self.klass corresponds to one of
       p_allClasses, the wrapper generated for that descriptor is used
       instead.'''
    parentWrapper = 'AbstractWrapper'
    parentClass = '%s.%s' % (self.klass.__module__, self.klass.__name__)
    if self.klass.__bases__:
        baseClassName = self.klass.__bases__[0].__name__
        for k in allClasses:
            # Bug fix: compare the candidate descriptor's class name
            # (k.klass), not self.klass, with the base class name. The
            # previous test (self.klass.__name__ == baseClassName) could
            # never match unless a class was named like its own base.
            if k.klass.__name__ == baseClassName:
                parentWrapper = '%s_Wrapper' % k.name
    return (parentWrapper, parentClass)
def generateSchema(self):
    '''Generates i18n and other related stuff for this class.'''
    for name in self.orderedAttributes:
        # The attribute may live on the gen-class or, failing that, on
        # the model class.
        try:
            value = getattr(self.klass, name)
        except AttributeError:
            value = getattr(self.modelClass, name)
        if isinstance(value, gen.Field):
            FieldDescriptor(name, value, self).generate()
def isAbstract(self):
    '''Is self.klass abstract? Only an "abstract" attribute defined
       directly on the class (not inherited) is taken into account.'''
    return self.klass.__dict__.get('abstract', False)
def isRoot(self):
    '''Is self.klass root? A root class represents some kind of major
       concept in the application: for example, creating instances of
       such classes will be easy from the user interface. Only a "root"
       attribute defined directly on the class counts.'''
    return self.klass.__dict__.get('root', False)
def isFolder(self, klass=None):
    '''Must self.klass be a folder? If p_klass is given, the test is
       performed on p_klass instead of self.klass. The "folder" attribute
       is searched on the class itself, then up its first-base chain.'''
    target = klass or self.klass
    if 'folder' in target.__dict__:
        return target.__dict__['folder']
    # Not defined here: ask the first base class, if any.
    if target.__bases__:
        return self.isFolder(target.__bases__[0])
    return False
def getCreators(self):
    '''Gets the specific creators defined for this class, excepted if
       attribute "creators" does not contain a list of roles.

       Returns a list of gen.Role instances; raises an exception if a
       local role is used as a creator.'''
    res = []
    if not hasattr(self.klass, 'creators'): return res
    if not isinstance(self.klass.creators, list): return res
    for creator in self.klass.creators:
        if isinstance(creator, gen.Role):
            if creator.local:
                # Bug fix: "raise <str>" is a TypeError in Python 3
                # (string exceptions were removed); raise a real
                # exception instead.
                raise Exception('Local role "%s" cannot be used as a '
                                'creator.' % creator.name)
            res.append(creator)
        else:
            # A plain role name: wrap it in a Role instance.
            res.append(gen.Role(creator))
    return res
def getCreateMean(self, type='Import'):
    '''Returns the mean for this class that corresponds to p_type (the
       name of a create-mean class, e.g. "Import"), or None if the class
       does not support this create mean.'''
    if 'create' not in self.klass.__dict__: return
    means = self.klass.create
    if not means: return
    if not isinstance(means, (tuple, list)):
        means = [means]
    for mean in means:
        # Bug fix: the original used exec('found = isinstance(...)'),
        # but exec() cannot bind a function-local variable in Python 3,
        # so "found" was never defined (NameError). Compare the mean's
        # class name instead. NOTE(review): this matches exact class
        # names only, not subclasses — confirm no create-mean subclasses
        # are in use.
        if mean.__class__.__name__ == type:
            return mean
@staticmethod
def getSearches(klass, tool=None):
'''Returns the list of searches that are defined on this class. If
p_tool is given, we are at execution time (not a generation time),
and we may potentially execute search.show methods that allow to
conditionnaly include a search or not.'''
if 'search' in klass.__dict__:
searches = klass.__dict__['search']
if not tool: return searches
# Evaluate attributes "show" for every search.
return [s for s in searches if s.isShowable(klass, tool)]
return []
@staticmethod
def getSearch(klass, searchName):
    '''Gets the search named p_searchName, or None if no such search is
       defined on p_klass.'''
    for candidate in ClassDescriptor.getSearches(klass):
        if candidate.name == searchName:
            return candidate
def addIndexMethod(self, field):
    '''For indexed p_field, this method generates a method that allows to
       get the value of the field as must be copied into the corresponding
       index.'''
    # Append, to the source code accumulated in self.methods, a getter
    # named "get<FieldName>" that returns the field's indexable value.
    m = self.methods
    spaces = TABS  # TABS: indentation unit used by the code generator
    n = field.fieldName
    # Method header, e.g. "def getTitle(self):" for field "title"
    m += '\n' + ' '*spaces + 'def get%s%s(self):\n' % (n[0].upper(), n[1:])
    spaces += TABS
    m += ' '*spaces + "'''Gets indexable value of field \"%s\".'''\n" % n
    m += ' '*spaces + 'return self.getAppyType("%s").getIndexValue(' \
         'self)\n' % n
    self.methods = m
def addField(self, fieldName, fieldType):
    '''Adds a new field to the Tool.

       Sets p_fieldType as attribute p_fieldName on the model class and
       registers the name in the attribute-order lists. If the field is
       already registered, a warning is printed and the registration is
       skipped (the attribute value is still updated).'''
    # Use setattr instead of exec(): safer, faster and equivalent.
    setattr(self.modelClass, fieldName, fieldType)
    if fieldName in self.modelClass._appy_attributes:
        print('Warning, field "%s" is already existing on class "%s"' %
              (fieldName, self.modelClass.__name__))
        return
    self.modelClass._appy_attributes.append(fieldName)
    self.orderedAttributes.append(fieldName)
class WorkflowDescriptor(Descriptor):
    '''This class gives information about an Appy workflow.'''

    @staticmethod
    def getWorkflowName(klass):
        '''Returns the name of this workflow: the module path (dots
           replaced with underscores) joined to the class name, all in
           lowercase.'''
        prefix = klass.__module__.replace('.', '_')
        return ('%s_%s' % (prefix, klass.__name__)).lower()
class FieldDescriptor:
    '''This class gathers information about a specific typed attribute defined
       in a gen-class.'''

    def __init__(self, fieldName, appyType, classDescriptor):
        # p_appyType is the gen.Field instance; p_classDescriptor is the
        # descriptor of the class that owns the attribute.
        self.appyType = appyType
        self.classDescr = classDescriptor
        self.generator = classDescriptor.generator
        self.applicationName = classDescriptor.generator.applicationName
        self.fieldName = fieldName
        self.fieldParams = {'name': fieldName}
        self.widgetParams = {}
        self.fieldType = None
        self.widgetType = None

    def i18n(self, id, default, nice=True):
        '''Shorthand for adding a new message into self.generator.labels.'''
        self.generator.labels.append(id, default, nice=nice)

    def __repr__(self):
        return '<Field %s, %s>' % (self.fieldName, self.classDescr)

    def produceMessage(self, msgId, isLabel=True):
        '''Gets the default label, description or help (depending on p_msgType)
           for i18n message p_msgId. Returns a (id, default, niceDefault)
           tuple suitable for m_i18n.'''
        default = ' '
        niceDefault = False
        if isLabel:
            niceDefault = True
            default = self.fieldName
        return msgId, default, niceDefault

    def walkString(self):
        '''How to generate an Appy String?'''
        if self.appyType.isSelect and \
           (type(self.appyType.validator) in (list, tuple)):
            # Generate i18n messages for every possible value if the list
            # of values is fixed.
            for value in self.appyType.validator:
                label = '%s_%s_list_%s' % (self.classDescr.name,
                                           self.fieldName, value)
                self.i18n(label, value)

    def walkAction(self):
        '''Generates the i18n-related label for an Action's confirmation
           message, if the action requires confirmation.'''
        if self.appyType.confirm:
            label = '%s_%s_confirm' % (self.classDescr.name, self.fieldName)
            self.i18n(label, po.CONFIRM, nice=False)

    def walkRef(self):
        '''How to generate a Ref?'''
        # Add the label for the confirm message if relevant
        if self.appyType.addConfirm:
            label = '%s_%s_addConfirm' % (self.classDescr.name, self.fieldName)
            self.i18n(label, po.CONFIRM, nice=False)

    def walkList(self):
        '''Adds i18n-specific messages for every sub-field of a List.'''
        for name, field in self.appyType.fields:
            label = '%s_%s_%s' % (self.classDescr.name, self.fieldName, name)
            self.i18n(label, name)
            if field.hasDescr:
                self.i18n('%s_descr' % label, ' ')
            if field.hasHelp:
                self.i18n('%s_help' % label, ' ')

    def walkCalendar(self):
        '''Adds i18n-specific messages for every Calendar event type, when
           event types are given as a fixed list/tuple.'''
        eTypes = self.appyType.eventTypes
        if not isinstance(eTypes, list) and not isinstance(eTypes, tuple):return
        for et in self.appyType.eventTypes:
            label = '%s_%s_event_%s' % (self.classDescr.name,self.fieldName,et)
            self.i18n(label, et)

    def walkAppyType(self):
        '''Walks into the Appy type definition and gathers data about the
           i18n labels.'''
        # Manage things common to all Appy types
        # Put an index on this field? ("title" already has one by default)
        if self.appyType.indexed and (self.fieldName != 'title'):
            self.classDescr.addIndexMethod(self)
        # i18n labels
        if not self.appyType.label:
            # Create labels for generating them in i18n files, only if
            # required (i.e. when no custom label is defined).
            i18nPrefix = '%s_%s' % (self.classDescr.name, self.fieldName)
            if self.appyType.hasLabel:
                self.i18n(*self.produceMessage(i18nPrefix))
            if self.appyType.hasDescr:
                descrId = i18nPrefix + '_descr'
                self.i18n(*self.produceMessage(descrId,isLabel=False))
            if self.appyType.hasHelp:
                helpId = i18nPrefix + '_help'
                self.i18n(*self.produceMessage(helpId, isLabel=False))
        # Create i18n messages linked to pages and phases, only if there is
        # more than one page/phase for the class.
        if len(self.classDescr.getPhases()) > 1:
            # Create the message for the name of the phase
            phaseName = self.appyType.page.phase
            msgId = '%s_phase_%s' % (self.classDescr.name, phaseName)
            self.i18n(msgId, phaseName)
        if len(self.classDescr.getPages()) > 1:
            # Create the message for the name of the page
            pageName = self.appyType.page.name
            msgId = '%s_page_%s' % (self.classDescr.name, pageName)
            self.i18n(msgId, pageName)
        # Create i18n messages linked to groups
        group = self.appyType.group
        if group and not group.label:
            group.generateLabels(self.generator.labels, self.classDescr, set())
        # Manage things which are specific to String types
        if self.appyType.type == 'String': self.walkString()
        # Manage things which are specific to Actions
        elif self.appyType.type == 'Action': self.walkAction()
        # Manage things which are specific to Ref types
        elif self.appyType.type == 'Ref': self.walkRef()
        # Manage things which are specific to List types
        elif self.appyType.type == 'List': self.walkList()
        # Manage things which are specific to Calendar types
        elif self.appyType.type == 'Calendar': self.walkCalendar()

    def generate(self):
        '''Generates the i18n labels for this type.'''
        self.walkAppyType()
class ToolClassDescriptor(ClassDescriptor):
    '''Represents the POD-specific fields that must be added to the tool.'''

    def __init__(self, klass, generator):
        ClassDescriptor.__init__(self, klass, klass._appy_attributes[:],
                                 generator)
        self.modelClass = self.klass
        self.predefined = True
        self.customized = False

    def getParents(self, allClasses=()):
        parents = ['Tool']
        if self.customized:
            parents.append('%s.%s' % (self.klass.__module__,
                                      self.klass.__name__))
        return parents

    def update(self, klass, attributes):
        '''Called by the generator when it finds a custom tool definition:
           adds the custom tool elements into this default Tool
           descriptor.'''
        self.orderedAttributes += attributes
        self.klass = klass
        self.customized = True

    def isFolder(self, klass=None): return True
    def isRoot(self): return False

    def addImportRelatedFields(self, classDescr):
        '''Adds, for class p_classDescr, attributes related to the import
           functionality for that class.'''
        className = classDescr.name
        # Field that defines the path of the files to import.
        fieldName = 'importPathFor%s' % className
        defValue = classDescr.getCreateMean('Import').path
        fieldType = gen.String(page='data', multiplicity=(1,1),
                               default=defValue,
                               group=classDescr.klass.__name__)
        self.addField(fieldName, fieldType)
class UserClassDescriptor(ClassDescriptor):
    '''Appy-specific class for representing a user.'''

    def __init__(self, klass, generator):
        ClassDescriptor.__init__(self, klass, klass._appy_attributes[:],
                                 generator)
        self.modelClass = self.klass
        self.predefined = True
        self.customized = False

    def getParents(self, allClasses=()):
        parents = ['User']
        if self.customized:
            parents.append('%s.%s' % (self.klass.__module__,
                                      self.klass.__name__))
        return parents

    def update(self, klass, attributes):
        '''Called by the generator when it finds a custom user definition:
           adds the custom user elements into this default User
           descriptor.'''
        self.orderedAttributes += attributes
        self.klass = klass
        self.customized = True

    def isFolder(self, klass=None): return False
class GroupClassDescriptor(ClassDescriptor):
    '''Represents the class that corresponds to the Group for the generated
       application.'''

    def __init__(self, klass, generator):
        ClassDescriptor.__init__(self, klass, klass._appy_attributes[:],
                                 generator)
        self.modelClass = self.klass
        self.predefined = True
        self.customized = False

    def getParents(self, allClasses=()):
        parents = ['Group']
        if self.customized:
            parents.append('%s.%s' % (self.klass.__module__,
                                      self.klass.__name__))
        return parents

    def update(self, klass, attributes):
        '''Called by the generator when it finds a custom group definition:
           adds the custom group elements into this default Group
           descriptor.

           NOTE: currently, it is not possible to define a custom Group
           class.'''
        self.orderedAttributes += attributes
        self.klass = klass
        self.customized = True

    def isFolder(self, klass=None): return False
class TranslationClassDescriptor(ClassDescriptor):
    '''Represents the set of translation ids for a gen-application.'''

    def __init__(self, klass, generator):
        ClassDescriptor.__init__(self,klass,klass._appy_attributes[:],generator)
        self.modelClass = self.klass
        self.predefined = True   # Predefined by Appy, not by the application
        self.customized = False  # No custom Translation class is handled here

    def getParents(self, allClasses=()): return ('Translation',)
    def isFolder(self, klass=None): return False

    def addLabelField(self, messageId, page):
        '''Adds a Computed field that will display, in the source language, the
           content of the text to translate.'''
        field = gen.Computed(method=self.modelClass.label, plainText=False,
                             page=page, show=self.modelClass.show, layouts='f')
        self.addField('%s_label' % messageId, field)

    def addMessageField(self, messageId, page, i18nFiles):
        '''Adds a message field corresponding to p_messageId to the Translation
           class, on a given p_page. We need i18n files p_i18nFiles for
           fine-tuning the String type to generate for this field (one-line?
           several lines?...)'''
        params = {'page':page, 'layouts':'f', 'show': self.modelClass.show}
        appName = self.generator.applicationName
        # Scan all messages corresponding to p_messageId from all translation
        # files. We will define field length from the longer found message
        # content.
        maxLine = 100 # We suppose a line is 100 characters long.
        width = 0
        height = 0
        for fileName, poFile in i18nFiles.items():
            if not fileName.startswith('%s-' % appName) or \
               messageId not in i18nFiles[fileName].messagesDict:
                # In this case this is not one of our Appy-managed translation
                # files.
                continue
            msgContent = i18nFiles[fileName].messagesDict[messageId].msg
            # Compute width
            width = max(width, len(msgContent))
            # Compute height (a "\n" counts for one line)
            mHeight = int(len(msgContent)/maxLine) + msgContent.count('<br/>')
            height = max(height, mHeight)
        if height < 1:
            # This is a one-line field.
            params['width'] = width
        else:
            # This is a multi-line field, or a very-long-single-lined field
            params['format'] = gen.String.TEXT
            params['height'] = height
        self.addField(messageId, gen.String(**params))
class PageClassDescriptor(ClassDescriptor):
    '''Represents the class that corresponds to a Page.'''

    def __init__(self, klass, generator):
        ClassDescriptor.__init__(self, klass, klass._appy_attributes[:],
                                 generator)
        self.modelClass = self.klass
        self.predefined = True
        self.customized = False

    def getParents(self, allClasses=()):
        parents = ['Page']
        if self.customized:
            parents.append('%s.%s' % (self.klass.__module__,
                                      self.klass.__name__))
        return parents

    def update(self, klass, attributes):
        '''Called by the generator when it finds a custom page definition:
           adds the custom page elements into this default Page descriptor.

           NOTE: currently, it is not possible to define a custom Page
           class.'''
        self.orderedAttributes += attributes
        self.klass = klass
        self.customized = True

    def isFolder(self, klass=None): return True
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alters two fields of the "solicitacao" model of the "ouvidoria" app.

    dependencies = [
        ('ouvidoria', '0001_initial'),
    ]

    operations = [
        # "descricao" becomes a required text field with an empty default.
        migrations.AlterField(
            model_name='solicitacao',
            name='descricao',
            field=models.TextField(default='', verbose_name='Descrição'),
        ),
        # "owner" becomes optional (blank/null allowed). PROTECT prevents
        # deleting a user that still owns solicitations; related_name='+'
        # disables the reverse accessor on the user model.
        migrations.AlterField(
            model_name='solicitacao',
            name='owner',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='owner'),
        ),
    ]
|
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from distutils.core import setup
from setuptools import find_packages, setup
def read(*names, **kwargs):
    '''Reads and returns the content of the text file whose path components
       p_names are joined under this file's directory.

       Keyword arg "encoding" selects the text encoding (default: utf8).
       Bug fix: the file handle is now closed deterministically via a
       context manager instead of being left to the garbage collector.'''
    path = join(dirname(__file__), *names)
    with io.open(path, encoding=kwargs.get('encoding', 'utf8')) as handle:
        return handle.read()
def get_readme():
    '''Returns the project's long description: README.md converted to
       reStructuredText via pypandoc when available, otherwise the raw
       Markdown content.'''
    try:
        import pypandoc
        return pypandoc.convert('README.md', 'rst')
    except (ImportError, IOError):
        # pypandoc is missing or the conversion failed: fall back to the
        # raw README content.
        with open('README.md', 'r') as handle:
            return handle.read()
# Package metadata. NOTE(review): both distutils.core.setup and
# setuptools.setup are imported above; the setuptools one (imported last)
# is the one effectively called here — the distutils import is redundant.
setup(name='Parseit',
      version='1.4.1',
      # Typo fix in the user-facing description: "lenght" -> "length".
      description="A fixed record length text and csv file parser",
      long_description=get_readme(),
      keywords='parse text file csv',
      author="Patricio Moracho",
      author_email="pmoracho@gmail.com",
      url="https://github.com/pmoracho/parseit",
      packages=find_packages(),
      classifiers=[
          'Intended Audience :: Developers',
          'Natural Language :: Spanish',
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Environment :: Console :: Curses',
          'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
          'Operating System :: POSIX',
          'Programming Language :: Python :: 3.4',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: User Interfaces',
          'Topic :: Terminals']
      )
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema for the "scoremanager" app: players, teams, and
    # singles/doubles matches.

    dependencies = [
    ]

    operations = [
        # DoublesMatch is created first without its team FKs; they are
        # added by the AddField operations below, after Team exists.
        migrations.CreateModel(
            name='DoublesMatch',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('email', models.EmailField(max_length=254)),
                # NOTE(review): the b'...' defaults/choices are Python 2
                # artifacts; kept as-is because editing a historical
                # migration would change its recorded state.
                ('gender', models.CharField(default=b'M', max_length=1, choices=[(b'M', b'Male'), (b'F', b'Female')])),
                ('position', models.CharField(default=b'F', max_length=1, choices=[(b'F', b'Forward'), (b'D', b'Defence')])),
                ('goal', models.PositiveIntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='SinglesMatch',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('player_one', models.ForeignKey(related_name='singlesplayerone', to='scoremanager.Player')),
                ('player_two', models.ForeignKey(related_name='singlesplayertwo', to='scoremanager.Player')),
            ],
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('win', models.PositiveIntegerField(default=0)),
                ('loss', models.PositiveIntegerField(default=0)),
                ('player_one', models.ForeignKey(related_name='teamplayerone', to='scoremanager.Player')),
                ('player_two', models.ForeignKey(related_name='teamplayertwo', to='scoremanager.Player')),
            ],
        ),
        # Team FKs for DoublesMatch, added after the Team model above.
        migrations.AddField(
            model_name='doublesmatch',
            name='team_one',
            field=models.ForeignKey(related_name='doublesteamone', to='scoremanager.Team'),
        ),
        migrations.AddField(
            model_name='doublesmatch',
            name='team_two',
            field=models.ForeignKey(related_name='doublesteamtwo', to='scoremanager.Team'),
        ),
    ]
|
"""Cylc site and user configuration file spec."""
import os
from typing import List, Optional, Tuple, Any
from pkg_resources import parse_version
from cylc.flow import LOG
from cylc.flow import __version__ as CYLC_VERSION
from cylc.flow.exceptions import GlobalConfigError
from cylc.flow.hostuserutil import get_user_home
from cylc.flow.network.client_factory import CommsMeth
from cylc.flow.parsec.config import (
ConfigNode as Conf,
ParsecConfig,
)
from cylc.flow.parsec.exceptions import ParsecError
from cylc.flow.parsec.upgrade import upgrader
from cylc.flow.parsec.validate import (
CylcConfigValidator as VDR,
DurationFloat,
cylc_config_validate,
)
# Default list of executable search-path directories.
SYSPATH = [
    '/bin',
    '/usr/bin',
    '/usr/local/bin',
    '/sbin',
    '/usr/sbin',
    '/usr/local/sbin'
]

# Reusable rST fragments interpolated into many setting descriptions below.
TIMEOUT_DESCR = "Previously, 'timeout' was a stall timeout."

REPLACES = 'This item was previously called '

# Shared "versionchanged" note for settings moved out of the old
# suite.rc job section.
MOVEDFROMJOB = '''
.. versionchanged:: 8.0.0
Moved from ``suite.rc[runtime][<namespace>]job``.
'''
# rST descriptions for the [scheduler][events] settings. Consumed by the
# loop further down this file: keys ending in "handlers" are declared as
# string lists, "abort on ..." keys as booleans and "... timeout" keys as
# intervals.
EVENTS_DESCR = {
    'startup handlers': (
        f'''
Handlers to run at scheduler startup.
.. versionchanged:: 8.0.0
{REPLACES}``startup handler``.
'''
    ),
    'shutdown handlers': (
        f'''
Handlers to run at scheduler shutdown.
.. versionchanged:: 8.0.0
{REPLACES}``shutdown handler``.
'''
    ),
    'abort handlers': (
        f'''
Handlers to run if the scheduler aborts.
.. versionchanged:: 8.0.0
{REPLACES}``aborted handler``.
'''
    ),
    'workflow timeout': (
        f'''
Workflow timeout interval. The timer starts counting down at scheduler
startup. It resets on workflow restart.
.. versionadded:: 8.0.0
{TIMEOUT_DESCR}
'''
    ),
    'workflow timeout handlers': (
        f'''
Handlers to run if the workflow timer times out.
.. versionadded:: 8.0.0
{TIMEOUT_DESCR}
'''
    ),
    'abort on workflow timeout': (
        f'''
Whether to abort if the workflow timer times out.
.. versionadded:: 8.0.0
{TIMEOUT_DESCR}
'''
    ),
    'stall handlers': (
        f'''
Handlers to run if the scheduler stalls.
.. versionchanged:: 8.0.0
{REPLACES}``stalled handler``.
'''
    ),
    'stall timeout': (
        f'''
The length of a timer which starts if the scheduler stalls.
A workflow will stall if there are no tasks ready to run and no
waiting external triggers, but the presence of incomplete
tasks or unsatisified prerequisites shows the workflow did not run to
completion. The stall timer turns off on any post-stall task activity.
It resets on restarting a stalled workflow.
.. versionadded:: 8.0.0
{TIMEOUT_DESCR}
'''
    ),
    'stall timeout handlers': (
        f'''
Handlers to run if the stall timer times out.
.. versionadded:: 8.0.0
{TIMEOUT_DESCR}
'''
    ),
    'abort on stall timeout': (
        f'''
Whether to abort if the stall timer times out.
.. versionadded:: 8.0.0
{TIMEOUT_DESCR}
'''
    ),
    'inactivity timeout': (
        f'''
Scheduler inactivity timeout interval. The timer resets when any
workflow activity occurs.
.. versionchanged:: 8.0.0
{REPLACES} ``inactivity``.
'''
    ),
    'inactivity timeout handlers': (
        f'''
Handlers to run if the inactivity timer times out.
.. versionchanged:: 8.0.0
{REPLACES}``inactivity handler``.
'''
    ),
    'abort on inactivity timeout': (
        f'''
Whether to abort if the inactivity timer times out.
.. versionchanged:: 8.0.0
{REPLACES}``abort on inactivity``.
'''
    )
}
with Conf('global.cylc', desc='''
The global configuration which defines default Cylc Flow settings
for a user or site.
To view your global config, run:
$ cylc config
Cylc will attempt to load the global configuration (``global.cylc``) from a
hierarchy of locations, including the site directory (defaults to
``/etc/cylc/flow/``) and the user directory (``~/.cylc/flow/``). For
example at Cylc version 8.0.1, the hierarchy would be, in order of
ascending priority:
.. code-block:: sub
<site-conf-path>/flow/global.cylc
<site-conf-path>/flow/8/global.cylc
<site-conf-path>/flow/8.0/global.cylc
<site-conf-path>/flow/8.0.1/global.cylc
~/.cylc/flow/global.cylc
~/.cylc/flow/8/global.cylc
~/.cylc/flow/8.0/global.cylc
~/.cylc/flow/8.0.1/global.cylc
Where ``<site-conf-path>`` is ``/etc/cylc/flow/`` by default but can be
changed by :envvar:`CYLC_SITE_CONF_PATH`.
A setting in a file lower down in the list will override the same setting
from those higher up (but if a setting is present in a file higher up and
not in any files lower down, it will not be overridden).
The following environment variables can change the files which are loaded:
.. envvar:: CYLC_CONF_PATH
If set this bypasses the default site/user configuration hierarchy used
to load the Cylc Flow global configuration.
This should be set to a directory containing a :cylc:conf:`global.cylc`
file.
.. envvar:: CYLC_SITE_CONF_PATH
By default the site configuration is located in ``/etc/cylc/``. For
installations where this is not convenient, this path can be overridden
by setting ``CYLC_SITE_CONF_PATH`` to point at another location.
Configuration for different Cylc components should be in sub-directories
within this location.
For example to configure Cylc Flow you could do the following::
$CYLC_SITE_CONF_PATH/
`-- flow/
`-- global.cylc
.. note::
The ``global.cylc`` file can be templated using Jinja2 variables.
See :ref:`Jinja`.
.. versionchanged:: 8.0.0
Prior to Cylc 8, ``global.cylc`` was named ``global.rc``, but that name
is no longer supported.
''') as SPEC:
with Conf('scheduler', desc=f'''
Default values for entries in :cylc:conf:`flow.cylc[scheduler]`
section.
.. versionchanged:: 8.0.0
{REPLACES}``[cylc]``.
.. note::
:cylc:conf:`global.cylc[scheduler]` should not be confused with
:cylc:conf:`flow.cylc[scheduling]`.
'''):
Conf('UTC mode', VDR.V_BOOLEAN, False, desc='''
Default for :cylc:conf:`flow.cylc[scheduler]UTC mode`.
''')
Conf('process pool size', VDR.V_INTEGER, 4, desc='''
Maximum number of concurrent processes used to execute external job
submission, event handlers, and job poll and kill commands
.. versionchanged:: 8.0.0
Moved into the ``[scheduler]`` section from the top level.
.. seealso::
:ref:`Managing External Command Execution`.
''')
Conf('process pool timeout', VDR.V_INTERVAL, DurationFloat(600),
desc='''
After this interval Cylc will kill long running commands in the
process pool.
.. versionchanged:: 8.0.0
Moved into the ``[scheduler]`` section from the top level.
.. seealso::
:ref:`Managing External Command Execution`.
.. note::
The default is set quite high to avoid killing important
processes when the system is under load.
''')
Conf('auto restart delay', VDR.V_INTERVAL, desc=f'''
Maximum number of seconds the auto-restart mechanism will delay
before restarting workflows.
.. versionchanged:: 8.0.0
{REPLACES}``global.rc[suite servers]auto restart delay``.
When a host is set to automatically
shutdown/restart it waits a random period of time
between zero and ``auto restart delay`` seconds before
beginning the process. This is to prevent large numbers of
workflows from restarting simultaneously.
.. seealso::
:ref:`auto-stop-restart`
''')
with Conf('run hosts', desc=f'''
Configure workflow hosts and ports for starting workflows.
.. versionchanged:: 8.0.0
{REPLACES}``[suite servers]``.
Additionally configure host selection settings specifying how to
determine the most suitable run host at any given time from those
configured.
'''):
Conf('available', VDR.V_SPACELESS_STRING_LIST, desc=f'''
A list of workflow run hosts.
.. versionchanged:: 8.0.0
{REPLACES}``[suite servers]run hosts``.
Cylc will choose one of these hosts for a workflow to start on.
(Unless an explicit host is provided as an option to the
``cylc play --host=<myhost>`` command.)
''')
Conf('ports', VDR.V_INTEGER_LIST, list(range(43001, 43101)),
desc=f'''
A list of allowed ports for Cylc to use to run workflows.
.. versionchanged:: 8.0.0
{REPLACES}``[suite servers]run ports``
''')
Conf('condemned', VDR.V_ABSOLUTE_HOST_LIST, desc=f'''
These hosts will not be used to run jobs.
.. versionchanged:: 8.0.0
{REPLACES}``[suite servers]condemned hosts``.
If workflows are already running on
condemned hosts, Cylc will shut them down and
restart them on different hosts.
.. seealso::
:ref:`auto-stop-restart`
''')
Conf('ranking', VDR.V_STRING, desc=f'''
Rank and filter run hosts based on system information.
.. versionchanged:: 8.0.0
{REPLACES}``[suite servers][run host select]rank``.
Ranking can be used to provide load balancing to ensure no
single run host is overloaded. It also provides thresholds
beyond which Cylc will not attempt to start new schedulers on
a host.
.. _psutil: https://psutil.readthedocs.io/en/latest/
This should be a multiline string containing Python expressions
to rank and/or filter hosts. All `psutil`_ attributes are
available for use in these expressions.
.. rubric:: Ranking
Rankings are expressions which return numerical values.
The host which returns the lowest value is chosen. Examples:
.. code-block:: python
# rank hosts by cpu_percent
cpu_percent()
# rank hosts by 15min average of server load
getloadavg()[2]
# rank hosts by the number of cores
# (multiple by -1 because the lowest value is chosen)
-1 * cpu_count()
.. rubric:: Threshold
Thresholds are expressions which return boolean values.
If a host returns a ``False`` value that host will not be
selected. Examples:
.. code-block:: python
# filter out hosts with a CPU utilisation of 70% or above
cpu_percent() < 70
# filter out hosts with less than 1GB of RAM available
virtual_memory.available > 1000000000
# filter out hosts with less than 1GB of disk space
# available on the "/" mount
disk_usage('/').free > 1000000000
.. rubric:: Combining
Multiple rankings and thresholds can be combined in this
section e.g:
.. code-block:: python
# filter hosts
cpu_percent() < 70
disk_usage('/').free > 1000000000
# rank hosts by CPU count
1 / cpu_count()
# if two hosts have the same CPU count
# then rank them by CPU usage
cpu_percent()
''')
with Conf('host self-identification', desc=f'''
How Cylc determines and shares the identity of the workflow host.
.. versionchanged:: 8.0.0
{REPLACES}``[suite host self-identification]``.
The workflow host's identity must be determined locally by cylc and
passed to running tasks (via ``$CYLC_WORKFLOW_HOST``) so that task
messages can target the right workflow on the right host.
'''):
# TODO
# Is it conceivable that different remote task hosts at the same
# site might see the workflow host differently? If so we'd need to
# be able to override the target in workflow configurations.
Conf(
'method', VDR.V_STRING, 'name',
options=['name', 'address', 'hardwired'],
desc=f'''
Determines how cylc finds the identity of the
workflow host.
.. versionchanged:: 8.0.0
{REPLACES}``[suite host self-identification]``.
Options:
name
(The default method) Self-identified host name.
Cylc asks the workflow host for its host name. This
should resolve on task hosts to the IP address of the
workflow host; if it doesn't, adjust network settings or
use one of the other methods.
address
Automatically determined IP address (requires *target*).
Cylc attempts to use a special external "target address"
to determine the IP address of the workflow host as
seen by remote task hosts.
hardwired
(only to be used as a last resort) Manually specified
host name or IP address (requires *host*) of the
workflow host.
''')
Conf('target', VDR.V_STRING, 'google.com', desc=f'''
Target for use by the *address* self-identification method.
If your workflow host sees the internet, a common
address such as ``google.com`` will do; otherwise choose a host
visible on your intranet.
.. versionchanged:: 8.0.0
{REPLACES}``[suite host self-identification]``.
''')
Conf('host', VDR.V_STRING, desc=f'''
The name or IP address of the workflow host used by the
*hardwired* self-identification method.
.. versionchanged:: 8.0.0
{REPLACES}``[suite host self-identification]``.
''')
with Conf('events', desc='''
Define site defaults for :cylc:conf:`flow.cylc[scheduler][events]`.
'''):
Conf('handlers', VDR.V_STRING_LIST)
Conf('handler events', VDR.V_STRING_LIST)
Conf('mail events', VDR.V_STRING_LIST)
for item, desc in EVENTS_DESCR.items():
if item.endswith("handlers"):
Conf(item, VDR.V_STRING_LIST, desc=desc)
elif item.startswith("abort on"):
default = (item == "abort on stall timeout")
Conf(item, VDR.V_BOOLEAN, default, desc=desc)
elif item.endswith("timeout"):
if item == "stall timeout":
def_intv: Optional['DurationFloat'] = (
DurationFloat(3600))
else:
def_intv = None
Conf(item, VDR.V_INTERVAL, def_intv, desc=desc)
with Conf('mail', desc=f'''
Options for email handling.
.. versionchanged:: 8.0.0
{REPLACES}``[cylc][events]mail <item>``.
'''):
Conf('from', VDR.V_STRING)
Conf('smtp', VDR.V_STRING)
Conf('to', VDR.V_STRING)
Conf('footer', VDR.V_STRING)
Conf(
'task event batch interval',
VDR.V_INTERVAL,
DurationFloat(300),
desc='''
Default for
:cylc:conf:`flow.cylc
[scheduler][mail]task event batch interval`
.. versionchanged:: 8.0.0
This item was previously
``[cylc]task event mail interval``
'''
)
with Conf('main loop', desc='''
Configuration of the Cylc Scheduler's main loop.
.. versionadded:: 8.0.0
'''):
Conf('plugins', VDR.V_STRING_LIST,
['health check', 'reset bad hosts'],
desc='''
Configure the default main loop plugins to use when
starting new workflows.
.. versionadded:: 8.0.0
''')
with Conf('<plugin name>', desc='''
Configure a main loop plugin.
''') as MainLoopPlugin:
Conf('interval', VDR.V_INTERVAL, desc='''
The interval with which this plugin is run.
.. versionadded:: 8.0.0
''')
with Conf('health check', meta=MainLoopPlugin, desc='''
Checks the integrity of the workflow run directory.
.. versionadded:: 8.0.0
'''):
Conf('interval', VDR.V_INTERVAL, DurationFloat(600), desc='''
The interval with which this plugin is run.
.. versionadded:: 8.0.0
''')
with Conf('reset bad hosts', meta=MainLoopPlugin, desc='''
Periodically clear the scheduler list of unreachable (bad)
hosts.
.. versionadded:: 8.0.0
'''):
Conf('interval', VDR.V_INTERVAL, DurationFloat(1800), desc='''
How often (in seconds) to run this plugin.
.. versionadded:: 8.0.0
''')
with Conf('logging', desc=f'''
Settings for the workflow event log.
The workflow event log, held under the workflow run directory, is
maintained as a rolling archive. Logs are rolled over (backed up
and started anew) when they reach a configurable limit size.
.. versionchanged:: 8.0.0
{REPLACES}``[suite logging]``.
'''):
Conf('rolling archive length', VDR.V_INTEGER, 5, desc='''
How many rolled logs to retain in the archive.
''')
Conf('maximum size in bytes', VDR.V_INTEGER, 1000000, desc='''
Workflow event logs are rolled over when they reach this
file size.
''')
with Conf('install', desc='''
Configure directories and files to be installed on remote hosts.
.. versionadded:: 8.0.0
'''):
Conf('source dirs', VDR.V_STRING_LIST, default=['~/cylc-src'], desc='''
List of paths that Cylc searches for workflows to install.
All workflow source directories in these locations will
also show up in the GUI, ready for installation.
.. caution::
If workflow source directories of the same name exist in more
than one of these paths, only the first one will be picked up.
''')
# Symlink Dirs
with Conf('symlink dirs', # noqa: SIM117 (keep same format)
desc="""
Configure alternate workflow run directory locations.
Symlinks from the the standard ``$HOME/cylc-run`` locations will be
created.
.. versionadded:: 8.0.0
"""):
with Conf('<install target>'):
Conf('run', VDR.V_STRING, None, desc="""
Alternative location for the run dir.
If specified, the workflow run directory will
be created in ``<this-path>/cylc-run/<workflow-name>``
and a symbolic link will be created from
``$HOME/cylc-run/<workflow-name>``.
If not specified the workflow run directory will be created
in ``$HOME/cylc-run/<workflow-name>``.
All the workflow files and the ``.service`` directory get
installed into this directory.
.. versionadded:: 8.0.0
""")
Conf('log', VDR.V_STRING, None, desc="""
Alternative location for the log dir.
If specified the workflow log directory will be created in
``<this-path>/cylc-run/<workflow-name>/log`` and a
symbolic link will be created from
``$HOME/cylc-run/<workflow-name>/log``. If not specified
the workflow log directory will be created in
``$HOME/cylc-run/<workflow-name>/log``.
.. versionadded:: 8.0.0
""")
Conf('share', VDR.V_STRING, None, desc="""
Alternative location for the share dir.
If specified the workflow share directory will be
created in ``<this-path>/cylc-run/<workflow-name>/share``
and a symbolic link will be created from
``<$HOME/cylc-run/<workflow-name>/share``. If not specified
the workflow share directory will be created in
``$HOME/cylc-run/<workflow-name>/share``.
.. versionadded:: 8.0.0
""")
Conf('share/cycle', VDR.V_STRING, None, desc="""
Alternative directory for the share/cycle dir.
If specified the workflow share/cycle directory
will be created in
``<this-path>/cylc-run/<workflow-name>/share/cycle``
and a symbolic link will be created from
``$HOME/cylc-run/<workflow-name>/share/cycle``. If not
specified the workflow share/cycle directory will be
created in ``$HOME/cylc-run/<workflow-name>/share/cycle``.
.. versionadded:: 8.0.0
""")
Conf('work', VDR.V_STRING, None, desc="""
Alternative directory for the work dir.
If specified the workflow work directory will be created in
``<this-path>/cylc-run/<workflow-name>/work`` and a
symbolic link will be created from
``$HOME/cylc-run/<workflow-name>/work``. If not specified
the workflow work directory will be created in
``$HOME/cylc-run/<workflow-name>/work``.
.. versionadded:: 8.0.0
""")
with Conf('editors', desc='''
Choose your favourite text editor for editing workflow configurations.
'''):
Conf('terminal', VDR.V_STRING, desc='''
An in-terminal text editor to be used by the Cylc command line.
If unspecified Cylc will use the environment variable
``$EDITOR`` which is the preferred way to set your text editor.
.. Note::
You can set your ``$EDITOR`` in your shell profile file
(e.g. ``~.bashrc``)
If neither this or ``$EDITOR`` are specified then Cylc will
default to ``vi``.
Examples::
ed
emacs -nw
nano
vi
''')
Conf('gui', VDR.V_STRING, desc='''
A graphical text editor to be used by cylc.
If unspecified Cylc will use the environment variable
``$GEDITOR`` which is the preferred way to set your text editor.
.. Note::
You can set your ``$GEDITOR`` in your shell profile file
(e.g. ``~.bashrc``)
If neither this or ``$GEDITOR`` are specified then Cylc will
default to ``gvim -fg``.
Examples::
atom --wait
code --new-window --wait
emacs
gedit -s
gvim -fg
nedit
''')
with Conf('platforms', desc='''
Platforms allow you to define compute resources available at your
site.
.. versionadded:: 8.0.0
A platform consists of a group of one or more hosts which share a
file system and a job runner (batch system).
A platform must allow interaction with the same task job from *any*
of its hosts.
'''):
with Conf('<platform name>', desc='''
Configuration defining a platform.
.. versionadded:: 8.0.0
Many of the items in platform definitions have been moved from
``flow.cylc[runtime][<namespace>][job]`` and
``flow.cylc[runtime][<namespace>][remote]``
Platform names can be regular expressions: If you have a set of
compute resources such as ``bigmachine1, bigmachine2`` or
``desktop0000, .., desktop9999`` one would define platforms with
names ``[[bigmachine[12]]]`` and ``[[desktop[0-9]{4}]]``.
.. note::
Each possible match to the definition regular expression is
considered a separate platform.
If you had a supercomputer with multiple login nodes this would
be a single platform with multiple :cylc:conf:`hosts`
.. seealso::
- :ref:`MajorChangesPlatforms` in the Cylc 8 migration guide.
- :ref:`AdminGuide.PlatformConfigs`, an administrator's guide to
platform configurations.
''') as Platform:
Conf('hosts', VDR.V_STRING_LIST, desc='''
A list of hosts from which the job host can be selected using
:cylc:conf:`[..][selection]method`.
.. versionadded:: 8.0.0
All hosts should share a file system.
''')
Conf('job runner', VDR.V_STRING, 'background', desc=f'''
The batch system/job submit method used to run jobs on the
platform.
.. versionchanged:: 8.0.0
{REPLACES}
``suite.rc[runtime][<namespace>][job]batch system``.
Examples:
* ``background``
* ``slurm``
* ``pbs``
''')
Conf('job runner command template', VDR.V_STRING, desc=f'''
Set the command used by the chosen job runner.
.. versionchanged:: 8.0.0
{REPLACES}``suite.rc[runtime][<namespace>][job]
batch system command template``.
The template's ``%(job)s`` will be
substituted by the job file path.
''')
Conf('shell', VDR.V_STRING, '/bin/bash', desc=f'''
{MOVEDFROMJOB}
''')
Conf('communication method',
VDR.V_STRING, 'zmq',
options=[meth.value for meth in CommsMeth], desc='''
The means by which task progress messages are reported back to
the running workflow.
Options:
zmq
Direct client-server TCP communication via network ports
poll
The workflow polls for task status (no task messaging)
ssh
Use non-interactive ssh for task communications
''')
# TODO ensure that it is possible to over-ride the following three
# settings in workflow config.
Conf('submission polling intervals', VDR.V_INTERVAL_LIST, desc=f'''
List of intervals at which to poll status of job submission.
{MOVEDFROMJOB}
Cylc can poll submitted jobs to catch problems that
prevent the submitted job from executing at all, such as
deletion from an external job runner queue. Routine
polling is done only for the polling ``task communication
method`` unless workflow-specific polling is configured in
the workflow configuration. A list of interval values can be
specified as for execution polling but a single value
is probably sufficient for job submission polling.
Example::
5*PT1M, 10*PT5M
''')
Conf('submission retry delays', VDR.V_INTERVAL_LIST, None, desc=f'''
{MOVEDFROMJOB}
''')
Conf('execution polling intervals', VDR.V_INTERVAL_LIST, desc=f'''
List of intervals at which to poll status of job execution.
{MOVEDFROMJOB}
Cylc can poll running jobs to catch problems that prevent task
messages from being sent back to the workflow, such as hard job
kills, network outages, or unplanned task host shutdown.
Routine polling is done only for the polling *task
communication method* (below) unless polling is
configured in the workflow configuration. A list of interval
values can be specified, with the last value used repeatedly
until the task is finished - this allows more frequent polling
near the beginning and end of the anticipated task run time.
Multipliers can be used as shorthand as in the example below.
Example::
5*PT1M, 10*PT5M
''')
Conf('execution time limit polling intervals',
VDR.V_INTERVAL_LIST, desc='''
List of intervals after execution time limit to poll jobs.
The intervals between polling after a task job (submitted to
the relevant job runner on the relevant host) exceeds its
execution time limit. The default setting is PT1M, PT2M, PT7M.
The accumulated times (in minutes) for these intervals will be
roughly 1, 1 + 2 = 3 and 1 + 2 + 7 = 10 after a task job
exceeds its execution time limit.
''')
Conf('ssh command',
VDR.V_STRING,
'ssh -oBatchMode=yes -oConnectTimeout=10',
desc='''
A communication command used to invoke commands on this
platform.
Not used on the workflow host unless you run local tasks
under another user account. The value is assumed to be ``ssh``
with some initial options or a command that implements a
similar interface to ``ssh``.
''')
Conf('use login shell', VDR.V_BOOLEAN, True, desc='''
Whether to use a login shell or not for remote command
invocation.
By default, Cylc runs remote SSH commands using a login shell:
.. code-block:: bash
ssh user@host 'bash --login cylc ...'
which will source the following files (in order):
* ``/etc/profile``
* ``~/.bash_profile``
* ``~/.bash_login``
* ``~/.profile``
.. _Bash man pages: https://linux.die.net/man/1/bash
For more information on login shells see the "Invocation"
section of the `Bash man pages`_.
For security reasons some institutions do not allow unattended
commands to start login shells, so you can turn off this
behaviour to get:
.. code-block:: bash
ssh user@host 'cylc ...'
which will use the default shell on the remote machine,
sourcing ``~/.bashrc`` (or ``~/.cshrc``) to set up the
environment.
''')
Conf('cylc path', VDR.V_STRING, desc='''
The path containing the ``cylc`` executable on a remote
platform.
.. versionchanged:: 8.0.0
Moved from ``suite.rc[runtime][<namespace>][job]
cylc executable``.
This may be necessary if the ``cylc`` executable is not in the
``$PATH`` for an ``ssh`` call.
Test whether this is the case by using
``ssh <host> command -v cylc``.
This path is used for remote invocations of the ``cylc``
command and is added to the ``$PATH`` in job scripts
for the configured platform.
.. note::
If :cylc:conf:`[..]use login shell = True` (the default)
then an alternative approach is to add ``cylc`` to the
``$PATH`` in the system or user Bash profile files
(e.g. ``~/.bash_profile``).
.. tip::
For multi-version installations this should point to the
Cylc wrapper script rather than the ``cylc`` executable
itself.
See :ref:`managing environments` for more information on
the wrapper script.
''')
Conf('global init-script', VDR.V_STRING, desc='''
Add a script before the init-script of all jobs on this
platform.
If specified, the value of this setting will be inserted to
just before the ``init-script`` section of all job scripts that
are to be submitted to the specified platform.
''')
Conf('copyable environment variables', VDR.V_STRING_LIST, '',
desc='''
A list containing the names of the environment variables to
be copied from the scheduler to a job.
''')
Conf('retrieve job logs', VDR.V_BOOLEAN, desc='''
Global default for
:cylc:conf:`flow.cylc[runtime][<namespace>][remote]
retrieve job logs`.
''')
Conf('retrieve job logs command', VDR.V_STRING, 'rsync -a',
desc='''
If ``rsync -a`` is unavailable or insufficient to retrieve job
logs from a remote platform, you can use this setting to
specify a suitable command.
''')
Conf('retrieve job logs max size', VDR.V_STRING, desc='''
Global default for
:cylc:conf:`flow.cylc[runtime][<namespace>][remote]
retrieve job logs max size` for this platform.
''')
Conf('retrieve job logs retry delays', VDR.V_INTERVAL_LIST,
desc='''
Global default for
:cylc:conf:`flow.cylc[runtime][<namespace>][remote]
retrieve job logs retry delays`
for this platform.
''')
Conf('tail command template',
VDR.V_STRING, 'tail -n +1 -F %(filename)s', desc='''
A command template (with ``%(filename)s`` substitution) to
tail-follow job logs this platform, by ``cylc cat-log``.
You are are unlikely to need to override this.
''')
Conf('err tailer', VDR.V_STRING, desc='''
A command template (with ``%(job_id)s`` substitution) that can
be used to tail-follow the stderr stream of a running job if
SYSTEM does not use the normal log file location while the job
is running. This setting overrides
:cylc:conf:`[..]tail command template`.
Examples::
# for PBS
qcat -f -e %(job_id)s
''')
Conf('out tailer', VDR.V_STRING, desc='''
A command template (with ``%(job_id)s`` substitution) that can
be used to tail-follow the stdout stream of a running job if
SYSTEM does not use the normal log file location while the job
is running. This setting overrides
:cylc:conf:`[..]tail command template`.
Examples::
# for PBS
qcat -f -o %(job_id)s
''')
Conf('err viewer', VDR.V_STRING, desc='''
A command template (with ``%(job_id)s`` substitution) that can
be used to view the stderr stream of a running job if SYSTEM
does not use the normal log file location while the job is
running.
Examples::
# for PBS
qcat -e %(job_id)s
''')
Conf('out viewer', VDR.V_STRING, desc='''
A command template (with ``%(job_id)s`` substitution) that can
be used to view the stdout stream of a running job if SYSTEM
does not use the normal log file location while the job is
running.
Examples::
# for PBS
qcat -o %(job_id)s
''')
Conf('job name length maximum', VDR.V_INTEGER, desc='''
The maximum length for job name acceptable by a job runner on
a given host. Currently, this setting is only meaningful for
PBS jobs. For example, PBS 12 or older will fail a job submit
if the job name has more than 15 characters; whereas PBS 13
accepts up to 236 characters.
''')
Conf('install target', VDR.V_STRING, desc='''
This defaults to the platform name. This will be used as the
target for remote file installation.
For example, if Platform_A shares a file system with localhost:
.. code-block:: cylc
[platforms]
[[Platform_A]]
install target = localhost
''')
Conf('clean job submission environment', VDR.V_BOOLEAN, False,
desc='''
Job submission subprocesses inherit their parent environment by
default. Remote jobs inherit the default non-interative shell
environment for their platform. Jobs on the scheduler host
inherit the scheduler environment (unless their job runner
prevents this).
If, for example, the ``$PYTHON`` variable is different on the
scheduler and the remote host the same program may run in
different ways.
We recommend using a clean job submission environment for
consistent handling of local and remote jobs. However,
this is not the default behavior because it prevents
local task jobs from running, unless ``$PATH`` contains the
``cylc`` wrapper script.
Specific environment variables can be singled out to pass
through to the clean environment, if necessary.
A standard set of executable paths is passed through to clean
environments, and can be added to if necessary.
''')
Conf('job submission environment pass-through', VDR.V_STRING_LIST,
desc='''
List of environment variable names to pass through to
job submission subprocesses.
``$HOME`` is passed automatically.
You are unlikely to need this.
''')
Conf('job submission executable paths', VDR.V_STRING_LIST,
desc=f'''
Additional executable locations to pass to the job
submission subprocess beyond the standard locations
{", ".join(f"``{i}``" for i in SYSPATH)}.
You are unlikely to need this.
''')
Conf('max batch submit size', VDR.V_INTEGER, default=100, desc='''
Limits the maximum number of jobs that can be submitted at
once.
Where possible Cylc will batch together job submissions to
the same platform for efficiency. Submitting very large
numbers of jobs can cause problems with some submission
systems so for safety there is an upper limit on the number
of job submissions which can be batched together.
''')
with Conf('selection', desc='''
How to select platform from list of hosts.
.. versionadded:: 8.0.0
''') as Selection:
Conf('method', VDR.V_STRING, default='random',
options=['random', 'definition order'],
desc='''
Method for choosing the job host from the platform.
.. versionadded:: 8.0.0
.. rubric:: Available options
- ``random``: Choose randomly from the list of hosts.
This is suitable for a pool of identical hosts.
- ``definition order``: Take the first host in the list
unless that host was unreachable. In many cases
this is likely to cause load imbalances, but might
be appropriate if following the pattern
``hosts = main, backup, failsafe``.
''')
with Conf('localhost', meta=Platform, desc='''
A default platform defining settings for jobs to be run on the
same host as the workflow scheduler.
.. attention::
It is common practice to run the Cylc scheduler on a dedicated
host: In this case **"localhost" will refer to the host where
the scheduler is running and not the computer where you
ran "cylc play"**.
'''):
Conf('hosts', VDR.V_STRING_LIST, ['localhost'])
with Conf('selection', meta=Selection):
Conf('method', VDR.V_STRING, default='definition order')
# Platform Groups
with Conf('platform groups', desc='''
Platform groups allow you to group together platforms which would
all be suitable for a given job.
.. versionadded:: 8.0.0
When Cylc sets up a task job it will pick a platform from a group.
Cylc will then use the selected platform for all interactions with
that job.
For example, if you have a group of computers
without a shared file system, but otherwise identical called
``bigmachine01..02`` you might set up a platform group
``[[bigmachines]]platforms=bigmachine01, bigmachine02``.
.. seealso::
- :ref:`MajorChangesPlatforms` in the Cylc 8 migration guide.
- :ref:`AdminGuide.PlatformConfigs`, an guide to platform
configurations.
'''): # noqa: SIM117 (keep same format)
with Conf('<group>'):
Conf('platforms', VDR.V_STRING_LIST, desc='''
A list of platforms which can be selected if
:cylc:conf:`flow.cylc[runtime][<namespace>]platform` matches
the name of this platform group.
.. versionadded:: 8.0.0
''')
with Conf('selection'):
Conf(
'method', VDR.V_STRING, default='random',
options=['random', 'definition order'],
desc='''
Method for selecting platform from group.
.. versionadded:: 8.0.0
options:
- random: Suitable for an identical pool of platforms.
- definition order: Pick the first available platform
from the list.
'''
)
# task
with Conf('task events', desc='''
Global site/user defaults for
:cylc:conf:`flow.cylc[runtime][<namespace>][events]`.
'''):
Conf('execution timeout', VDR.V_INTERVAL)
Conf('handlers', VDR.V_STRING_LIST)
Conf('handler events', VDR.V_STRING_LIST)
Conf('handler retry delays', VDR.V_INTERVAL_LIST, None)
Conf('mail events', VDR.V_STRING_LIST)
Conf('submission timeout', VDR.V_INTERVAL)
def upg(cfg, descr):
    """Upgrade deprecated settings in a raw global config.

    Args:
        cfg: The nested config data structure to upgrade in place.
        descr: Description of the config, used in upgrade messages.
    """
    # Build the upgrader and run it in one step.
    upgrader(cfg, descr).upgrade()
def get_version_hierarchy(version: str) -> List[str]:
    """Return list of versions whose global configs are compatible, in
    ascending priority.

    Args:
        version: A PEP 440 compliant version tag.

    Example:
        >>> get_version_hierarchy('8.0.1a2.dev')
        ['', '8', '8.0', '8.0.1', '8.0.1a2', '8.0.1a2.dev']
    """
    # (No type anno. yet for Version in pkg_resources.extern.packaging.version)
    parsed: Any = parse_version(version)
    # Start with the catch-all '' entry, then grow one release component
    # at a time: '8', '8.0', '8.0.1', ...
    hierarchy = ['']
    parts: List[str] = []
    for component in parsed.release:
        parts.append(str(component))
        hierarchy.append('.'.join(parts))
    if parsed.pre:  # alpha/beta (excluding dev) part of version
        suffix = ''.join(str(seg) for seg in parsed.pre)
        hierarchy.append(hierarchy[-1] + suffix)
    if version not in hierarchy:  # catch-all (e.g. dev releases)
        hierarchy.append(version)
    return hierarchy
class GlobalConfig(ParsecConfig):
    """
    Handle global (all workflows) site and user configuration for cylc.
    User file values override site file values.
    """
    # Singleton instance cached by get_inst(cached=True).
    _DEFAULT: Optional['GlobalConfig'] = None
    # File name of a global config file in any config directory.
    CONF_BASENAME: str = "global.cylc"
    # Site config root used when $CYLC_SITE_CONF_PATH is not set.
    DEFAULT_SITE_CONF_PATH: str = os.path.join(os.sep, 'etc', 'cylc')
    # Per-user config root under the user's home directory.
    USER_CONF_PATH: str = os.path.join(
        os.getenv('HOME') or get_user_home(), '.cylc', 'flow'
    )
    # Version strings whose config directories are searched, in
    # ascending priority order.
    VERSION_HIERARCHY: List[str] = get_version_hierarchy(CYLC_VERSION)

    def __init__(self, *args, **kwargs) -> None:
        """Build the (config-type, directory) search hierarchy.

        Site directories come first, then user directories, each expanded
        over the version hierarchy, so later (user / more version-specific)
        entries take priority when files are loaded in order.
        """
        site_conf_root = (
            os.getenv('CYLC_SITE_CONF_PATH') or self.DEFAULT_SITE_CONF_PATH
        )
        self.conf_dir_hierarchy: List[Tuple[str, str]] = [
            *[
                (upgrader.SITE_CONFIG,
                 os.path.join(site_conf_root, 'flow', ver))
                for ver in self.VERSION_HIERARCHY
            ],
            *[
                (upgrader.USER_CONFIG,
                 os.path.join(self.USER_CONF_PATH, ver))
                for ver in self.VERSION_HIERARCHY
            ]
        ]
        super().__init__(*args, **kwargs)

    @classmethod
    def get_inst(cls, cached=True):
        """Return a GlobalConfig instance.
        Args:
            cached (bool):
                If cached create if necessary and return the singleton
                instance, else return a new instance.
        """
        if not cached:
            # Return an up-to-date global config without affecting the
            # singleton.
            new_instance = cls(SPEC, upg, validator=cylc_config_validate)
            new_instance.load()
            return new_instance
        elif not cls._DEFAULT:
            # First cached call: create and populate the singleton.
            cls._DEFAULT = cls(SPEC, upg, validator=cylc_config_validate)
            cls._DEFAULT.load()
        return cls._DEFAULT

    def _load(self, fname, conf_type):
        """Load the config file ``fname`` if it exists and is readable."""
        if os.access(fname, os.F_OK | os.R_OK):
            self.loadcfg(fname, conf_type)

    def load(self):
        """Load or reload configuration from files."""
        # Drop any previously loaded config before re-reading files.
        self.sparse.clear()
        self.dense.clear()
        LOG.debug("Loading site/user config files")
        conf_path_str = os.getenv("CYLC_CONF_PATH")
        if conf_path_str:
            # Explicit config file override.
            fname = os.path.join(conf_path_str, self.CONF_BASENAME)
            self._load(fname, upgrader.USER_CONFIG)
        else:
            # Use default locations.
            for conf_type, conf_dir in self.conf_dir_hierarchy:
                fname = os.path.join(conf_dir, self.CONF_BASENAME)
                try:
                    self._load(fname, conf_type)
                except ParsecError:
                    # Identify which file was bad before re-raising.
                    LOG.error(f'bad {conf_type} {fname}')
                    raise
        self._set_default_editors()
        self._no_platform_group_name_overlap()

    def _set_default_editors(self):
        """Fill in editor settings from $EDITOR/$GEDITOR or hard defaults."""
        # default to $[G]EDITOR unless an editor is defined in the config
        # NOTE: use `or` to handle cases where an env var is set to ''
        cfg = self.get()
        if not cfg['editors']['terminal']:
            cfg['editors']['terminal'] = os.environ.get('EDITOR') or 'vi'
        if not cfg['editors']['gui']:
            cfg['editors']['gui'] = os.environ.get('GEDITOR') or 'gvim -fg'

    def _no_platform_group_name_overlap(self):
        """Raise GlobalConfigError if any name is defined both as a
        platform and as a platform group."""
        if (
            'platforms' in self.sparse and
            'platform groups' in self.sparse
        ):
            names_in_platforms_and_groups = set(
                self.sparse['platforms'].keys()).intersection(
                    set(self.sparse['platform groups'].keys()))
            if names_in_platforms_and_groups:
                msg = (
                    'Platforms and platform groups must not share names. '
                    'The following are in both sets:'
                )
                for name in names_in_platforms_and_groups:
                    msg += f'\n * {name}'
                raise GlobalConfigError(msg)
|
import os
import sys
import VTKConvert
if len(sys.argv) == 1 :
print "Usage: processISISData file-name1 file-name2 ...\n processISISDATA dir-name"
exit(1)
names=[]
is_dir = os.path.isdir(sys.argv[1])
if is_dir :
names = os.listdir(sys.argv[1])
else:
for i in range(1,len(sys.argv)):
names.append(sys.argv[i])
prefix=""
if is_dir :
prefix = sys.argv[1].split('/')[0] + "-VTU/"
if os.path.isdir(prefix) :
print "Directory " + prefix + " already exists, please move\n"
exit(1)
else:
os.mkdir(prefix)
else:
prefix = "./"
for file in names:
if is_dir :
filename = sys.argv[1] + file
VTKConvert.convertToVTU(filename, prefix)
VTKConvert.writeParallelVTU(names, prefix)
|
from django.dispatch import receiver
from django_fsm import signals
from core.editor.models import IssueSubmission
from core.editor.models import IssueSubmissionStatusTrack
@receiver(signals.post_transition, sender=IssueSubmission)
def register_status_track(sender, instance, name, source, target, **kwargs):
    """Record a status-track entry whenever an issue submission transitions.

    Connected to django-fsm's ``post_transition`` signal for
    ``IssueSubmission`` instances.
    """
    track = IssueSubmissionStatusTrack.objects.create(
        issue_submission=instance,
        status=instance.status,
    )
    # A submission that just reached the SUBMITTED state also gets its
    # latest files version attached to the new track.
    if instance.status == IssueSubmission.SUBMITTED:
        track.files_version = instance.last_files_version
        track.save()
|
import os
from trepan.processor.command import base_cmd as Mbase_cmd
from trepan.processor import frame as Mframe
class UpCommand(Mbase_cmd.DebuggerCommand):
    # Direction indicator passed to the shared frame helpers: -1 means
    # motion toward older (caller) frames.
    signum = -1
    # Metadata consumed by the trepan command processor / help system.
    category = 'stack'
    min_args = 0
    max_args = 1
    # Command name is derived from this file's base name (e.g. "up").
    name = os.path.basename(__file__).split('.')[0]
    # This command only makes sense when a call stack is available.
    need_stack = True
    short_help = 'Move frame in the direction of the caller of ' \
        'the last-selected frame'
    def complete(self, prefix):
        # Delegate tab-completion of the optional frame-count argument to
        # the shared frame helper, biased by this command's direction.
        proc_obj = self.proc
        return Mframe.frame_complete(proc_obj, prefix, self.signum)
    def run(self, args):
        """**up** [*count*]
        Move the current frame up in the stack trace (to an older frame). 0 is
        the most recent frame. If no count is given, move up 1.
        See also:
        ---------
        `down` and `frame`."""
        # Shared helper re-selects the frame relative to the current one;
        # the direction comes from self.signum.
        Mframe.adjust_relative(self.proc, self.name, args, self.signum)
        return False
|
from new_address_widget import NewAddressWidget
from existing_address_widget import ExistingAddressWidget
|
import types
from appy import Object
from appy.gen import Field
from appy.px import Px
from DateTime import DateTime
from BTrees.IOBTree import IOBTree
from persistent.list import PersistentList
class Calendar(Field):
    '''This field allows to produce an agenda (monthly view) and view/edit
       events on it.'''
    jsFiles = {'view': ('calendar.js',)}

    # Month view for a calendar. Called by pxView, and directly from the UI,
    # via Ajax, when the user selects another month.
    # NOTE: in Px templates, attribute values and tag contents must start
    # with ":" to be evaluated as Python expressions. Four expressions below
    # were missing that prefix and rendered as literal text; they are fixed
    # here: the day cell's "style", its "onmouseout" handler, the "new event"
    # form's "id" and the delete-popup confirmation message.
    pxMonthView = Px('''
     <div var="ajaxHookId=zobj.id + field.name;
               month=req['month'];
               monthDayOne=DateTime('%s/01' % month);
               today=DateTime('00:00');
               grid=field.getMonthGrid(month);
               allEventTypes=field.getEventTypes(zobj);
               preComputed=field.getPreComputedInfo(zobj, monthDayOne, grid);
               defaultDate=field.getDefaultDate(zobj);
               defaultDateMonth=defaultDate.strftime('%Y/%m');
               previousMonth=field.getSiblingMonth(month, 'previous');
               nextMonth=field.getSiblingMonth(month, 'next');
               mayEdit=zobj.mayEdit(field.writePermission);
               objUrl=zobj.absolute_url();
               startDate=field.getStartDate(zobj);
               endDate=field.getEndDate(zobj);
               otherCalendars=field.getOtherCalendars(zobj, preComputed)"
          id=":ajaxHookId">
     <script type="text/javascript">:'var %s_maxEventLength = %d' % \
                                     (field.name, field.maxEventLength)</script>

     <!-- Month chooser -->
     <div style="margin-bottom: 5px"
          var="fmt='%Y/%m/%d';
               goBack=not startDate or (startDate.strftime(fmt) < \
                                        grid[0][0].strftime(fmt));
               goForward=not endDate or (endDate.strftime(fmt) > \
                                         grid[-1][-1].strftime(fmt))">
      <!-- Go to the previous month -->
      <img class="clickable" if="goBack" src=":url('arrowLeft')"
           onclick=":'askMonthView(%s,%s,%s,%s)' % \
                     (q(ajaxHookId),q(objUrl),q(field.name),q(previousMonth))"/>
      <!-- Go back to the default date -->
      <input type="button" if="goBack or goForward"
             var="fmt='%Y/%m';
                  label=(defaultDate.strftime(fmt)==today.strftime(fmt)) and \
                        'today' or 'goto_source'"
             value=":_(label)"
             onclick=":'askMonthView(%s, %s, %s, %s)' % (q(ajaxHookId), \
                       q(objUrl), q(field.name), q(defaultDateMonth))"
             disabled=":defaultDate.strftime(fmt)==monthDayOne.strftime(fmt)"/>
      <!-- Go to the next month -->
      <img class="clickable" if="goForward" src=":url('arrowRight')"
           onclick=":'askMonthView(%s, %s, %s, %s)' % (q(ajaxHookId), \
                     q(objUrl), q(field.name), q(nextMonth))"/>
      <span>:_('month_%s' % monthDayOne.aMonth())</span>
      <span>:month.split('/')[0]</span>
     </div>

     <!-- Calendar month view -->
     <table cellpadding="0" cellspacing="0" width="100%" class="list"
            style="font-size: 95%"
            var="rowHeight=int(field.height/float(len(grid)))">
      <!-- 1st row: names of days -->
      <tr height="22px">
       <th for="dayName in field.getNamesOfDays(zobj)"
           width="14%">:dayName</th>
      </tr>
      <!-- The calendar in itself -->
      <tr for="row in grid" valign="top" height=":rowHeight">
       <x for="date in row"
          var2="tooEarly=startDate and (date < startDate);
                tooLate=endDate and not tooEarly and (date > endDate);
                inRange=not tooEarly and not tooLate;
                cssClasses=field.getCellStyle(zobj, date, today)">
        <!-- Dump an empty cell if we are out of the supported date range -->
        <td if="not inRange" class=":cssClasses"></td>
        <!-- Dump a normal cell if we are in range -->
        <td if="inRange"
            var2="events=field.getEventsAt(zobj, date);
                  spansDays=field.hasEventsAt(zobj, date+1, events);
                  mayCreate=mayEdit and not events;
                  mayDelete=mayEdit and events;
                  day=date.day();
                  dayString=date.strftime('%Y/%m/%d')"
            style=":date.isCurrentDay() and 'font-weight:bold' or \
                    'font-weight:normal'"
            class=":cssClasses"
            onmouseover=":mayEdit and 'this.getElementsByTagName(\
              %s)[0].style.visibility=%s' % (q('img'), q('visible')) or ''"
            onmouseout=":mayEdit and 'this.getElementsByTagName(\
              %s)[0].style.visibility=%s' % (q('img'), q('hidden')) or ''">
         <span>:day</span>
         <span if="day == 1">:_('month_%s_short' % date.aMonth())</span>
         <!-- Icon for adding an event -->
         <x if="mayCreate">
          <img class="clickable" style="visibility:hidden"
               var="info=field.getApplicableEventsTypesAt(zobj, date, \
                          allEventTypes, preComputed, True)"
               if="info.eventTypes" src=":url('plus')"
               onclick=":'openEventPopup(%s, %s, %s, null, %s, %s)' % \
                 (q('new'), q(field.name), q(dayString), q(info.eventTypes),\
                  q(info.message))"/>
         </x>
         <!-- Icon for deleting an event -->
         <img if="mayDelete" class="clickable" style="visibility:hidden"
              src=":url('delete')"
              onclick=":'openEventPopup(%s, %s, %s, %s, null, null)' % \
                (q('del'), q(field.name), q(dayString), q(spansDays))"/>
         <!-- A single event is allowed for the moment -->
         <div if="events" var2="eventType=events[0].eventType">
          <span style="color: grey">:field.getEventName(zobj, eventType)</span>
         </div>
         <!-- Events from other calendars -->
         <x if="otherCalendars"
            var2="otherEvents=field.getOtherEventsAt(zobj, date, \
                                                     otherCalendars)">
          <div style=":'color: %s; font-style: italic' % event.color"
               for="event in otherEvents">:event.name</div>
         </x>
         <!-- Additional info -->
         <x var="info=field.getAdditionalInfoAt(zobj, date, preComputed)"
            if="info">::info</x>
        </td>
       </x>
      </tr>
     </table>

     <!-- Popup for creating a calendar event -->
     <div var="prefix='%s_newEvent' % field.name;
               popupId=prefix + 'Popup'"
          id=":popupId" class="popup" align="center">
      <form id=":prefix + 'Form'" method="post">
       <input type="hidden" name="fieldName" value=":field.name"/>
       <input type="hidden" name="month" value=":month"/>
       <input type="hidden" name="name" value=":field.name"/>
       <input type="hidden" name="action" value="Process"/>
       <input type="hidden" name="actionType" value="createEvent"/>
       <input type="hidden" name="day"/>

       <!-- Choose an event type -->
       <div align="center" style="margin-bottom: 3px">:_('which_event')</div>
       <select name="eventType">
        <option value="">:_('choose_a_value')</option>
        <option for="eventType in allEventTypes"
                value=":eventType">:field.getEventName(zobj,eventType)</option>
       </select><br/><br/>
       <!--Span the event on several days -->
       <div align="center" class="discreet" style="margin-bottom: 3px">
        <span>:_('event_span')</span>
        <input type="text" size="3" name="eventSpan"/>
       </div>
       <input type="button"
              value=":_('object_save')"
              onclick=":'triggerCalendarEvent(%s, %s, %s, %s, \
                         %s_maxEventLength)' % (q('new'), q(ajaxHookId), \
                        q(field.name), q(objUrl), field.name)"/>
       <input type="button"
              value=":_('object_cancel')"
              onclick=":'closePopup(%s)' % q(popupId)"/>
      </form>
     </div>

     <!-- Popup for deleting a calendar event -->
     <div var="prefix='%s_delEvent' % field.name;
               popupId=prefix + 'Popup'"
          id=":popupId" class="popup" align="center">
      <form id=":prefix + 'Form'" method="post">
       <input type="hidden" name="fieldName" value=":field.name"/>
       <input type="hidden" name="month" value=":month"/>
       <input type="hidden" name="name" value=":field.name"/>
       <input type="hidden" name="action" value="Process"/>
       <input type="hidden" name="actionType" value="deleteEvent"/>
       <input type="hidden" name="day"/>
       <div align="center"
            style="margin-bottom: 5px">:_('action_confirm')</div>

       <!-- Delete successive events ? -->
       <div class="discreet" style="margin-bottom: 10px"
            id=":prefix + 'DelNextEvent'">
         <input type="checkbox" name="deleteNext_cb"
                id=":prefix + '_cb'"
                onClick=":'toggleCheckbox(%s, %s)' % \
                          (q('%s_cb' % prefix), q('%s_hd' % prefix))"/>
         <input type="hidden" id=":prefix + '_hd'" name="deleteNext"/>
         <span>:_('del_next_events')</span>
       </div>
       <input type="button" value=":_('yes')"
              onClick=":'triggerCalendarEvent(%s, %s, %s, %s)' % \
                        (q('del'), q(ajaxHookId), q(field.name), q(objUrl))"/>
       <input type="button" value=":_('no')"
              onclick=":'closePopup(%s)' % q(popupId)"/>
      </form>
     </div>
    </div>''')

    # Entry point for the month view: initializes the request with the
    # default month and the field name, then delegates to pxMonthView.
    pxView = pxCell = Px('''
     <x var="defaultDate=field.getDefaultDate(zobj);
             x=req.set('month', defaultDate.strftime('%Y/%m'));
             x=req.set('fieldName', field.name)">:field.pxMonthView</x>''')
    # The calendar has no specific edit or search views.
    pxEdit = pxSearch = ''
    def __init__(self, eventTypes, eventNameMethod=None, validator=None,
                 default=None, show='view', page='main', group=None,
                 layouts=None, move=0, specificReadPermission=False,
                 specificWritePermission=False, width=None, height=300,
                 colspan=1, master=None, masterValue=None, focus=False,
                 mapping=None, label=None, maxEventLength=50,
                 otherCalendars=None, additionalInfo=None, startDate=None,
                 endDate=None, defaultDate=None, preCompute=None,
                 applicableEvents=None):
        '''Initialise the calendar field. Most parameters are generic Field
           parameters forwarded to Field.__init__; the calendar-specific ones
           are documented by the comments below, next to the attribute each
           one initialises.'''
        # NOTE(review): the long positional call below must match
        # Field.__init__'s parameter order exactly; do not reorder.
        Field.__init__(self, validator, (0,1), default, show, page, group,
                       layouts, move, False, False, specificReadPermission,
                       specificWritePermission, width, height, None, colspan,
                       master, masterValue, focus, False, mapping, label, None,
                       None, None, None, True)
        # eventTypes can be a "static" list or tuple of strings that identify
        # the types of events that are supported by this calendar. It can also
        # be a method that computes such a "dynamic" list or tuple. When
        # specifying a static list, an i18n label will be generated for every
        # event type of the list. When specifying a dynamic list, you must also
        # give, in p_eventNameMethod, a method that will accept a single arg
        # (=one of the event types from your dynamic list) and return the "name"
        # of this event as it must be shown to the user.
        self.eventTypes = eventTypes
        self.eventNameMethod = eventNameMethod
        if (type(eventTypes) == types.FunctionType) and not eventNameMethod:
            raise Exception("When param 'eventTypes' is a method, you must " \
                            "give another method in param 'eventNameMethod'.")
        # It is not possible to create events that span more days than
        # maxEventLength.
        self.maxEventLength = maxEventLength
        # When displaying a given month for this agenda, one may want to
        # pre-compute, once for the whole month, some information that will then
        # be given as arg for other methods specified in subsequent parameters.
        # This mechanism exists for performance reasons, to avoid recomputing
        # this global information several times. If you specify a method in
        # p_preCompute, it will be called every time a given month is shown, and
        # will receive 2 args: the first day of the currently shown month (as a
        # DateTime instance) and the grid of all shown dates (as a list of lists
        # of DateTime instances, one sub-list by row in the month view). This
        # grid may hold a little more than dates of the current month.
        # Subsequently, the return of your method will be given as arg to other
        # methods that you may specify as args of other parameters of this
        # Calendar class (see comments below).
        self.preCompute = preCompute
        # If a method is specified in the following parameters, it must accept
        # a single arg (the result of self.preCompute) and must return a list of
        # calendars whose events must be shown within this agenda.
        # Every element in this list must be a sub-list [object, name, color]
        # (not a tuple):
        # - object must refer to the other object on which the other calendar
        #   field is defined;
        # - name is the name of the field on this object that stores the
        #   calendar;
        # - color must be a string containing the HTML color (including the
        #   leading "#" when relevant) into which events of the calendar must
        #   appear.
        self.otherCalendars = otherCalendars
        # One may want to add, day by day, custom information in the calendar.
        # When a method is given in p_additionalInfo, for every cell of the
        # month view, this method will be called with 2 args: the cell's date
        # and the result of self.preCompute. The method's result (a string that
        # can hold text or a chunk of XHTML) will be inserted in the cell.
        self.additionalInfo = additionalInfo
        # One may limit event encoding and viewing to some period of time,
        # via p_startDate and p_endDate. Those parameters, if given, must hold
        # methods accepting no arg and returning a Zope DateTime instance. The
        # startDate and endDate will be converted to UTC at 00.00.
        self.startDate = startDate
        self.endDate = endDate
        # If a default date is specified, it must be a method accepting no arg
        # and returning a DateTime instance. As soon as the calendar is shown,
        # the month where this date is included will be shown. If no default
        # date is specified, it will be 'now' at the moment the calendar is
        # shown.
        self.defaultDate = defaultDate
        # For a specific day, all event types may not be applicable. If this is
        # the case, one may specify here a method that defines, for a given day,
        # a sub-set of all event types. This method must accept 3 args: the day
        # in question (as a DateTime instance), the list of all event types
        # (which is a copy of the possibly computed self.eventTypes) and
        # the result of calling self.preCompute. The method must modify
        # the 2nd arg and remove from it potentially not applicable events.
        # This method can also return a message, that will be shown to the user
        # for explaining him why he can, for this day, only create events of a
        # sub-set of the possible event types (or even no event at all).
        self.applicableEvents = applicableEvents
def getPreComputedInfo(self, obj, monthDayOne, grid):
'''Returns the result of calling self.preComputed, or None if no such
method exists.'''
if self.preCompute:
return self.preCompute(obj.appy(), monthDayOne, grid)
def getSiblingMonth(self, month, prevNext):
'''Gets the next or previous month (depending of p_prevNext) relative
to p_month.'''
dayOne = DateTime('%s/01 UTC' % month)
if prevNext == 'previous':
refDate = dayOne - 1
elif prevNext == 'next':
refDate = dayOne + 33
return refDate.strftime('%Y/%m')
weekDays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def getNamesOfDays(self, obj, short=True):
res = []
for day in self.weekDays:
if short:
suffix = '_short'
else:
suffix = ''
res.append(obj.translate('day_%s%s' % (day, suffix)))
return res
    def getMonthGrid(self, month):
        '''Creates a list of lists of DateTime objects representing the calendar
           grid to render for a given p_month. Every sub-list is one week row,
           Monday first; rows at the edges may contain days from the previous
           or next month so that every row holds exactly 7 days.'''
        # Month is a string "YYYY/mm".
        currentDay = DateTime('%s/01 UTC' % month)
        currentMonth = currentDay.month()
        res = [[]]
        dayOneNb = currentDay.dow() or 7 # This way, Sunday is 7 and not 0.
        if dayOneNb != 1:
            previousDate = DateTime(currentDay)
            # If the 1st day of the month is not a Monday, start the row with
            # the last days of the previous month.
            for i in range(1, dayOneNb):
                previousDate = previousDate - 1
                res[0].insert(0, previousDate)
        finished = False
        while not finished:
            # Insert currentDay in the grid
            if len(res[-1]) == 7:
                # The current row is full: create a new row
                res.append([currentDay])
            else:
                res[-1].append(currentDay)
            currentDay = currentDay + 1
            if currentDay.month() != currentMonth:
                # We walked out of p_month: the grid body is complete.
                finished = True
        # Complete, if needed, the last row with the first days of the next
        # month.
        if len(res[-1]) != 7:
            while len(res[-1]) != 7:
                res[-1].append(currentDay)
                currentDay = currentDay + 1
        return res
def getOtherCalendars(self, obj, preComputed):
'''Returns the list of other calendars whose events must also be shown
on this calendar.'''
if self.otherCalendars:
res = self.otherCalendars(obj.appy(), preComputed)
# Replace field names with field objects
for i in range(len(res)):
res[i][1] = res[i][0].getField(res[i][1])
return res
def getAdditionalInfoAt(self, obj, date, preComputed):
'''If the user has specified a method in self.additionalInfo, we call
it for displaying this additional info in the calendar, at some
p_date.'''
if not self.additionalInfo: return
return self.additionalInfo(obj.appy(), date, preComputed)
def getEventTypes(self, obj):
'''Returns the (dynamic or static) event types as defined in
self.eventTypes.'''
if type(self.eventTypes) == types.FunctionType:
return self.eventTypes(obj.appy())
else:
return self.eventTypes
def getApplicableEventsTypesAt(self, obj, date, allEventTypes, preComputed,
forBrowser=False):
'''Returns the event types that are applicable at a given p_date. More
precisely, it returns an object with 2 attributes:
* "events" is the list of applicable event types;
* "message", not empty if some event types are not applicable,
contains a message explaining those event types are
not applicable.
'''
if not self.applicableEvents:
eventTypes = allEventTypes
message = None
else:
eventTypes = allEventTypes[:]
message = self.applicableEvents(obj.appy(), date, eventTypes,
preComputed)
res = Object(eventTypes=eventTypes, message=message)
if forBrowser:
res.eventTypes = ','.join(res.eventTypes)
if not res.message: res.message = ''
return res
def getEventsAt(self, obj, date):
'''Returns the list of events that exist at some p_date (=day).'''
obj = obj.o # Ensure p_obj is not a wrapper.
if not hasattr(obj.aq_base, self.name): return
years = getattr(obj, self.name)
year = date.year()
if year not in years: return
months = years[year]
month = date.month()
if month not in months: return
days = months[month]
day = date.day()
if day not in days: return
return days[day]
def getEventTypeAt(self, obj, date):
'''Returns the event type of the first event defined at p_day, or None
if unspecified.'''
events = self.getEventsAt(obj, date)
if not events: return
return events[0].eventType
    def getEventsByType(self, obj, eventType, minDate=None, maxDate=None,
                        sorted=True, groupSpanned=False):
        '''Returns all the events of a given p_eventType. If p_eventType is
           None, it returns events of all types. The return value is a list of
           2-tuples whose 1st elem is a DateTime instance and whose 2nd elem is
           the event.
           If p_sorted is True, the list is sorted in chronological order. Else,
           the order is random, but the result is computed faster.
           If p_minDate and/or p_maxDate is/are specified, it restricts the
           search interval accordingly.
           If p_groupSpanned is True, events spanned on several days are
           grouped into a single event. In this case, tuples in the result
           are 3-tuples: (DateTime_startDate, DateTime_endDate, event).
        '''
        # Prevent wrong combinations of parameters
        if groupSpanned and not sorted:
            raise Exception('Events must be sorted if you want to get ' \
                            'spanned events to be grouped.')
        obj = obj.o # Ensure p_obj is not a wrapper.
        res = []
        if not hasattr(obj, self.name): return res
        # Compute "min" and "max" tuples. Tuples (year,), (year, month) and
        # (year, month, day) compare lexicographically, which matches
        # chronological order.
        if minDate:
            minYear = minDate.year()
            minMonth = (minYear, minDate.month())
            minDay = (minYear, minDate.month(), minDate.day())
        if maxDate:
            maxYear = maxDate.year()
            maxMonth = (maxYear, maxDate.month())
            maxDay = (maxYear, maxDate.month(), maxDate.day())
        # Browse years
        years = getattr(obj, self.name)
        for year in list(years.keys()):
            # Don't take this year into account if outside interval
            if minDate and (year < minYear): continue
            if maxDate and (year > maxYear): continue
            months = years[year]
            # Browse this year's months
            for month in list(months.keys()):
                # Don't take this month into account if outside interval
                thisMonth = (year, month)
                if minDate and (thisMonth < minMonth): continue
                if maxDate and (thisMonth > maxMonth): continue
                days = months[month]
                # Browse this month's days
                for day in list(days.keys()):
                    # Don't take this day into account if outside interval
                    thisDay = (year, month, day)
                    if minDate and (thisDay < minDay): continue
                    if maxDate and (thisDay > maxDay): continue
                    events = days[day]
                    # Browse this day's events
                    for event in events:
                        # Filter unwanted events
                        if eventType and (event.eventType != eventType):
                            continue
                        # We have found a event.
                        date = DateTime('%d/%d/%d UTC' % (year, month, day))
                        if groupSpanned:
                            # Use a mutable list: the end date (2nd slot) is
                            # filled in during the merge phase below.
                            singleRes = [date, None, event]
                        else:
                            singleRes = (date, event)
                        res.append(singleRes)
        # Sort the result if required
        if sorted: res.sort(key=lambda x: x[0])
        # Group events spanned on several days if required
        if groupSpanned:
            # Browse events in reverse order and merge them when appropriate:
            # two consecutive days carrying the same event type collapse into
            # one (start, end, event) triple.
            i = len(res) - 1
            while i > 0:
                currentDate = res[i][0]
                lastDate = res[i][1]
                previousDate = res[i-1][0]
                currentType = res[i][2].eventType
                previousType = res[i-1][2].eventType
                if (previousDate == (currentDate-1)) and \
                   (previousType == currentType):
                    # A merge is needed
                    del res[i]
                    res[i-1][1] = lastDate or currentDate
                i -= 1
        return res
def hasEventsAt(self, obj, date, otherEvents):
'''Returns True if, at p_date, an event is found of the same type as
p_otherEvents.'''
if not otherEvents: return False
events = self.getEventsAt(obj, date)
if not events: return False
return events[0].eventType == otherEvents[0].eventType
def getOtherEventsAt(self, obj, date, otherCalendars):
'''Gets events that are defined in p_otherCalendars at some p_date.'''
res = []
for o, field, color in otherCalendars:
events = field.getEventsAt(o.o, date)
if events:
eventType = events[0].eventType
eventName = field.getEventName(o.o, eventType)
info = Object(name=eventName, color=color)
res.append(info)
return res
def getEventName(self, obj, eventType):
'''Gets the name of the event corresponding to p_eventType as it must
appear to the user.'''
if self.eventNameMethod:
return self.eventNameMethod(obj.appy(), eventType)
else:
return obj.translate('%s_event_%s' % (self.labelId, eventType))
def getStartDate(self, obj):
'''Get the start date for this calendar if defined.'''
if self.startDate:
d = self.startDate(obj.appy())
# Return the start date without hour, in UTC.
return DateTime('%d/%d/%d UTC' % (d.year(), d.month(), d.day()))
def getEndDate(self, obj):
'''Get the end date for this calendar if defined.'''
if self.endDate:
d = self.endDate(obj.appy())
# Return the end date without hour, in UTC.
return DateTime('%d/%d/%d UTC' % (d.year(), d.month(), d.day()))
def getDefaultDate(self, obj):
'''Get the default date that must appear as soon as the calendar is
shown.'''
if self.defaultDate:
return self.defaultDate(obj.appy())
else:
return DateTime() # Now
def createEvent(self, obj, date, eventType=None, eventSpan=None,
handleEventSpan=True):
'''Create a new event in the calendar, at some p_date (day).
If p_eventType is given, it is used; else, rq['eventType'] is used.
If p_handleEventSpan is True, we will use p_eventSpan (or
rq["eventSpan"] if p_eventSpan is not given) and also
create the same event for successive days.'''
obj = obj.o # Ensure p_obj is not a wrapper.
rq = obj.REQUEST
# Get values from parameters
if not eventType: eventType = rq['eventType']
if handleEventSpan and not eventSpan:
eventSpan = rq.get('eventSpan', None)
# Split the p_date into separate parts
year, month, day = date.year(), date.month(), date.day()
# Check that the "preferences" dict exists or not.
if not hasattr(obj.aq_base, self.name):
# 1st level: create a IOBTree whose keys are years.
setattr(obj, self.name, IOBTree())
yearsDict = getattr(obj, self.name)
# Get the sub-dict storing months for a given year
if year in yearsDict:
monthsDict = yearsDict[year]
else:
yearsDict[year] = monthsDict = IOBTree()
# Get the sub-dict storing days of a given month
if month in monthsDict:
daysDict = monthsDict[month]
else:
monthsDict[month] = daysDict = IOBTree()
# Get the list of events for a given day
if day in daysDict:
events = daysDict[day]
else:
daysDict[day] = events = PersistentList()
# Create and store the event, excepted if an event already exists.
if not events:
event = Object(eventType=eventType)
events.append(event)
# Span the event on the successive days if required
if handleEventSpan and eventSpan:
nbOfDays = min(int(eventSpan), self.maxEventLength)
for i in range(nbOfDays):
date = date + 1
self.createEvent(obj, date, handleEventSpan=False)
def deleteEvent(self, obj, date, handleEventSpan=True):
'''Deletes an event. It actually deletes all events at p_date.
If p_handleEventSpan is True, we will use rq["deleteNext"] to
delete successive events, too.'''
obj = obj.o # Ensure p_obj is not a wrapper.
if not self.getEventsAt(obj, date): return
daysDict = getattr(obj, self.name)[date.year()][date.month()]
# Remember events, in case we must delete similar ones for next days.
events = self.getEventsAt(obj, date)
del daysDict[date.day()]
rq = obj.REQUEST
if handleEventSpan and 'deleteNext' in rq and \
(rq['deleteNext'] == 'True'):
while True:
date = date + 1
if self.hasEventsAt(obj, date, events):
self.deleteEvent(obj, date, handleEventSpan=False)
else:
break
def process(self, obj):
'''Processes an action coming from the calendar widget, ie, the creation
or deletion of a calendar event.'''
rq = obj.REQUEST
action = rq['actionType']
# Security check
obj.mayEdit(self.writePermission, raiseError=True)
# Get the date for this action
if action == 'createEvent':
return self.createEvent(obj, DateTime(rq['day']))
elif action == 'deleteEvent':
return self.deleteEvent(obj, DateTime(rq['day']))
def getCellStyle(self, obj, date, today):
'''What CSS classes must apply to the table cell representing p_date
in the calendar?'''
res = []
# We must distinguish between past and future dates.
if date < today:
res.append('even')
else:
res.append('odd')
# Week-end days must have a specific style.
if date.aDay() in ('Sat', 'Sun'):
res.append('cellDashed')
return ' '.join(res)
|
# Demonstrates how escape sequences behave in Python string literals.
# NOTE: this module uses Python 2 `print` statements.
tab_cat = "\t Iam tab In"
new_line = "I am split \n on a line"
# Backslashes must be doubled to appear literally.
back_cat = "I am \\ a \\ cat"
# Triple-quoted string: embedded newlines are kept, and escape sequences
# (\t, \n) are still interpreted.
fat_cat = """
I will do a list
\t * Cat food
\t * Fishes
\t * Catnip \n \t * GRass
"""
# Triple-quoted string with no escapes: taken verbatim.
fat_CCAT = '''
hahah
this is ok
also
'''
print tab_cat
print new_line
print back_cat
print fat_cat
print fat_CCAT
|
import os
import re
import sys
from omsdk.sdkcreds import UserCredentials
from omsdk.sdkcenum import EnumWrapper, TypeHelper
from omsdk.lifecycle.sdkcredentials import iBaseCredentialsApi
from omdrivers.enums.iDRAC.iDRACEnums import *
from omdrivers.enums.iDRAC.iDRAC import Privilege_UsersTypes
# Python interpreter major-version flags, used to pick the right super() form.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# pysnmp is an optional dependency: record its availability so callers can
# degrade gracefully when it is not installed.
try:
    from pysnmp.hlapi import *
    from pysnmp.smi import *
    PySnmpPresent = True
except ImportError:
    PySnmpPresent = False
class iDRACCredsMgmt(iBaseCredentialsApi):
    """Credentials-management API for iDRAC: exposes the iDRAC user
    accounts held in the entity's system configuration (see the usage
    examples in the comments following this class)."""

    def __init__(self, entity):
        # Python 2 requires the explicit (class, instance) form of super().
        if PY2:
            super(iDRACCredsMgmt, self).__init__(entity)
        else:
            super().__init__(entity)
        self._job_mgr = entity.job_mgr
        self._config_mgr = entity.config_mgr
        self.eUserPrivilegeEnum = UserPrivilegeEnum

    @property
    def Users(self):
        # iDRAC user accounts, read from the current system configuration.
        return self._config_mgr._sysconfig.iDRAC.Users
#######
# Creating a user
#
# user = idrac.user_mgr.Users.new(
# user.<attribute_name> = value
# user.<attribute_name> = value
# user.<attribute_name> = value
# )
# idrac.user_mgr.Users.new(
# UserName_Users = username,
# Password_Users = password,
# Privilege_Users = Privilege_UsersTypes.Operator,
# IpmiLanPrivilege_Users = "Administrator",
# IpmiSerialPrivilege_Users = "Administrator",
# Enable_Users = "Enabled",
# SolEnable_Users = "Enabled",
# ProtocolEnable_Users = "Disabled",
# AuthenticationProtocol_Users = "SHA",
# PrivacyProtocol_Users = "AES"
# )
#
# idrac.config_mgr.apply_changes()
#
# Note: for enum types you can give enum or corresponding string value
# Privilege_UsersTypes.Administrator or "511"
# For details on variable types look at omdrivers.types.iDRAC.iDRAC
# and possible values of enum in omdrivers.enums.iDRAC.iDRAC
#
# don't forget to catch ValueError and AttributeError exceptions!
# You will get that for following reasons:
# - Wrong/invalid value provided (enum, string)
# - All user entries are exhausted
# - Duplicate user entry
#
# Until you do apply_changes, they are not committed.
#
#######
#######
# Modifying a user
#
# user = idrac.user_mgr.Users.find_first(UserName_Users = username)
#
# user.<attribute_name> = value
# user.<attribute_name> = value
# user.<attribute_name> = value
#
# value is None => treated as no change
# value is '' => treated as equivalent nullifying the object
# value is invalid => ValueError is thrown
# idrac.config_mgr.apply_changes()
#
# don't forget to catch ValueError and AttributeError exceptions!
#
#######
#######
# deleting a user
#
# idrac.user_mgr.iDRAC.Users.remove(UserName_Users = username)
# idrac.config_mgr.apply_changes()
#
# don't forget to catch ValueError and AttributeError exceptions!
#
#######
|
def f():
    """Placeholder function: does nothing and returns None."""
|
def process_rawq(self, cmd, cmd2):
    """Spin over self.rawq, setting self.option when cmd2 (but not cmd)
    is active while an IAC sequence is pending.

    NOTE(review): nothing inside the loop consumes self.rawq, so the loop
    only terminates when rawq is already falsy on entry -- preserved as-is.
    """
    while self.rawq:
        # Original shape: `if cmd: pass elif cmd2: ...` under an iacseq
        # guard; collapsed into a single equivalent condition.
        if self.iacseq and not cmd and cmd2:
            # Option 2 when a callback is installed, else 3.
            self.option = 2 if self.option_callback else 3
def listener(data):
    """Busy-loop forever, flipping `data` between 1 (when truthy) and 2.

    NOTE(review): the loop has no exit condition, no sleep and no I/O;
    it spins indefinitely. Preserved behaviorally as-is.
    """
    while True:
        data = 1 if data else 2
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds HoursBalance.adjusted_balance
    and adjusts two Office fields. Verbose names are in Portuguese and must
    not be translated (they are user-facing labels)."""

    dependencies = [
        ('core', '0025_office_min_work_hours_for_credit'),
    ]

    operations = [
        # New accumulated-adjusted-balance column, defaulting to 0.
        migrations.AddField(
            model_name='hoursbalance',
            name='adjusted_balance',
            field=models.IntegerField(default=0, verbose_name='saldo acumulado ajustado'),
        ),
        # The hours-control start date becomes optional (nullable/blank).
        migrations.AlterField(
            model_name='office',
            name='hours_control_start_date',
            field=models.DateField(blank=True, null=True, verbose_name='data de início do controle de horas'),
        ),
        # Minimum daily workload required to earn credits: 25200 s = 7 h.
        migrations.AlterField(
            model_name='office',
            name='min_work_hours_for_credit',
            field=models.DurationField(default=datetime.timedelta(0, 25200), verbose_name='jornada diária necessária para acumular créditos'),
        ),
    ]
|
import os
from tornado.web import addslash
import sickchill.start
from sickchill import logger, settings
from sickchill.helper import try_int
from sickchill.oldbeard import config, filters, helpers, ui
from sickchill.views.common import PageTemplate
from sickchill.views.routes import Route
from . import Config
@Route('/config/search(/?.*)', name='config:search')
class ConfigSearch(Config):
    def __init__(self, *args, **kwargs):
        # Delegate straight to the Config base handler; no extra state.
        super().__init__(*args, **kwargs)
    @addslash
    def index(self, *args_, **kwargs_):
        """Render the episode-search settings page (config_search.mako)."""
        t = PageTemplate(rh=self, filename="config_search.mako")
        return t.render(submenu=self.ConfigMenu(), title=_('Config - Episode Search'),
                        header=_('Search Settings'), topmenu='config',
                        controller="config", action="search")
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_category_anime=None, sab_category_backlog=None, sab_category_anime_backlog=None, sab_host=None,
nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_category_backlog=None, nzbget_category_anime=None,
nzbget_category_anime_backlog=None, nzbget_priority=None, nzbget_host=None, nzbget_use_https=None,
backlog_days=None, backlog_frequency=None, dailysearch_frequency=None, nzb_method=None, torrent_method=None, usenet_retention=None,
download_propers=None, check_propers_interval=None, allow_high_priority=None, sab_forced=None,
randomize_providers=None, use_failed_downloads=None, delete_failed=None, backlog_missing_only=None,
torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_label_anime=None, torrent_path=None, torrent_path_incomplete=None, torrent_verify_cert=None,
torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None,
torrent_rpcurl=None, torrent_auth_type=None, ignore_words=None, trackers_list=None, require_words=None, ignored_subs_list=None,
syno_dsm_host=None, syno_dsm_user=None, syno_dsm_pass=None, syno_dsm_path=None, quality_allow_hevc=False, prefer_words=None):
results = []
if not config.change_nzb_dir(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_torrent_dir(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_daily_search_frequency(dailysearch_frequency)
config.change_backlog_frequency(backlog_frequency)
settings.BACKLOG_DAYS = try_int(backlog_days, 7)
settings.USE_NZBS = config.checkbox_to_value(use_nzbs)
settings.USE_TORRENTS = config.checkbox_to_value(use_torrents)
settings.NZB_METHOD = nzb_method
settings.TORRENT_METHOD = torrent_method
settings.USENET_RETENTION = try_int(usenet_retention, 500)
settings.IGNORE_WORDS = ignore_words if ignore_words else ""
settings.TRACKERS_LIST = trackers_list if trackers_list else ""
settings.REQUIRE_WORDS = require_words if require_words else ""
settings.PREFER_WORDS = prefer_words if prefer_words else ""
settings.IGNORED_SUBS_LIST = ignored_subs_list if ignored_subs_list else ""
settings.RANDOMIZE_PROVIDERS = config.checkbox_to_value(randomize_providers)
config.change_download_propers(download_propers)
settings.CHECK_PROPERS_INTERVAL = check_propers_interval
settings.ALLOW_HIGH_PRIORITY = config.checkbox_to_value(allow_high_priority)
settings.QUALITY_ALLOW_HEVC = config.checkbox_to_value(quality_allow_hevc)
settings.USE_FAILED_DOWNLOADS = config.checkbox_to_value(use_failed_downloads)
settings.DELETE_FAILED = config.checkbox_to_value(delete_failed)
settings.BACKLOG_MISSING_ONLY = config.checkbox_to_value(backlog_missing_only)
settings.SAB_USERNAME = sab_username
settings.SAB_PASSWORD = filters.unhide(settings.SAB_PASSWORD, sab_password)
settings.SAB_APIKEY = filters.unhide(settings.SAB_APIKEY, sab_apikey.strip())
settings.SAB_CATEGORY = sab_category
settings.SAB_CATEGORY_BACKLOG = sab_category_backlog
settings.SAB_CATEGORY_ANIME = sab_category_anime
settings.SAB_CATEGORY_ANIME_BACKLOG = sab_category_anime_backlog
settings.SAB_HOST = config.clean_url(sab_host)
settings.SAB_FORCED = config.checkbox_to_value(sab_forced)
settings.NZBGET_USERNAME = nzbget_username
settings.NZBGET_PASSWORD = filters.unhide(settings.NZBGET_PASSWORD, nzbget_password)
settings.NZBGET_CATEGORY = nzbget_category
settings.NZBGET_CATEGORY_BACKLOG = nzbget_category_backlog
settings.NZBGET_CATEGORY_ANIME = nzbget_category_anime
settings.NZBGET_CATEGORY_ANIME_BACKLOG = nzbget_category_anime_backlog
settings.NZBGET_HOST = config.clean_host(nzbget_host)
settings.NZBGET_USE_HTTPS = config.checkbox_to_value(nzbget_use_https)
settings.NZBGET_PRIORITY = try_int(nzbget_priority, 100)
settings.TORRENT_USERNAME = torrent_username
settings.TORRENT_PASSWORD = filters.unhide(settings.TORRENT_PASSWORD, torrent_password)
settings.TORRENT_LABEL = torrent_label
settings.TORRENT_LABEL_ANIME = torrent_label_anime
settings.TORRENT_VERIFY_CERT = config.checkbox_to_value(torrent_verify_cert)
settings.TORRENT_PATH = torrent_path.rstrip('/\\')
settings.TORRENT_PATH_INCOMPLETE = torrent_path_incomplete.rstrip('/\\')
settings.TORRENT_SEED_TIME = torrent_seed_time
settings.TORRENT_PAUSED = config.checkbox_to_value(torrent_paused)
settings.TORRENT_HIGH_BANDWIDTH = config.checkbox_to_value(torrent_high_bandwidth)
settings.TORRENT_HOST = config.clean_url(torrent_host)
settings.TORRENT_RPCURL = torrent_rpcurl
settings.TORRENT_AUTH_TYPE = torrent_auth_type
settings.SYNOLOGY_DSM_HOST = config.clean_url(syno_dsm_host)
settings.SYNOLOGY_DSM_USERNAME = syno_dsm_user
settings.SYNOLOGY_DSM_PASSWORD = filters.unhide(settings.SYNOLOGY_DSM_PASSWORD, syno_dsm_pass)
settings.SYNOLOGY_DSM_PATH = syno_dsm_path.rstrip('/\\')
# This is a PITA, but lets merge the settings if they only set DSM up in one section to save them some time
if settings.TORRENT_METHOD == 'download_station':
if not settings.SYNOLOGY_DSM_HOST:
settings.SYNOLOGY_DSM_HOST = settings.TORRENT_HOST
if not settings.SYNOLOGY_DSM_USERNAME:
settings.SYNOLOGY_DSM_USERNAME = settings.TORRENT_USERNAME
if not settings.SYNOLOGY_DSM_PASSWORD:
settings.SYNOLOGY_DSM_PASSWORD = settings.TORRENT_PASSWORD
if not settings.SYNOLOGY_DSM_PATH:
settings.SYNOLOGY_DSM_PATH = settings.TORRENT_PATH
if settings.NZB_METHOD == 'download_station':
if not settings.TORRENT_HOST:
settings.TORRENT_HOST = settings.SYNOLOGY_DSM_HOST
if not settings.TORRENT_USERNAME:
settings.TORRENT_USERNAME = settings.SYNOLOGY_DSM_USERNAME
if not settings.TORRENT_PASSWORD:
settings.TORRENT_PASSWORD = settings.SYNOLOGY_DSM_PASSWORD
if not settings.TORRENT_PATH:
settings.TORRENT_PATH = settings.SYNOLOGY_DSM_PATH
helpers.manage_torrents_url(reset=True)
sickchill.start.save_config()
if len(results) > 0:
for x in results:
logger.exception(x)
ui.notifications.error(_('Error(s) Saving Configuration'),
'<br>\n'.join(results))
else:
ui.notifications.message(_('Configuration Saved'), os.path.join(settings.CONFIG_FILE))
return self.redirect("/config/search/")
|
# Django project settings (legacy 1.x layout: TEMPLATE_*, MIDDLEWARE_CLASSES).
# SECURITY NOTE(review): DEBUG must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'yourip', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'yourip',
        'PASSWORD': '',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
ALLOWED_HOSTS = []
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# SECURITY NOTE(review): secret key is hard-coded in source control --
# rotate it and load from the environment before any real deployment.
SECRET_KEY = 'b819up*y32rt&$+!_jmczpu$%jvwvjpt!80dc5&mmlzv94k^%o'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'yourip.urls'
WSGI_APPLICATION = 'yourip.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'findmyip',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Default logging: mail tracebacks for 5xx responses to ADMINS when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
import logging
import pygame
import os
import time
import backlight
import sprites
import volumebar
import controlbar
import images
import mpc
import signalcatcher
import gpio
import timer
import trackinfo
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('radioplayer')
def main():
    """Run the touchscreen MPD radio player UI (Python 2, pygame).

    Builds the control/volume/track-info sprite groups, wires GPIO buttons
    to player actions, then runs the event loop at ~20 FPS until a QUIT
    event arrives.  Relies on module-level globals ``screen`` and
    ``background`` that are created below before ``main()`` is called.
    """
    screen.fill(background)
    if 'MPD_HOST' not in os.environ:
        print "No MPD_HOST var defined"
        exit()
    mpc_player = mpc.Mpc(os.environ['MPD_HOST'])
    # Physical buttons: backlight off, next song, previous song.
    gpio.GpioMonitor(gpio.BUTTON_RIGHT, backlight.switch_backlight_off)
    gpio.GpioMonitor(gpio.BUTTON_RIGHT_MIDDLE, mpc_player.song_next)
    gpio.GpioMonitor(gpio.BUTTON_LEFT_MIDDLE, mpc_player.song_previous)
    control_bar = pygame.sprite.RenderPlain(
        controlbar.DisplaySongPrevious(mpc_player),
        controlbar.DisplaySongNext(mpc_player),
        controlbar.DisplayPlayerStop(mpc_player),
        controlbar.DisplayPlayerToggle(mpc_player),
    )
    volume_bar = pygame.sprite.RenderPlain(
        volumebar.DisplayVolumeDown(mpc_player),
        volumebar.DisplayVolumeMute(mpc_player),
        volumebar.DisplayVolumeUp(mpc_player)
    )
    # Only control/volume sprites react to touches; track info is display-only.
    all_clickable = pygame.sprite.Group()
    all_clickable.add(control_bar)
    all_clickable.add(volume_bar)
    track_info = pygame.sprite.RenderPlain(
        trackinfo.DisplayPlayerTrack(mpc_player),
        trackinfo.DisplayPlayerProgress(mpc_player)
    )
    # Idle timer used to blank the backlight after inactivity.
    timer.set_timer()
    clock = pygame.time.Clock()
    running = 1
    # Prime player state once before entering idle mode.
    mpc_player.refresh_volume()
    mpc_player.refresh_play_status()
    mpc_player.refresh_current_song()
    mpc_player.refresh_time()
    mpc_player.enable_idle()
    while running:
        for event in pygame.event.get():
            if timer.is_timer(event.type):
                switch_backlight_off()
            if event.type == pygame.MOUSEBUTTONDOWN:
                on_touch(all_clickable)
            if event.type == pygame.QUIT:
                running = 0
        track_info.draw(screen)
        control_bar.draw(screen)
        volume_bar.draw(screen)
        pygame.display.flip()
        clock.tick(20)
def switch_backlight_off():
    """Blank the display via the backlight helper, then cancel the idle timer."""
    backlight.switch_backlight_off()
    timer.clear_timer()
def on_touch(all_clickable):
    """Handle a touch/click event.

    If the screensaver is active the touch only wakes the display;
    otherwise the callback of whichever clickable sprite lies under the
    touch point is invoked.  Resets the idle timer either way.
    """
    timer.set_timer()
    if backlight.is_screensaver_enabled():
        # A wake-up touch is not treated as a click.
        backlight.switch_backlight_on()
        return
    probe = pygame.sprite.Sprite()
    probe.rect = pygame.Rect(pygame.mouse.get_pos(), (1, 1))
    hit = pygame.sprite.spritecollideany(probe, all_clickable, False)
    if hit:
        hit.run_callback()
# Module-level startup: 320x240 display, hidden cursor, screensaver control,
# then hand off to main().  Runs on import (Python 2 script).
size = width, height = 320, 240
logging.debug('set mode with size')
screen = pygame.display.set_mode(size)
logging.debug('mouse set visible')
pygame.mouse.set_visible(False)
background = 82, 92, 102
red = 255, 0, 0
backlight.init_screensaver_control()
logging.debug('main launching')
main() # check for key presses
|
from harpia.GladeWindow import GladeWindow
from harpia.amara import binderytools as bt
import gtk
from harpia.s2icommonproperties import S2iCommonProperties
import os
import gettext
# gettext i18n setup for the harpia message domain.
APP='harpia'
# NOTE(review): 'po' is appended without a path separator -- assumes
# HARPIA_DATA_DIR ends with '/'; TODO confirm.
DIR=os.environ['HARPIA_DATA_DIR']+'po'
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
class Properties( GladeWindow, S2iCommonProperties ):
    """Glade-backed properties dialog for the Division block (Python 2 / PyGTK).

    Lets the user edit the block's border and background colours and shows
    the localized help text; confirming writes the colours back through the
    ``S2iBlockProperties`` object.
    """
    #----------------------------------------------------------------------
    def __init__( self, PropertiesXML, S2iBlockProperties):
        """Build the dialog from the glade file and preload current colours/help."""
        self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
        filename = self.m_sDataDir+'glade/division.glade'
        self.m_oPropertiesXML = PropertiesXML
        self.m_oS2iBlockProperties = S2iBlockProperties
        widget_list = [
            'Properties',
            'DIVIBackgroundColor',
            'DIVIBorderColor',
            'DIVIHelpView'
            ]
        handlers = [
            'on_DIVIBackColorButton_clicked',
            'on_DIVIBorderColorButton_clicked',
            'on_division_cancel_clicked',
            'on_division_confirm_clicked'
            ]
        top_window = 'Properties'
        GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
        self.widgets['Properties'].set_icon_from_file(self.m_sDataDir+"images/harpia_ave.png")
        # The Division block has no block-specific properties; only colours
        # and the help text are shown.
        # GTK colour channels are 16-bit; stored colours are 0-255, hence
        # the * 257 scaling (255 * 257 == 65535).
        self.m_oBorderColor = self.m_oS2iBlockProperties.GetBorderColor()
        t_nBorderRed = self.m_oBorderColor[0] * 257
        t_nBorderGreen = self.m_oBorderColor[1] * 257
        t_nBorderBlue = self.m_oBorderColor[2] * 257
        t_oBorderColor = gtk.gdk.Color(red=t_nBorderRed,green=t_nBorderGreen,blue=t_nBorderBlue)
        self.widgets['DIVIBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oBorderColor)
        self.m_oBackColor = self.m_oS2iBlockProperties.GetBackColor()
        t_nBackRed = self.m_oBackColor[0] * 257
        t_nBackGreen = self.m_oBackColor[1] * 257
        t_nBackBlue = self.m_oBackColor[2] * 257
        t_oBackColor = gtk.gdk.Color(red=t_nBackRed,green=t_nBackGreen,blue=t_nBackBlue)
        self.widgets['DIVIBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oBackColor)
        # Load the localized help text into the help view.
        t_oS2iHelp = bt.bind_file(self.m_sDataDir+"help/division"+ _("_en.help"))
        t_oTextBuffer = gtk.TextBuffer()
        t_oTextBuffer.set_text( unicode( str( t_oS2iHelp.help.content) ) )
        self.widgets['DIVIHelpView'].set_buffer( t_oTextBuffer )
    #----------------------------------------------------------------------
    def __del__(self):
        pass
    #----------------------------------------------------------------------
    def on_division_cancel_clicked( self, *args ):
        """Discard any changes and close the dialog."""
        self.widgets['Properties'].destroy()
    #----------------------------------------------------------------------
    def on_division_confirm_clicked( self, *args ):
        """Write the chosen colours back to the block and close the dialog."""
        self.m_oS2iBlockProperties.SetBorderColor( self.m_oBorderColor )
        self.m_oS2iBlockProperties.SetBackColor( self.m_oBackColor )
        self.widgets['Properties'].destroy()
    #----------------------------------------------------------------------
    def on_DIVIBackColorButton_clicked(self,*args):
        """Pick a new background colour and preview it on the swatch."""
        t_oColor = self.RunColorSelection()
        # FIX: replaced the removed-in-Python-3 '<>' operator.
        if t_oColor is not None:
            self.widgets['DIVIBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
            # Scale 16-bit channels back down to 0-255 (integer division).
            self.m_oBackColor[0] = t_oColor.red / 257
            self.m_oBackColor[1] = t_oColor.green / 257
            self.m_oBackColor[2] = t_oColor.blue / 257
    #----------------------------------------------------------------------
    def on_DIVIBorderColorButton_clicked(self,*args):
        """Pick a new border colour and preview it on the swatch."""
        t_oColor = self.RunColorSelection()
        # FIX: replaced the removed-in-Python-3 '<>' operator.
        if t_oColor is not None:
            self.widgets['DIVIBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
            self.m_oBorderColor[0] = t_oColor.red / 257
            self.m_oBorderColor[1] = t_oColor.green / 257
            self.m_oBorderColor[2] = t_oColor.blue / 257
    #----------------------------------------------------------------------
|
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py entry point for the "smsbot" project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smsbot.settings")
    # Imported here so DJANGO_SETTINGS_MODULE is set before Django loads.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
"""
========================================================================
Functionalities and filters connected with ITK/VTK (:mod:`medpy.itkvtk`)
========================================================================
.. currentmodule:: medpy.itkvtk
The methods in this module require the `WrapITK <https://code.google.com/p/wrapitk/>`_ Python bindings for OTK/VTK.
They largely exist to ease the usage of these bindings, which have their flaws and oddities.
Please access sub-packages directly to avoid dependency clashes for if only ITK or only VTK bindings are available.
Image filter :mod:`medy.itkvtk.filter`
======================================
These methods wrap ITK filter such that they can be applied to images represented as numpy arrays.
Feel free to take a look at the code when you plan to write your own wrappers for ITK image filters.
.. module:: medpy.itkvtk.filter.image
.. autosummary::
:toctree: generated/
gradient_magnitude
watershed
ITK utilities :mod:`medpy.itkvtk.utilities.itku`
================================================
.. module:: medpy.itkvtk.utilities.itku
.. autosummary::
:toctree: generated/
getInformation
getInformationWithScalarRange
saveImageMetaIO
saveImage
getImageFromArray
getArrayFromImage
getImageType
getImageTypeFromArray
getImageTypeFromFile
VTK utilities :mod:`medpy.itkvtk.utilities.vtku`
================================================
.. module:: medpy.itkvtk.utilities.vtku
.. autosummary::
:toctree: generated/
getInformation
getImageTypeFromVtk
saveImageMetaIO
"""
import filter
import utilities
__all__ = [s for s in dir() if not s.startswith('_')]
|
# Scrapy settings for the "sat" project.
BOT_NAME = 'sat'
SPIDER_MODULES = ['sat.spiders']
NEWSPIDER_MODULE = 'sat.spiders'
USER_AGENT = 'sat (+http://www.openearth.eu)'
# Kept only for backward compatibility with external readers: Scrapy
# itself ignores this misnamed/mistyped setting.
DOWNLOADER_MIDDLEWARE = [
    'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware'
]
# BUG FIX: the setting Scrapy actually reads is DOWNLOADER_MIDDLEWARES,
# a dict mapping middleware class path -> priority.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
}
# Item pipelines run in ascending priority order.
ITEM_PIPELINES = {
    'sat.pipelines.JsonWriterPipeline' : 100,
    'sat.pipelines.DuplicatePipeline': 300,
    'sat.pipelines.RabbitMQPipeline': 800
}
|
import codecs
import os
import re
from setuptools import setup
version = 'devel'
changelog = 'debian/changelog'
if os.path.exists(changelog):
    # Derive the version from the newest debian changelog entry, e.g.
    # "snapcraft (2.8.1) xenial; urgency=medium" -> "2.8.1".
    # FIX: close the file handle (was leaked by the chained readline()).
    with codecs.open(changelog, encoding='utf-8') as changelog_file:
        head = changelog_file.readline()
    # FIX: raw string so '\(' is a proper regex escape, not an invalid
    # string escape.
    match = re.compile(r'.*\((.*)\).*').match(head)
    if match:
        version = match.group(1)
setup(
    name='snapcraft',
    version=version,
    description='Easily craft snaps from multiple sources',
    author_email='snapcraft@lists.snapcraft.io',
    url='https://github.com/snapcore/snapcraft',
    packages=['snapcraft',
              'snapcraft.internal',
              'snapcraft.internal.states',
              'snapcraft.plugins',
              'snapcraft.storeapi'],
    package_data={'snapcraft.internal': ['manifest.txt']},
    scripts=['bin/snapcraft', 'bin/snapcraft-parser'],
    data_files=[
        ('share/snapcraft/schema',
         ['schema/' + x for x in os.listdir('schema')]),
        ('share/snapcraft/libraries',
         ['libraries/' + x for x in os.listdir('libraries')]),
    ],
    install_requires=[
        'pyxdg',
        'requests',
    ],
    test_suite='snapcraft.tests',
    license='GPL v3',
    classifiers=(
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Build Tools',
        'Topic :: System :: Software Distribution',
    ),
)
|
import numpy as np
# Two-sided Student-t critical values t_{1-alpha, n-1}, keyed by
# "<confidence formatted %.2f>,<degrees-of-freedom zero-padded to 3>".
t1ma_nm1 = {'0.90,009': 1.83,
            '0.90,019': 1.73,
            '0.90,029': 1.70,
            '0.90,039': 1.68,
            '0.90,059': 1.67,
            '0.90,099': 1.66,
            '0.90,199': 1.65,
            '0.99,009': 3.25,
            '0.99,019': 2.86,
            '0.99,029': 2.76,
            '0.99,039': 2.70,
            '0.99,059': 2.66,
            '0.99,099': 2.62,
            '0.99,199': 2.58}

def get_t1ma_nm1(
        onema,
        nm1):
    """Look up the tabulated t critical value for confidence ``onema``
    and ``nm1`` degrees of freedom (raises KeyError for untabulated pairs)."""
    key = '{0:.2f},{1:0>3}'.format(onema, nm1)
    return t1ma_nm1[key]
class Wiener:
    """Discretized Wiener (Brownian) process generator.

    Generates increments ``dW ~ N(0, dt)`` over ``nsteps`` steps for
    ``noise_dimension`` independent noises across ``solution_shape``
    batches/trajectories, plus the multiple Stratonovich/Ito integrals
    (``get_jj`` / ``get_jjj``) needed by strong SDE integration schemes,
    built from truncated Fourier expansions with ``p`` terms
    (Kloeden & Platen style).
    """
    def __init__(
            self,
            dt = 1.,
            nsteps = 128,
            noise_dimension = 1,
            solution_shape = [20, 16],
            p = 5):
        # NOTE: the mutable default for solution_shape is kept for interface
        # compatibility; it is only read, never mutated.
        self.dt = dt
        self.nsteps = nsteps
        if len(solution_shape) == 2:
            self.nbatches = solution_shape[0]
            self.ntraj = solution_shape[1]
        self.noise_dimension = noise_dimension
        self.shape = [noise_dimension] + solution_shape
        self.solution_shape = solution_shape
        self.p = p
        # Fourier mode indices r = 1..p, expanded to broadcast against the
        # noise shape.
        # FIX: np.float was removed in NumPy 1.24; use the builtin float.
        self.r = np.arange(1,self.p+1, 1).astype(float)
        for i in range(len(self.shape)):
            self.r = np.expand_dims(self.r, axis = len(self.r.shape))
        # Truncation-correction constants for the p-term expansions.
        self.rho = 1/12. - .5*np.sum(1/self.r**2, axis = 0)/np.pi**2
        self.alpha = np.pi**2/180 - .5*np.sum(1/self.r**4, axis = 0)/np.pi**2
        self.Delta = dt
        self.sqrtD = np.sqrt(self.Delta)
        return None
    def initialize(
            self,
            rseed = None):
        """Sample the increments and accumulate the path ``W``.

        NOTE: seeds NumPy's *global* RNG, affecting other users of np.random.
        """
        np.random.seed(rseed)
        self.dW = np.sqrt(self.dt)*np.random.randn(
                *tuple([self.nsteps] + self.shape))
        # W has one extra leading time slot (W[0] == 0).
        self.W = np.zeros(
                tuple([self.nsteps + 1] + self.shape),
                dtype = self.dW.dtype)
        for t in range(self.nsteps):
            self.W[t+1] = self.W[t] + self.dW[t]
        return None
    def get_time(
            self):
        """Return the time grid ``[0, dt, 2*dt, ...]`` matching ``W``."""
        return self.dt*np.array(range(self.W.shape[0]))
    def coarsen(
            self,
            n = 2):
        """Return a new Wiener object subsampled every ``n`` steps (same path)."""
        new_object = Wiener(
                dt = n*self.dt,
                nsteps = int(self.nsteps/n),
                noise_dimension = self.noise_dimension,
                solution_shape = self.solution_shape,
                p = self.p)
        new_object.W = self.W[::n]
        return new_object
    def get_jj(self, Jj):
        """Given single integrals ``Jj`` for one step, sample the double
        integrals; returns ``(Jj0, J0j, Jjj, Ijj)``."""
        # Gaussian draws for the truncated Fourier expansion.  ``phi`` is
        # unused here but kept so the RNG stream matches get_jjj/legacy runs.
        zeta = np.random.randn(*tuple([self.p] + list(Jj.shape)))
        eta = np.random.randn(*tuple([self.p] + list(Jj.shape)))
        mu = np.random.randn(*tuple(Jj.shape))
        phi = np.random.randn(*tuple(Jj.shape))
        # additional quantities
        a = (- np.sqrt(2*self.Delta) * np.sum(eta / self.r, axis=0) / np.pi
             - (2*np.sqrt(self.Delta*self.rho)*mu))
        A = np.sum((zeta[:, :, np.newaxis]*eta[:, np.newaxis, :] - eta[:, :, np.newaxis]*zeta[:, np.newaxis, :])
                   / self.r[:, np.newaxis], axis = 0) / (2*np.pi)
        # multiple Stratonovich integrals
        Jj0 = self.Delta*(Jj + a) / 2
        J0j = self.Delta*(Jj - a) / 2
        Jjj = (Jj[:, np.newaxis]*Jj[np.newaxis, :] / 2
               - (a [np.newaxis, :]*Jj[:, np.newaxis] - Jj[np.newaxis, :]*a[:, np.newaxis])/2
               + self.Delta*A)
        # multiple Ito integrals (diagonal Stratonovich correction).
        Ijj = Jjj.copy()
        for j in range(Ijj.shape[0]):
            Ijj[j,j] -= .5*self.Delta
        return Jj0, J0j, Jjj, Ijj
    def get_jjj(self, Jj):
        """Given single integrals ``Jj`` for one step, sample double and
        selected triple integrals; returns ``(Jj0, J0j, Jjj, Jjj0, Ijj)``."""
        # Gaussian draws for the truncated Fourier expansion.
        zeta = np.random.randn(*tuple([self.p] + list(Jj.shape)))
        eta = np.random.randn(*tuple([self.p] + list(Jj.shape)))
        mu = np.random.randn(*tuple(Jj.shape))
        phi = np.random.randn(*tuple(Jj.shape))
        # additional quantities
        a = (- np.sqrt(2*self.Delta) * np.sum(eta / self.r, axis=0) / np.pi
             - (2*np.sqrt(self.Delta*self.rho )*mu))
        b = ( np.sqrt(self.Delta/2) * np.sum(eta / (self.r**2), axis=0)
             + ( np.sqrt(self.Delta*self.alpha)*phi))
        A = np.sum((zeta[:, :, np.newaxis]* eta[:, np.newaxis, :] - eta[:, :, np.newaxis]*zeta[:, np.newaxis, :])
                   / self.r [:, np.newaxis], axis = 0) / (2*np.pi)
        B = np.sum((zeta[:, :, np.newaxis]*zeta[:, np.newaxis, :] + eta[:, :, np.newaxis]* eta[:, np.newaxis, :])
                   / (self.r**2)[:, np.newaxis], axis = 0) / (4*np.pi**2)
        C = np.zeros(B.shape, B.dtype)
        for i in range(self.p):
            for k in range(self.p):
                if not k == i:
                    C -= ((self.r[i] / (self.r[i]**2 - self.r[k]**2))
                          * (zeta[i, :, np.newaxis]*zeta[k, np.newaxis, :]/self.r[k]
                             - eta [i, :, np.newaxis]* eta[k, np.newaxis, :]*self.r[k]/self.r[i]))
        C /= 2*np.pi**2
        # multiple Stratonovich integrals
        Jj0 = self.Delta*(Jj + a) / 2
        J0j = self.Delta*(Jj - a) / 2
        Jjj = (Jj[:, np.newaxis]*Jj[np.newaxis, :] / 2
               - (a [np.newaxis, :]*Jj[:, np.newaxis] - Jj[np.newaxis, :]*a[:, np.newaxis])/2
               + self.Delta*A)
        J0j0 = self.Delta**2*(Jj/6 - b/np.pi)
        Jj00 = self.Delta**2*(Jj/6 + b/(2*np.pi) + a/4)
        J00j = self.Delta**2*(Jj/6 + b/(2*np.pi) - a/4)
        Jj0j = (self.Delta*Jj[:, np.newaxis]*Jj[np.newaxis, :]/6
                + a[:, np.newaxis]*J0j[np.newaxis, :]/2
                + self.Delta*(b[:, np.newaxis]*Jj[np.newaxis, :] + Jj[:, np.newaxis]*b[np.newaxis, :]) / (2*np.pi)
                - self.Delta**2 * B
                - self.Delta*Jj[:, np.newaxis]*a[np.newaxis, :] / 4)
        J0jj = (self.Delta*Jj[:, np.newaxis]*Jj[np.newaxis, :]/6
                - self.Delta*Jj[:, np.newaxis]*a[np.newaxis, :]/4
                + self.Delta*(-2*b[:, np.newaxis]*Jj[np.newaxis, :] + Jj[:, np.newaxis]*b[np.newaxis, :]) / (2*np.pi)
                + self.Delta**2 * (B + C + .5*A))
        Jjj0 = (self.Delta*Jj[:, np.newaxis]*Jj[np.newaxis, :]/2
                - self.Delta*(Jj[:, np.newaxis]*a[np.newaxis, :] - a[:, np.newaxis]*Jj[np.newaxis, :]) / 2
                + self.Delta**2 * A
                - Jj0j - J0jj)
        # multiple Ito integrals (diagonal Stratonovich correction).
        Ijj = Jjj.copy()
        for j in range(Ijj.shape[0]):
            Ijj[j,j] -= .5*self.Delta
        return Jj0, J0j, Jjj, Jjj0, Ijj
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: James Hogarth
module: jenkins_script
short_description: Executes a groovy script in the jenkins instance
version_added: '2.3'
description:
- The C(jenkins_script) module takes a script plus a dict of values
to use within the script and returns the result of the script being run.
options:
script:
description:
- The groovy script to be executed.
This gets passed as a string Template if args is defined.
required: true
default: null
url:
description:
- The jenkins server to execute the script against. The default is a local
jenkins instance that is not being proxied through a webserver.
required: false
default: http://localhost:8080
validate_certs:
description:
- If set to C(no), the SSL certificates will not be validated.
This should only set to C(no) used on personally controlled sites
using self-signed certificates as it avoids verifying the source site.
required: false
default: True
user:
description:
- The username to connect to the jenkins server with.
required: false
default: null
password:
description:
- The password to connect to the jenkins server with.
required: false
default: null
timeout:
description:
- The request timeout in seconds
required: false
default: 10
version_added: "2.4"
args:
description:
- A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
required: false
default: null
notes:
- Since the script can do anything this does not report on changes.
Knowing the script is being run it's important to set changed_when
for the ansible output to be clear on any alterations made.
'''
EXAMPLES = '''
- name: Obtaining a list of plugins
jenkins_script:
script: 'println(Jenkins.instance.pluginManager.plugins)'
user: admin
password: admin
- name: Setting master using a variable to hold a more complicate script
vars:
setmaster_mode: |
import jenkins.model.*
instance = Jenkins.getInstance()
instance.setMode(${jenkins_mode})
instance.save()
- name: use the variable as the script
jenkins_script:
script: "{{ setmaster_mode }}"
args:
jenkins_mode: Node.Mode.EXCLUSIVE
- name: interacting with an untrusted HTTPS connection
jenkins_script:
script: "println(Jenkins.instance.pluginManager.plugins)"
user: admin
password: admin
url: https://localhost
validate_certs: no
'''
RETURN = '''
output:
description: Result of script
returned: success
type: string
sample: 'Result: true'
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
def is_csrf_protection_enabled(module):
    """Return True when the Jenkins instance reports crumb (CSRF) support."""
    response, info = fetch_url(module,
                               module.params['url'] + '/api/json',
                               method='GET')
    if info["status"] != 200:
        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
    body = to_native(response.read())
    return json.loads(body).get('useCrumbs', False)
def get_crumb(module):
    """Fetch the CSRF crumb descriptor from the Jenkins crumb issuer."""
    response, info = fetch_url(module,
                               module.params['url'] + '/crumbIssuer/api/json',
                               method='GET')
    if info["status"] != 200:
        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
    body = to_native(response.read())
    return json.loads(body)
def main():
    """Ansible entry point: template the groovy script, POST it to
    /scriptText (with a CSRF crumb when required), and return the output.

    Fails the task on HTTP errors, missing template variables, or a Java
    stack trace in the script result.
    """
    module = AnsibleModule(
        argument_spec=dict(
            script=dict(required=True, type="str"),
            url=dict(required=False, type="str", default="http://localhost:8080"),
            validate_certs=dict(required=False, type="bool", default=True),
            user=dict(required=False, type="str", default=None),
            password=dict(required=False, no_log=True, type="str", default=None),
            timeout=dict(required=False, type="int", default=10),
            args=dict(required=False, type="dict", default=None)
        )
    )
    if module.params['user'] is not None:
        if module.params['password'] is None:
            module.fail_json(msg="password required when user provided", output='')
        # fetch_url reads these url_* params for HTTP basic auth.
        module.params['url_username'] = module.params['user']
        module.params['url_password'] = module.params['password']
        module.params['force_basic_auth'] = True
    if module.params['args'] is not None:
        from string import Template
        try:
            # substitute() raises KeyError for any ${var} missing from args.
            script_contents = Template(module.params['script']).substitute(module.params['args'])
        except KeyError as err:
            module.fail_json(msg="Error with templating variable: %s" % err, output='')
    else:
        script_contents = module.params['script']
    headers = {}
    if is_csrf_protection_enabled(module):
        crumb = get_crumb(module)
        headers = {crumb['crumbRequestField']: crumb['crumb']}
    resp, info = fetch_url(module,
                           module.params['url'] + "/scriptText",
                           data=urlencode({'script': script_contents}),
                           headers=headers,
                           method="POST",
                           timeout=module.params['timeout'])
    if info["status"] != 200:
        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
    result = to_native(resp.read())
    # Jenkins returns 200 even when the script throws; detect a stack trace.
    if 'Exception:' in result and 'at java.lang.Thread' in result:
        module.fail_json(msg="script failed with stacktrace:\n " + result, output='')
    module.exit_json(
        output=result,
    )
if __name__ == '__main__':
    main()
|
import logging
import os.path
from pylons import request
from dirac.lib.base import *
from dirac.lib.webconfig import gWebConfig
from dirac.lib.sanitizeInputs import sanitizeAllWebInputs
from DIRAC import gLogger
from DIRAC.Core.DISET.AuthManager import AuthManager
from DIRAC.Core.Security import CS, X509Certificate
# DISET auth manager built from the web portal's Authorization CS section.
gAuthManager = AuthManager( "%s/Authorization" % gWebConfig.getWebSection() )
log = logging.getLogger( __name__ )
diracLogger = gLogger.getSubLogger( "Credentials" )
def checkURL( environ, routesDict ):
  """Sanitize the request, resolve user credentials into ``environ`` and
  ``routesDict`` (setup, DN, username, group), and reroute to the
  unauthorized page when the action is not permitted.  Always returns True.
  """
  #Before all we try to sanitize inputs
  sanitizeAllWebInputs( environ )
  #Time for Auth!
  routesDict[ 'dsetup' ] = __checkSetup( routesDict[ 'dsetup' ] )
  userDN, userName = __checkDN( environ )
  userGroup, availableGroups = __checkGroup( userName, routesDict[ 'dgroup' ] )
  routesDict[ 'dgroup' ] = userGroup
  environ[ 'DIRAC.userCredentials' ] = { 'DN' : userDN,
                                         'username' : userName,
                                         'group' : userGroup,
                                         'availableGroups' : availableGroups
                                        }
  if not authorizeAction( routesDict, environ[ 'DIRAC.userCredentials' ] ):
    # Not authorized: rewrite the route to the generic "unauthorized" view.
    routesDict[ 'controller' ] = "web/userdata"
    routesDict[ 'action' ] = "unauthorizedAction"
    routesDict[ 'id' ] = None
  return True
def __checkSetup( setup ):
  """Return ``setup`` when it is a configured setup, else the default one."""
  if setup in gWebConfig.getSetups():
    return setup
  return gWebConfig.getDefaultSetup()
def __checkDN( environ ):
  """Extract the client certificate DN from the WSGI environ and map it to
  a DIRAC username; returns ``(userDN, userName)`` with ``(False,
  "anonymous")`` when no usable identity is found.
  """
  userDN = False
  if 'SERVER_SOFTWARE' not in environ:
    # Not behind a real web server (dev/debug run): use the configured debug DN.
    diracLogger.info( "Getting the DN from /Website/DebugDN" )
    userDN = gWebConfig.getDebugDN()
  if 'HTTPS' in environ and environ[ 'HTTPS' ] == 'on':
    if 'SSL_CLIENT_S_DN' in environ:
      userDN = environ[ 'SSL_CLIENT_S_DN' ]
    elif 'SSL_CLIENT_CERT' in environ:
      userCert = X509Certificate.X509Certificate()
      result = userCert.loadFromString( environ[ 'SSL_CLIENT_CERT' ] )
      if not result[ 'OK' ]:
        diracLogger.error( "Could not load SSL_CLIENT_CERT: %s" % result[ 'Message' ] )
        # NOTE(review): redundant -- userDN stays False so the fallback
        # below assigns "anonymous" again.
        userName = "anonymous"
      else:
        userDN = userCert.getSubjectDN()[ 'Value' ]
    else:
      diracLogger.error( "Web server is not properly configured to get SSL_CLIENT_S_DN or SSL_CLIENT_CERT in env" )
  if not userDN:
    userName = "anonymous"
  else:
    # Map the certificate DN to a registered DIRAC username via the CS.
    retVal = CS.getUsernameForDN( userDN )
    if not retVal[ 'OK' ]:
      userName = "anonymous"
    else:
      userName = retVal[ 'Value' ]
    diracLogger.info( "Got username for user" " => %s for %s" % ( userName, userDN ) )
  return ( userDN, userName )
def __getCN( environ ):
  """Return the issuer DN of the client certificate, or "unknown".

  NOTE(review): despite the name, this reads SSL_CLIENT_I_DN /
  getIssuerDN() -- the certificate *issuer* DN, not a CN -- confirm intent.
  """
  userCN = "unknown"
  if 'HTTPS' in environ and environ[ 'HTTPS' ] == 'on':
    if 'SSL_CLIENT_I_DN' in environ:
      userCN = environ[ 'SSL_CLIENT_I_DN' ]
    elif 'SSL_CLIENT_CERT' in environ:
      userCert = X509Certificate.X509Certificate()
      result = userCert.loadFromString( environ[ 'SSL_CLIENT_CERT' ] )
      if not result[ 'OK' ]:
        diracLogger.error( "Could not load SSL_CLIENT_CERT: %s" % result[ 'Message' ] )
      else:
        userCN = userCert.getIssuerDN()[ 'Value' ]
    else:
      diracLogger.error( "Web server is not properly configured to get SSL_CLIENT_I_DN or SSL_CLIENT_CERT in env" )
  diracLogger.info( "Got CN %s" % userCN )
  return userCN
def __checkGroup( userName, group ):
  """Validate ``group`` against the user's registered groups.

  Returns ``(group, availableGroups)`` when valid; otherwise falls back to
  the first configured default group the user belongs to, and finally to
  "visitor".
  """
  retVal = CS.getGroupsForUser( userName )
  availableGroups = retVal[ 'Value' ] if retVal[ 'OK' ] else []
  if group in availableGroups:
    return ( group, availableGroups )
  for candidate in gWebConfig.getDefaultGroups():
    if candidate in availableGroups:
      return ( candidate, availableGroups )
  return ( "visitor", availableGroups )
def checkUserCredentials():
  """Resolve the requesting user's DN, username and group.

  Normalises 'dsetup' and 'dgroup' in the pylons routes dict and caches
  the resolved credentials in environ[ 'DIRAC.userCredentials' ].
  """
  environ = request.environ
  routesDict = environ[ 'pylons.routes_dict' ]
  if 'dsetup' in routesDict:
    routesDict[ 'dsetup' ] = __checkSetup( routesDict[ 'dsetup' ] )
  else:
    routesDict[ 'dsetup' ] = gWebConfig.getDefaultSetup()
  userDN, userName = __checkDN( environ )
  requestedGroup = routesDict.get( 'dgroup', 'visitor' )
  userGroup, availableGroups = __checkGroup( userName, requestedGroup )
  routesDict[ 'dgroup' ] = userGroup
  environ[ 'DIRAC.userCredentials' ] = { 'DN' : userDN,
                                         'username' : userName,
                                         'group' : userGroup,
                                         'availableGroups' : availableGroups
                                       }
  if userDN and userName == "anonymous":
    # a DN was presented but no registered user matched it: keep the
    # certificate CN so the UI can still display something meaningful
    userCN = __getCN( environ )
    if userCN:
      environ[ 'DIRAC.userCredentials' ][ 'CN' ] = userCN
def authorizeAction( routeDict = False, userCred = False ):
  """Check whether the (current or given) user may run the routed action.

  Returns True when gAuthManager authorises the controller/action path
  for the given credentials, False otherwise.
  """
  if not routeDict:
    routeDict = request.environ[ 'pylons.routes_dict' ]
  if not userCred:
    userCred = request.environ[ 'DIRAC.userCredentials' ]
  actionPath = "%s/%s" % ( routeDict[ 'controller' ], routeDict[ 'action' ] )
  userRep = "%s@%s" % ( userCred[ 'username' ], userCred[ 'group' ] )
  diracLogger.info( "Testing %s for %s action" % ( userRep, actionPath ) )
  if not gAuthManager.authQuery( actionPath, userCred, defaultProperties = 'all' ):
    diracLogger.info( "NOT authorized %s for %s" % ( actionPath, userRep ) )
    return False
  diracLogger.info( "Authorized %s for %s" % ( actionPath, userRep ) )
  return True
def getUsername():
  """Username of the authenticated user, or "anonymous" when unresolved."""
  if 'DIRAC.userCredentials' not in request.environ:
    return "anonymous"
  return request.environ[ 'DIRAC.userCredentials' ][ 'username' ]
def getUserDN():
  """DN of the authenticated user, or the empty string when unresolved."""
  if 'DIRAC.userCredentials' not in request.environ:
    return ""
  return request.environ[ 'DIRAC.userCredentials' ][ 'DN' ]
def getSelectedSetup():
  """Setup selected in the route, validated against the configured list."""
  selected = request.environ[ 'pylons.routes_dict' ][ 'dsetup' ]
  if selected in gWebConfig.getSetups():
    return selected
  return gWebConfig.getDefaultSetup()
def getSelectedGroup():
  """Group of the authenticated user, or "visitor" when unresolved."""
  if 'DIRAC.userCredentials' not in request.environ:
    return "visitor"
  return request.environ[ 'DIRAC.userCredentials' ][ 'group' ]
def getProperties( group = False ):
  """CS properties of *group* (default: the currently selected group).

  Visitors have no properties by definition.
  """
  group = group or getSelectedGroup()
  if group == 'visitor':
    return []
  return CS.getPropertiesForGroup( group )
def getAvailableGroups():
  """All groups available to the authenticated user (empty when unresolved)."""
  if 'DIRAC.userCredentials' not in request.environ:
    return []
  return request.environ[ 'DIRAC.userCredentials' ][ 'availableGroups' ]
|
from os import path
import controller, webbrowser, tkMessageBox
import Tkinter as tk
class View(tk.Frame):
    """Tkinter UI for the file manager.

    Shows the contents of `current_dir` in a listbox, drives
    copy/cut/paste/delete through a menu bar and a right-click context
    menu, and delegates all filesystem work to controller.Controller.
    """

    controller = None
    current_dir = "~"
    items_selected = []   # paths captured by the last copy/cut/delete
    action = None         # pending controller action name ("copy"/"cut"/"delete")
    main_list = None
    topMenu = None
    contextMenu = None
    fn_dialog = None      # "create folder" Toplevel while it is open
    shf = None            # IntVar: "show hidden files" flag

    def __init__(self, master = None):
        """Build the window and enter the Tk main loop (blocks until close)."""
        self.controller = controller.Controller()
        self.current_dir = path.expanduser(self.current_dir)
        if master is None:
            master = tk.Tk()
            master.geometry("400x400")
        tk.Frame.__init__(self, master)
        self.pack(fill=tk.BOTH, expand=True)
        # any left click outside the context menu dismisses it
        self.master.bind("<Button-1>", self.closeContextMenu)
        self.createWidgets()
        self.master.title(self.current_dir + " - Pylemanager")
        self.master.mainloop()

    def createWidgets(self):
        """Create the menu bar, the file listbox and the context menu."""
        self.createTopMenu()
        self.createMainList()
        self.createContextMenu()

    def createTopMenu(self):
        """Build the File/Edit/Settings menu bar."""
        self.topMenu = tk.Menu(self)
        file_submenu = tk.Menu(self.topMenu, tearoff=0)
        file_submenu.add_command(label="New folder", command=self.newFolderDialog)
        file_submenu.add_separator()
        file_submenu.add_command(label="Quit", command=self.quit)
        edit_submenu = tk.Menu(self.topMenu, tearoff=0)
        edit_submenu.add_command(label="Copy", command=lambda: self.registerAction("copy"))
        edit_submenu.add_command(label="Cut", command=lambda: self.registerAction("cut"))
        edit_submenu.add_command(label="Paste", command=self.execAction)
        edit_submenu.add_separator()
        edit_submenu.add_command(label="Delete", command=self.deleteAction)
        self.shf = tk.IntVar()
        self.shf.set(0)
        config_submenu = tk.Menu(self.topMenu, tearoff=0)
        config_submenu.add_checkbutton(label="Show hidden files", variable=self.shf, command=self.reloadMainList)
        config_submenu.add_separator()
        config_submenu.add_command(label="About", command=lambda: webbrowser.open("https://github.com/lucasmenendez/pylemanager"))
        self.topMenu.add_cascade(label="File", menu=file_submenu)
        self.topMenu.add_cascade(label="Edit", menu=edit_submenu)
        self.topMenu.add_cascade(label="Settings", menu=config_submenu)
        self.master.config(menu=self.topMenu)

    def createMainList(self):
        """Create (once) and populate the directory listing listbox."""
        if self.main_list is None:
            self.main_list = tk.Listbox(self, selectmode=tk.MULTIPLE)
            self.main_list.pack(fill=tk.BOTH, expand=True)
        self.reloadMainList()
        # double click descends into the clicked folder
        self.main_list.bind("<Double-Button-1>", self.reloadMainList)

    def reloadMainList(self, event = None):
        """Refresh the listbox; when triggered by a double click, first
        navigate into the selected folder ("../" goes up one level)."""
        if event is not None and self.main_list.curselection():
            folder_index = self.main_list.curselection()[0]
            folder = self.main_list.get(int(folder_index))
            if folder == "../":
                folder = self.current_dir.split("/")
                del folder[len(folder) - 1]
                if len(folder) > 1:
                    self.current_dir = "/".join(folder)
                else:
                    self.current_dir = "/"
            else:
                self.current_dir += "/" + folder
        index = 0
        if self.shf is None:
            self.shf = tk.IntVar()
            self.shf.set(0)
        self.master.title(self.current_dir + " - Pylemanager")
        items = self.controller.loadFolder(self.current_dir, self.shf.get())
        items.sort()
        self.main_list.delete(0, tk.END)
        if self.current_dir != "/":
            # non-root directories get a "../" entry at the top
            index = 1
            self.main_list.insert(0, "../")
        else:
            # keep paths like "/usr" instead of "//usr" when at the root
            self.current_dir = ""
        for item in items:
            self.main_list.insert(index, item)
            index += 1

    def createContextMenu(self):
        """Build the right-click context menu for the listbox."""
        self.contextMenu = tk.Menu(self, tearoff=0)
        self.contextMenu.add_command(label="Copy", command=lambda: self.registerAction("copy"))
        self.contextMenu.add_command(label="Cut", command=lambda: self.registerAction("cut"))
        self.contextMenu.add_command(label="Paste", command=self.execAction)
        self.contextMenu.add_command(label="Delete", command=self.deleteAction)
        self.contextMenu.add_separator()
        self.contextMenu.add_command(label="Reload", command=self.reloadMainList)
        self.main_list.bind("<Button-3>", self.openContextMenu)

    def openContextMenu(self, event):
        """Show the context menu at the pointer position."""
        self.contextMenu.post(event.x_root, event.y_root)

    def closeContextMenu(self, event):
        """Hide the context menu."""
        self.contextMenu.unpost()

    def newFolderDialog(self):
        """Open the modal dialog asking for a new folder name."""
        self.fn_dialog = tk.Toplevel()
        self.fn_dialog.title("Create folder")
        fn = tk.StringVar()
        fd_field = tk.Entry(self.fn_dialog, textvariable=fn)
        fd_field.pack(fill=tk.X)
        submit = tk.Button(self.fn_dialog, text="Create folder", command=lambda: self.submitNewFolder(fn.get()))
        submit.pack(side=tk.LEFT)
        cancel = tk.Button(self.fn_dialog, text="Close", command=self.fn_dialog.destroy)
        cancel.pack(side=tk.RIGHT)
        self.fn_dialog.mainloop()

    def getSelection(self):
        """Return the absolute paths of the selected entries ("../" excluded)."""
        selection = self.main_list.curselection()
        items_selected = []
        for index in selection:
            basename = self.main_list.get(int(index))
            if not basename.startswith("../"):
                item = self.current_dir + "/" + basename
                items_selected.append(item)
        return items_selected

    def registerAction(self, action, display_info = True):
        """Remember the current selection and the pending action name."""
        self.items_selected = self.getSelection()
        self.action = action
        self.main_list.selection_clear(0, tk.END)

    def execAction(self):
        """Run the pending action (paste target = selection or current dir)."""
        if self.action is not None:
            action = getattr(self.controller, self.action)
            location = self.current_dir
            selection = self.getSelection()
            if len(selection) > 0:
                location = selection[0]
            if len(self.items_selected) > 0:
                if tkMessageBox.askokcancel(self.action.capitalize(), "Are you sure? This action can be permanent"):
                    if action(self.items_selected, location):
                        self.action = None
                    else:
                        tkMessageBox.showerror("Ops", "An error occurred :(")
                    self.reloadMainList()

    def submitNewFolder(self, folder_name):
        """Create `folder_name` inside the current directory and close the dialog."""
        folder = self.current_dir + "/" + folder_name
        if not self.controller.createFolder(folder):
            # Bug fix: the original referenced self.statusbar_content, an
            # attribute never created anywhere in this class, so a failed
            # creation raised AttributeError instead of informing the user.
            tkMessageBox.showerror("Ops", "Error ocurred creating '" + folder_name + "' folder.")
        else:
            self.reloadMainList()
            self.fn_dialog.destroy()

    def deleteAction(self):
        """Delete the selected entries (with confirmation)."""
        if self.main_list.curselection():
            self.registerAction("delete")
            self.execAction()
|
"""
/***************************************************************************
vector_selectbypoint
A QGIS plugin
Select vector features, point and click.
-------------------
begin : 2014-04-07
copyright : (C) 2014 by Brylie Oxley
email : brylie@geolibre.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
    """QGIS plugin entry point: build and return the plugin instance.

    :param iface: QgisInterface instance handed in by QGIS at load time.
    """
    # import lazily so the module is only loaded when the plugin is activated
    from vector_selectbypoint import vector_selectbypoint as plugin_class
    return plugin_class(iface)
|
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
    """Read the 'lightsensors_freq' ROS parameter (default: 10).

    Exits the node with an error when the parameter is not a positive
    number, since a non-positive rate would be invalid for rospy.Rate.
    """
    f = rospy.get_param('lightsensors_freq', 10)
    try:
        if f <= 0.0:
            raise ValueError("lightsensors_freq must be positive")
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; TypeError covers non-numeric
    # parameter values (e.g. a string), matching the original intent.
    except (TypeError, ValueError):
        rospy.logerr("value error: lightsensors_freq")
        sys.exit(1)
    return f
if __name__ == "__main__":
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValue, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
rospy.init_node('lightsensors')
|
import sys, sqlite3, os, inspect, json
from os.path import expanduser
from pyexcel_ods import save_data, get_data
from collections import OrderedDict
from bbdd import Bbdd
class Ods:
    """Import/export of the bets database to/from an ODS spreadsheet.

    The spreadsheet holds a single "Apuestas" sheet whose columns mirror
    the columns of the `bet` table, in the order of the header row built
    in export().
    """

    def __init__(self, directory=expanduser("~/betcon.ods"), directory_bd = None):
        # directory: path of the .ods file to read/write
        # directory_bd: optional sqlite database path (None -> Bbdd default)
        self.directory = directory
        self.directory_bd = directory_bd

    def export(self):
        """Dump every row of the `bet` table into the ODS file at self.directory."""
        if self.directory_bd is None:
            bd = Bbdd()
        else:
            bd = Bbdd(self.directory_bd)
        file = OrderedDict()
        data = bd.select("bet")
        # header row (column order must match the append order below)
        dataOds = [["Fecha", "Deporte", "Competicion", "Región", "Local", "Visitante", "Pick", "Casa", "Mercado",
                    "Tipster", "Stake", "Unidad", "Resultado", "Beneficio", "Apuesta", "Cuota", "Gratuita"]]
        for i in data:
            row = []
            # i[1..17] are the bet columns; foreign keys are resolved to
            # their display names via Bbdd.getValue
            row.append(i[1])
            row.append(bd.getValue(i[2], "sport"))
            row.append(bd.getValue(i[3], "competition"))
            row.append(bd.getValue(i[4], "region"))
            row.append(i[5])
            row.append(i[6])
            row.append(i[7])
            row.append(bd.getValue(i[8], "bookie"))
            row.append(bd.getValue(i[9], "market"))
            row.append(bd.getValue(i[10], "tipster"))
            row.append(i[11])
            row.append(i[12])
            row.append(i[13])
            row.append(i[14])
            row.append(i[15])
            row.append(i[16])
            # i[17] is the "free bet" flag stored as 0/1
            row.append("No" if i[17] == 0 else "Sí")
            dataOds.append(row)
        bd.close()
        file.update({"Apuestas": dataOds})
        save_data(self.directory, file)

    def imports(self):
        """Load the first sheet of the ODS file back into the `bet` table.

        Returns an error message string on failure; returns None on success.
        """
        if self.directory_bd is None:
            bd = Bbdd()
        else:
            bd = Bbdd(self.directory_bd)
        try:
            data = get_data(self.directory)
            data = data.popitem()[1]
            for i in data:
                row = []
                if i[0] == "Fecha":
                    # skip the header row
                    continue
                row.append(i[0])
                # get-or-create pattern for each lookup table; NOTE(review):
                # values are wrapped in single quotes before being handed to
                # Bbdd.getId — confirm Bbdd escapes them, otherwise names
                # containing a quote would break the query.
                sport = bd.getId("'" + i[1] + "'", "sport")
                if sport is None:
                    bd.insert(["name"], [i[1]], "sport")
                    sport = bd.getId("'" + i[1] + "'", "sport")
                row.append(sport)
                region = bd.getId("'" + i[3] + "'", "region")
                if region is None:
                    bd.insert(["name"], [i[3]], "region")
                    region = bd.getId("'" + i[3] + "'", "region")
                # competitions additionally reference their region and sport
                competition = bd.getId("'" + i[2] + "'", "competition")
                if competition is None:
                    bd.insert(["name", "region", "sport"], [i[2], region, sport], "competition")
                    competition = bd.getId("'" + i[2] + "'", "competition")
                row.append(competition)
                row.append(region)
                row.append(i[4])
                row.append(i[5])
                row.append(i[6])
                bookie = bd.getId("'" + i[7] + "'", "bookie")
                if bookie is None:
                    bd.insert(["name"], [i[7]], "bookie")
                    bookie = bd.getId("'" + i[7] + "'", "bookie")
                row.append(bookie)
                market = bd.getId("'" + i[8] + "'", "market")
                if market is None:
                    bd.insert(["name"], [i[8]], "market")
                    market = bd.getId("'" + i[8] + "'", "market")
                row.append(market)
                tipster = bd.getId("'" + i[9] + "'", "tipster")
                if tipster is None:
                    bd.insert(["name"], [i[9]], "tipster")
                    tipster = bd.getId("'" + i[9] + "'", "tipster")
                row.append(tipster)
                row.append(i[10])
                row.append(i[11])
                row.append(i[12])
                row.append(i[13])
                row.append(i[14])
                row.append(i[15])
                row.append(False if i[16] == "No" else True)
                columns = ["date", "sport", "competition", "region", "player1", "player2", "pick", "bookie", "market",
                           "tipster", "stake", "one", "result", "profit", "bet", "quota", "free"]
                bd.insert(columns, row, "bet")
        # NOTE(review): bare except hides the real cause (IndexError on a
        # short row, I/O errors, ...); consider narrowing and logging.
        except:
            return "Error de importación: El archivo de importación no tiene una estructura correcta."
|
import sys
for line in open (sys.argv[1], 'r'):
s = line.rstrip().split(' ')
output = ""
iterZeros = iter(s)
for t in iterZeros:
if t == '0':
output += next(iterZeros)
else:
output += '1' * len(next(iterZeros))
print int('0b' + output, 2)
|
"""Test target identification, iteration and inclusion/exclusion."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import re
import errno
import itertools
import abc
from . import types as t
from .util import (
ApplicationError,
display,
read_lines_without_comments,
is_subdir,
to_text,
to_bytes,
)
from .data import (
data_context,
)
MODULE_EXTENSIONS = '.py', '.ps1'
try:
TCompletionTarget = t.TypeVar('TCompletionTarget', bound='CompletionTarget')
except AttributeError:
TCompletionTarget = None # pylint: disable=invalid-name
try:
TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound='IntegrationTarget')
except AttributeError:
TIntegrationTarget = None # pylint: disable=invalid-name
def find_target_completion(target_func, prefix):
    """
    :type target_func: () -> collections.Iterable[CompletionTarget]
    :type prefix: unicode
    :rtype: list[str]
    """
    try:
        candidates = target_func()
        # bash sets COMP_TYPE to '63' ('?') for double-tab completion
        double_tab = os.environ.get('COMP_TYPE') == '63'
        return walk_completion_targets(candidates, prefix, double_tab)
    except Exception as ex:  # pylint: disable=locally-disabled, broad-except
        # completion must never crash; surface the error as the only match
        return [u'%s' % ex]
def walk_completion_targets(targets, prefix, short=False):
    """
    :type targets: collections.Iterable[CompletionTarget]
    :type prefix: str
    :type short: bool
    :rtype: tuple[str]
    """
    aliases = {alias for target in targets for alias in target.aliases}
    if prefix.endswith('/') and prefix in aliases:
        # an exact directory match would only complete to itself; drop it
        aliases.discard(prefix)
    # keep aliases under the prefix that do not descend into a subdirectory
    matches = [alias for alias in aliases
               if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
    if short:
        offset = len(os.path.dirname(prefix))
        if offset:
            offset += 1
        trimmed = [match[offset:] for match in matches if len(match) > offset]
        if len(trimmed) > 1:
            matches = trimmed
    return tuple(sorted(matches))
def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
    """
    :type targets: collections.Iterable[T <= CompletionTarget]
    :type includes: list[str]
    :type excludes: list[str]
    :type requires: list[str]
    :rtype: tuple[T <= CompletionTarget]
    """
    targets = tuple(targets)
    included = sorted(filter_targets(targets, includes, errors=True, directories=False),
                      key=lambda target: target.name)
    if requires:
        required = set(filter_targets(targets, requires, errors=True, directories=False))
        included = [target for target in included if target in required]
    if excludes:
        # run once over all targets purely to raise on unmatched exclude patterns
        list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
    remaining = set(filter_targets(included, excludes, errors=False, include=False, directories=False))
    return tuple(sorted(remaining, key=lambda target: target.name))
def filter_targets(targets,  # type: t.Iterable[TCompletionTarget]
                   patterns,  # type: t.List[str]
                   include=True,  # type: bool
                   directories=True,  # type: bool
                   errors=True,  # type: bool
                   ):  # type: (...) -> t.Iterable[TCompletionTarget]
    """Iterate over the given targets and filter them based on the supplied arguments.

    Each pattern is matched (fully anchored) against every alias of every
    target.  With include=True matching targets are yielded; with
    include=False the matching targets are the ones dropped.  When
    directories=True and a matched alias is a directory, a DirectoryTarget
    for the shortest matched directory is yielded in place of the target.
    With errors=True, TargetPatternsNotMatched is raised after iteration
    for any pattern that never matched.
    """
    unmatched = set(patterns or ())
    # anchor each pattern so it must match an alias in full
    compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
    for target in targets:
        matched_directories = set()
        match = False
        if patterns:
            for alias in target.aliases:
                for pattern in patterns:
                    if compiled_patterns[pattern].match(alias):
                        match = True
                        try:
                            unmatched.remove(pattern)
                        except KeyError:
                            pass
                        if alias.endswith('/'):
                            # prefer the target's own base path when it is more specific
                            if target.base_path and len(target.base_path) > len(alias):
                                matched_directories.add(target.base_path)
                            else:
                                matched_directories.add(alias)
        elif include:
            # no patterns at all: every target matches
            match = True
            if not target.base_path:
                matched_directories.add('.')
            for alias in target.aliases:
                if alias.endswith('/'):
                    if target.base_path and len(target.base_path) > len(alias):
                        matched_directories.add(target.base_path)
                    else:
                        matched_directories.add(alias)
        if match != include:
            continue
        if directories and matched_directories:
            # represent the whole directory with its shortest matched path
            yield DirectoryTarget(sorted(matched_directories, key=len)[0], target.modules)
        else:
            yield target
    if errors:
        if unmatched:
            raise TargetPatternsNotMatched(unmatched)
def walk_module_targets():
    """
    :rtype: collections.Iterable[TestTarget]
    """
    module_path = data_context().content.module_path
    for target in walk_test_targets(path=module_path, module_path=module_path, extensions=MODULE_EXTENSIONS):
        # skip support files that are not modules themselves (e.g. __init__)
        if target.module:
            yield target
def walk_units_targets():
    """
    :rtype: collections.Iterable[TestTarget]
    """
    content = data_context().content
    return walk_test_targets(path=content.unit_path, module_path=content.unit_module_path, extensions=('.py',), prefix='test_')
def walk_compile_targets(include_symlinks=True):
    """
    :type include_symlinks: bool
    :rtype: collections.Iterable[TestTarget]
    """
    content = data_context().content
    return walk_test_targets(module_path=content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks)
def walk_powershell_targets(include_symlinks=True):
    """
    :rtype: collections.Iterable[TestTarget]
    """
    content = data_context().content
    return walk_test_targets(module_path=content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks)
def walk_sanity_targets():
    """
    :rtype: collections.Iterable[TestTarget]
    """
    content = data_context().content
    return walk_test_targets(module_path=content.module_path, include_symlinks=True, include_symlinked_directories=True)
def walk_posix_integration_targets(include_hidden=False):
    """
    :type include_hidden: bool
    :rtype: collections.Iterable[IntegrationTarget]
    """
    for target in walk_integration_targets():
        aliases = target.aliases
        if 'posix/' in aliases:
            yield target
        elif include_hidden and 'hidden/posix/' in aliases:
            yield target
def walk_network_integration_targets(include_hidden=False):
    """
    :type include_hidden: bool
    :rtype: collections.Iterable[IntegrationTarget]
    """
    for target in walk_integration_targets():
        aliases = target.aliases
        if 'network/' in aliases:
            yield target
        elif include_hidden and 'hidden/network/' in aliases:
            yield target
def walk_windows_integration_targets(include_hidden=False):
    """
    :type include_hidden: bool
    :rtype: collections.Iterable[IntegrationTarget]
    """
    for target in walk_integration_targets():
        aliases = target.aliases
        if 'windows/' in aliases:
            yield target
        elif include_hidden and 'hidden/windows/' in aliases:
            yield target
def walk_integration_targets():
    """
    :rtype: collections.Iterable[IntegrationTarget]
    """
    content = data_context().content
    targets_root = content.integration_targets_path
    # every integration target directory becomes one IntegrationTarget
    modules = frozenset(target.module for target in walk_module_targets())
    prefixes = load_integration_prefixes()
    for target_path in content.get_dirs(targets_root):
        yield IntegrationTarget(target_path, modules, prefixes)
def load_integration_prefixes():
    """
    :rtype: dict[str, str]
    """
    content = data_context().content
    prefix_files = sorted(f for f in content.get_files(content.integration_path)
                          if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes')
    prefixes = {}
    for prefix_file in prefix_files:
        # the file extension names the prefix group (e.g. target-prefixes.network)
        group = os.path.splitext(prefix_file)[1][1:]
        with open(prefix_file, 'r') as prefix_fd:
            for name in prefix_fd.read().splitlines():
                prefixes[name] = group
    return prefixes
def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None, include_symlinks=False, include_symlinked_directories=False):
    """
    :type path: str | None
    :type module_path: str | None
    :type extensions: tuple[str] | None
    :type prefix: str | None
    :type extra_dirs: tuple[str] | None
    :type include_symlinks: bool
    :type include_symlinked_directories: bool
    :rtype: collections.Iterable[TestTarget]
    """
    if path:
        file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories)
    else:
        file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories)
    for file_path in file_paths:
        name, ext = os.path.splitext(os.path.basename(file_path))
        if extensions and ext not in extensions:
            continue
        if prefix and not name.startswith(prefix):
            continue
        symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
        if symlink and not include_symlinks:
            continue
        yield TestTarget(file_path, module_path, prefix, path, symlink)
    if extra_dirs:
        # extra dirs bypass the extension/prefix filters; only the symlink
        # filter applies to their files
        for extra_dir in extra_dirs:
            for file_path in data_context().content.get_files(extra_dir):
                symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
                if symlink and not include_symlinks:
                    continue
                yield TestTarget(file_path, module_path, prefix, path, symlink)
def analyze_integration_target_dependencies(integration_targets):
    """
    :type integration_targets: list[IntegrationTarget]
    :rtype: dict[str,set[str]]

    Maps each target name to the set of target names that depend on it
    (transitively).
    """
    real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
    role_targets = [target for target in integration_targets if target.type == 'role']
    hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
    dependencies = collections.defaultdict(set)
    # handle setup dependencies
    for target in integration_targets:
        for setup_target_name in target.setup_always + target.setup_once:
            dependencies[setup_target_name].add(target.name)
    # handle target dependencies
    for target in integration_targets:
        for need_target in target.needs_target:
            dependencies[need_target].add(target.name)
    # handle symlink dependencies between targets
    # this use case is supported, but discouraged
    for target in integration_targets:
        for path in data_context().content.walk_files(target.path):
            if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
                continue
            real_link_path = os.path.realpath(path)
            if not real_link_path.startswith(real_target_root):
                continue
            link_target = real_link_path[len(real_target_root):].split('/')[0]
            if link_target == target.name:
                # links within a target are not cross-target dependencies
                continue
            dependencies[link_target].add(target.name)
    # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
    # script based targets are scanned as they may execute a playbook with role dependencies
    for target in integration_targets:
        meta_dir = os.path.join(target.path, 'meta')
        if not os.path.isdir(meta_dir):
            continue
        meta_paths = data_context().content.get_files(meta_dir)
        for meta_path in meta_paths:
            if os.path.exists(meta_path):
                with open(meta_path, 'rb') as meta_fd:
                    # try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
                    try:
                        meta_lines = to_text(meta_fd.read()).splitlines()
                    except UnicodeDecodeError:
                        continue
                for meta_line in meta_lines:
                    if re.search(r'^ *#.*$', meta_line):
                        # pure comment line
                        continue
                    if not meta_line.strip():
                        # blank line
                        continue
                    # any mention of a hidden role name is treated as a dependency
                    for hidden_target_name in hidden_role_target_names:
                        if hidden_target_name in meta_line:
                            dependencies[hidden_target_name].add(target.name)
    # expand the map to its transitive closure: keep propagating indirect
    # dependents until a full pass makes no changes
    while True:
        changes = 0
        for dummy, dependent_target_names in dependencies.items():
            for dependent_target_name in list(dependent_target_names):
                new_target_names = dependencies.get(dependent_target_name)
                if new_target_names:
                    for new_target_name in new_target_names:
                        if new_target_name not in dependent_target_names:
                            dependent_target_names.add(new_target_name)
                            changes += 1
        if not changes:
            break
    # log the final dependency map at high verbosity for debugging
    for target_name in sorted(dependencies):
        consumers = dependencies[target_name]
        if not consumers:
            continue
        display.info('%s:' % target_name, verbosity=4)
        for consumer in sorted(consumers):
            display.info('  %s' % consumer, verbosity=4)
    return dependencies
class CompletionTarget:
    """Command-line argument completion target base class."""
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        self.name = None
        self.path = None
        self.base_path = None
        self.modules = tuple()
        self.aliases = tuple()

    def __eq__(self, other):
        # targets compare equal when their repr (name + module list) matches
        if not isinstance(other, CompletionTarget):
            return False
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self.name < other.name

    def __gt__(self, other):
        return self.name > other.name

    def __hash__(self):
        # consistent with __eq__, which compares reprs
        return hash(repr(self))

    def __repr__(self):
        if not self.modules:
            return self.name
        return '%s (%s)' % (self.name, ', '.join(self.modules))
class DirectoryTarget(CompletionTarget):
    """Directory target."""
    def __init__(self, path, modules):
        """
        :type path: str
        :type modules: tuple[str]
        """
        super(DirectoryTarget, self).__init__()
        # a directory target is identified by its path
        self.name = self.path = path
        self.modules = modules
class TestTarget(CompletionTarget):
    """Generic test target."""
    def __init__(self, path, module_path, module_prefix, base_path, symlink=None):
        """
        :type path: str
        :type module_path: str | None
        :type module_prefix: str | None
        :type base_path: str
        :type symlink: bool | None
        """
        super(TestTarget, self).__init__()
        if symlink is None:
            symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
        self.name = path
        self.path = path
        self.base_path = '%s/' % base_path if base_path else None
        self.symlink = symlink
        file_name, file_ext = os.path.splitext(os.path.basename(self.path))
        is_module = module_path and is_subdir(path, module_path) and file_name != '__init__' and file_ext in MODULE_EXTENSIONS
        if is_module:
            # strip the optional prefix and any leading underscore (deprecated modules)
            self.module = file_name[len(module_prefix or ''):].lstrip('_')
            self.modules = (self.module,)
        else:
            self.module = None
            self.modules = tuple()
        # aliases: the full path, the module name, and every parent directory
        aliases = [self.path, self.module]
        parts = self.path.split('/')
        aliases.extend('%s/' % '/'.join(parts[:i]) for i in range(1, len(parts)))
        self.aliases = tuple(sorted(alias for alias in aliases if alias))
class IntegrationTarget(CompletionTarget):
    """Integration test target.

    Derives the target's type, groups, aliases and setup/needs
    configuration from the files inside the target directory and its
    `aliases` file.
    """
    # target groups that cannot run on a POSIX controller
    non_posix = frozenset((
        'network',
        'windows',
    ))
    # group names that act as categories rather than concrete targets
    categories = frozenset(non_posix | frozenset((
        'posix',
        'module',
        'needs',
        'skip',
    )))

    def __init__(self, path, modules, prefixes):
        """
        :type path: str
        :type modules: frozenset[str]
        :type prefixes: dict[str, str]
        """
        super(IntegrationTarget, self).__init__()
        self.name = os.path.basename(path)
        self.path = path
        # script_path and type
        contents = [os.path.basename(p) for p in data_context().content.get_files(path)]
        runme_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'runme')
        test_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'test')
        self.script_path = None
        if runme_files:
            self.type = 'script'
            self.script_path = os.path.join(path, runme_files[0])
        elif test_files:
            self.type = 'special'
        elif os.path.isdir(os.path.join(path, 'tasks')) or os.path.isdir(os.path.join(path, 'defaults')):
            self.type = 'role'
        else:
            self.type = 'role'  # ansible will consider these empty roles, so ansible-test should as well
        # static_aliases: contents of the optional `aliases` file
        try:
            aliases_path = os.path.join(path, 'aliases')
            static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
        except IOError as ex:
            # a missing aliases file is fine; anything else is a real error
            if ex.errno != errno.ENOENT:
                raise
            static_aliases = tuple()
        # modules: the target name itself (optionally without a win_ prefix)
        # plus any module names listed in the aliases file
        if self.name in modules:
            module_name = self.name
        elif self.name.startswith('win_') and self.name[4:] in modules:
            module_name = self.name[4:]
        else:
            module_name = None
        self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
        # groups
        groups = [self.type]
        groups += [a for a in static_aliases if a not in modules]
        groups += ['module/%s' % m for m in self.modules]
        if not self.modules:
            groups.append('non_module')
        if 'destructive' not in groups:
            groups.append('non_destructive')
        if '_' in self.name:
            prefix = self.name[:self.name.find('_')]
        else:
            prefix = None
        if prefix in prefixes:
            group = prefixes[prefix]
            if group != prefix:
                group = '%s/%s' % (group, prefix)
            groups.append(group)
        if self.name.startswith('win_'):
            groups.append('windows')
        if self.name.startswith('connection_'):
            groups.append('connection')
        if self.name.startswith('setup_') or self.name.startswith('prepare_'):
            groups.append('hidden')
        if self.type not in ('script', 'role'):
            groups.append('hidden')
        targets_relative_path = data_context().content.integration_targets_path
        # Collect file paths before group expansion to avoid including the directories.
        # Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
        self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
                                           g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path))))
        # expand each multi-part group into all of its parent groups; islice
        # with the current length snapshots the list, so groups appended
        # inside the loop are not themselves re-expanded
        for group in itertools.islice(groups, 0, len(groups)):
            if '/' in group:
                parts = group.split('/')
                for i in range(1, len(parts)):
                    groups.append('/'.join(parts[:i]))
        if not any(g in self.non_posix for g in groups):
            groups.append('posix')
        # aliases
        aliases = [self.name] + \
                  ['%s/' % g for g in groups] + \
                  ['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
        if 'hidden/' in aliases:
            # hidden targets are only addressable through the hidden/ namespace
            aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
        self.aliases = tuple(sorted(set(aliases)))
        # configuration extracted from the group names
        self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
        self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
        self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
class TargetPatternsNotMatched(ApplicationError):
    """One or more targets were not matched when a match was required."""
    def __init__(self, patterns):
        """
        :type patterns: set[str]
        """
        self.patterns = sorted(patterns)
        if len(patterns) == 1:
            message = 'Target pattern not matched: %s' % self.patterns[0]
        else:
            message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
        super(TargetPatternsNotMatched, self).__init__(message)
|
import logging
from gi.repository import GLib, GObject
class Timer(GObject.Object):
    """Dynamic timer that allows dynamically changing frequency"""

    # Interval in whole seconds between calls; assigning to this GObject
    # property fires notify::timeout, which reschedules the timeout source.
    timeout = GObject.Property(type=GObject.TYPE_UINT)

    def __init__(self, function, **kwargs):
        # function: zero-argument callable run immediately and then every
        # `timeout` seconds (kwargs typically supply timeout=...)
        super().__init__(**kwargs)
        assert (callable(function))
        self._paused = False
        self._func = function
        self._id = 0  # GLib source id of the scheduled timeout (0 = none)
        self._add_timeout()
        self.connect('notify::timeout', self._on_timeout_changed)
        self._run_func()

    def __del__(self):
        # drop the GLib source so the callback cannot outlive this object
        logging.debug('Timer removed')
        if self._id:
            GLib.source_remove(self._id)

    def _add_timeout(self):
        # (re)schedule the periodic callback, cancelling any previous source
        if self._id:
            GLib.source_remove(self._id)
        self._id = GLib.timeout_add_seconds(self.timeout, self._run_func)

    def _on_timeout_changed(self, prop, param):
        logging.debug('Timeout changed')
        self._add_timeout()

    def _run_func(self):
        # timeout callback: run the wrapped function unless paused
        if not self._paused:
            try:
                logging.debug('Timer running')
                self._func()
            except Exception as e:
                # never let an exception kill the GLib source; log and continue
                logging.exception(e)
        # always keep the timeout installed; pausing just skips the call
        return GLib.SOURCE_CONTINUE

    def pause(self):
        # suspend execution; the GLib timeout keeps firing but does nothing
        self._paused = True

    def resume(self):
        self._paused = False

    def run_once(self):
        # To be the most efficient we will restart the timer from here
        GLib.source_remove(self._id)
        self._id = 0
        def run_real():
            self._run_func()
            self._add_timeout()
            return GLib.SOURCE_REMOVE
        # FIXME: The timing of this is wrong but logically all of our HTTP calls are in the correct order
        # the server just doesn't respond with the up to date information for some actions
        GLib.timeout_add(250, run_real)
|
import sys
from Bio import SeqIO
import Script
def usage():
    """Print the command-line help text for methreport.py to stderr.

    Help-text fixes: "nummber" -> "number" and "is specified" ->
    "if specified" (typos in the user-facing message).
    """
    sys.stderr.write("""methreport.py - Report methylation rate at CG and GC positions.
Usage: methreport.py [-gcg] infile [outfile]
`Infile' should be a multi-FASTA file in which the first sequence is assumed to
be the reference. All other sequences should have the same length as the reference
and be aligned to it. This program will identify all CG and GC positions in the
reference sequence (including GCG positions if the -gcg option is specified) and
report to the output the number and fraction of unconverted Cs at each position.
Output is written to `outfile', if specified, or to standard output. Entries for
CG positions are written first, followed by those for GC positions. The output is
in tab-delimited format with three columns: position, number of unconverted Cs,
fraction of unconverted Cs (over total number of sequences examined).
Options:
 -gcg | Do not exclude GCG positions from analysis.
""")
# Script harness: handles standard options (-h, -v, ...) and error reporting.
P = Script.Script("methreport.py", version="1.0", usage=usage)
# Passed as the `excludeGCG` argument of detectCG/detectGC via refDesc.
# NOTE(review): the help text says -gcg means "do not exclude GCG positions",
# but main() sets EXCLGCG = True when -gcg is given and the detect* helpers
# treat True as "exclude" — verify the intended flag polarity.
EXCLGCG=False
class refDesc():
    """A class containing the reference sequence, its length, and a list of CG
    and GC positions.

    Fixes vs. the original: the mutable list defaults declared at class scope
    (shared across instances) and the no-op ``global EXCLGCG`` statement at
    class scope have been removed -- every attribute is set per instance in
    ``__init__``.
    """

    def __init__(self, ref):
        """Store `ref` and precompute its CG/GC site positions, honoring the
        module-level EXCLGCG flag (set by the -gcg command-line option)."""
        self.sequence = ref
        self.length = len(ref)
        self.CGpositions = detectCG(ref, self.length, EXCLGCG)
        self.GCpositions = detectGC(ref, self.length, EXCLGCG)
        self.numCGs = len(self.CGpositions)
        self.numGCs = len(self.GCpositions)
def loadSequences(filename):
    """Return an iterator of Bio SeqRecords parsed from multi-FASTA `filename`."""
    return SeqIO.parse(filename, "fasta")
def detectCG(seq, length, excludeGCG=False):
    """Return the list of C positions in CG dinucleotides in sequence `seq'.

    If `excludeGCG' is True, ignores GCG positions (a CG whose C is
    immediately preceded by a G).
    """
    result = []
    candidate = None  # position of a C seen at the previous index, if any
    for i in range(length):
        if seq[i] == 'C':
            candidate = i
        elif seq[i] == 'G' and candidate is not None:
            # BUG FIX 1: the original tested truthiness (`and candidate`),
            # which silently dropped a CG starting at position 0.
            if (not excludeGCG) or (i < 2) or (seq[i - 2] != 'G'):
                result.append(candidate)
            # BUG FIX 2: the original did not reset `candidate` when a GCG
            # site was skipped, so a later lone G could be paired with a C
            # that was not adjacent to it (e.g. "GCGG" reported position 1).
            candidate = None
        else:
            candidate = None
    return result
def detectGC(seq, length, excludeGCG=False):
    """Return the list of C positions in GC dinucleotides in sequence `seq'.

    If `excludeGCG' is True, ignores GCG positions (a GC whose C is
    immediately followed by a G).

    Cleanup vs. the original: the never-read `candidate` variable and the
    duplicated append branches were removed; behavior is unchanged.
    """
    result = []
    for i in range(1, length):
        if seq[i] == 'C' and seq[i - 1] == 'G':
            # Keep the site unless we are excluding GCGs and a G follows the
            # C (a C at the last position can never be part of a GCG).
            if (not excludeGCG) or (i == length - 1) or (seq[i + 1] != 'G'):
                result.append(i)
    return result
def formatTabDelim(stream, l):
    """Write the items of `l` to `stream` as a single tab-separated line."""
    line = "\t".join(l)
    stream.write(line + "\n")
def main():
    """Command-line entry point.

    Parses arguments, loads the reference (first FASTA record), counts
    unconverted Cs at every CG and GC reference position across the remaining
    records, and writes the tab-delimited report to `outfile` or stdout.
    """
    global EXCLGCG
    infile = None
    outfile = ""

    # Parse arguments
    args = sys.argv[1:]
    P.standardOpts(args)
    for arg in args:
        if arg == "-gcg":
            EXCLGCG = True
        elif infile is None:
            # BUG FIX: the original compared `infile == ""` although infile
            # was initialized to None, so the input filename was never
            # captured and every positional argument became `outfile`.
            infile = P.isFile(arg)
        else:
            outfile = arg
    if not infile:
        P.errmsg(P.NOFILE)

    nreads = 0
    seqs = loadSequences(infile)
    # BUG FIX: `seqs.next()` is Python 2 only; the builtin next() works on
    # both Python 2 and 3.
    rd = refDesc(next(seqs))  # reference sequence
    print("Reference sequence loaded from file `{}'.".format(infile))
    print("{}bp, {} CG positions, {} GC positions.".format(rd.length, rd.numCGs, rd.numGCs))

    # Per-site [position, unconverted-C count] accumulators.
    CGarr = [[p, 0] for p in rd.CGpositions]
    GCarr = [[p, 0] for p in rd.GCpositions]

    print("Reading sequences...")
    for s in seqs:
        nreads += 1
        for p in CGarr:
            if s[p[0]] == 'C':
                p[1] += 1
        for p in GCarr:
            if s[p[0]] == 'C':
                p[1] += 1

    out = open(outfile, "w") if outfile else sys.stdout
    try:
        _writeSection(out, "CG", CGarr, nreads)
        out.write("\n")
        _writeSection(out, "GC", GCarr, nreads)
    finally:
        if outfile:
            out.close()


def _writeSection(out, label, arr, nreads):
    """Write one report section: header rows plus one row per site with the
    count and fraction of unconverted Cs."""
    formatTabDelim(out, ["#" + label, "Sites:", str(len(arr))])
    formatTabDelim(out, [label + " pos", "Num C", "Perc C"])
    for pos, count in arr:
        # Guard against ZeroDivisionError when the input held only the
        # reference sequence (nreads == 0).
        frac = 1.0 * count / nreads if nreads else 0.0
        formatTabDelim(out, [str(pos), str(count), str(frac)])
# Script entry point.
if __name__ == "__main__":
    main()
|
from decimal import Decimal
from uuid import UUID
import datetime
from dateutil.parser import parse as parse_date
from requests.compat import urlparse
# String forms accepted as boolean "true" for the uPnP 'boolean' datatype.
TRUTHY_VALS = {'true', 'yes', '1'}

# uPnP datatype names grouped by the Python type they marshal to; consumed by
# MARSHAL_FUNCTIONS below. DT_RET types are returned unchanged.
DT_RET = {'char', 'string', 'bin.base64', 'bin.hex'}
DT_INT = {'ui1', 'ui2', 'ui4', 'i1', 'i2', 'i4'}
DT_DECIMAL = {'r4', 'r8', 'number', 'float', 'fixed.14.4'}
DT_DATE = {'date'}
DT_DATETIME = {'dateTime', 'dateTime.tz'}
DT_TIME = {'time', 'time.tz'}
DT_BOOL = {'boolean'}
DT_URI = {'uri'}
DT_UUID = {'uuid'}
def parse_time(val):
    """
    Parse a time to a `datetime.time` value.

    Can't just use `dateutil.parse.parser(val).time()` because that doesn't preserve tzinfo.
    """
    parsed = parse_date(val)
    if parsed.tzinfo is not None:
        return datetime.time(parsed.hour, parsed.minute, parsed.second,
                             parsed.microsecond, parsed.tzinfo)
    return parsed.time()
# Ordered (datatype-group, converter) pairs consumed by marshal_value().
# The first group containing the datatype wins.
MARSHAL_FUNCTIONS = (
    (DT_RET, lambda x: x),
    (DT_INT, int),
    (DT_DECIMAL, Decimal),
    (DT_DATE, lambda x: parse_date(x).date()),
    (DT_DATETIME, parse_date),
    (DT_TIME, parse_time),
    (DT_BOOL, lambda x: x.lower() in TRUTHY_VALS),
    (DT_URI, urlparse),
    (DT_UUID, UUID)
)
def marshal_value(datatype, value):
    """
    Marshal a given string into a relevant Python type given the uPnP datatype.

    Assumes that the value has been pre-validated, so performs no checks.

    Returns a tuple pair of a boolean to say whether the value was marshalled
    and the (un)marshalled value.
    """
    converter = next(
        (func for types, func in MARSHAL_FUNCTIONS if datatype in types),
        None,
    )
    if converter is None:
        return False, value
    return True, converter(value)
|
import math, sys
import svgwrite
PROGNAME = sys.argv[0].rstrip('.py')
def create_svg(name):
    """Draw a sampler of three wallpaper-group tilings -- p3m1 (mirror plus
    three rotations), p31m (kite, three rotations and a mirror) and p6m
    (kaleidoscope) -- built entirely from defs/g/use elements with translate,
    scale and rotate transforms, then save the drawing to the file `name`.
    """
    svg_size_width = 900
    svg_size_height = 1600
    font_size = 20
    square_size = 30
    title1 = name + ': Part 5 tiling with multiple def, groups, use, translate and scale.'
    sqrt3 = math.sqrt(3)  # do this calc once instead of repeating the calc many times.
    dwg = svgwrite.Drawing(name, (svg_size_width, svg_size_height), debug=True)
    # ####################
    # p3m1 - Mirror and Three rotations
    # - Equilateral triangle mirrored, rotated
    # All three sides are the same length, all three angles are 60 degrees.
    # The height of the triangle h = sqrt(3)/2.0 * length of a side
    # The centre of the triangle is sqrt(3)/6.0 * length of a side
    defs_g_trieq_size_x = square_size
    defs_g_trieq_size_y = defs_g_trieq_size_x * sqrt3 / 2.0
    defs_g_trieq_centre = sqrt3 / 6.0 * defs_g_trieq_size_x
    # width of equilateral triangle at the centre
    defs_g_trieq_centre_size_x = defs_g_trieq_size_x - defs_g_trieq_size_x * defs_g_trieq_centre / defs_g_trieq_size_y
    # defs_g_trieq = dwg.defs.add(dwg.g(id='defs_g_trieq', clip_path='url(#cliptrieq)'))
    defs_g_trieq = dwg.defs.add(dwg.g(id='defs_g_trieq'))
    defs_g_trieq.add(dwg.polygon([(0, -defs_g_trieq_size_y + defs_g_trieq_centre), (defs_g_trieq_size_x / 2.0, defs_g_trieq_centre),
                                  (-defs_g_trieq_size_x / 2.0, defs_g_trieq_centre)], stroke='none'))
    defs_g_trieq.add(dwg.polygon([(-defs_g_trieq_size_x / 2.0, defs_g_trieq_centre), (-defs_g_trieq_centre_size_x / 2.0, 0),
                                  (defs_g_trieq_centre_size_x / 2.0, 0), (0, defs_g_trieq_centre)], stroke='none', fill='yellow'))
    # Create mirror of the equilateral triangle.
    defs_g_trieq_m = dwg.defs.add(dwg.g(id='defs_g_trieq_m'))
    defs_g_trieq_m.add(dwg.use(defs_g_trieq, insert=(0, 0)))
    defs_g_trieq_m.scale(-1, -1)
    # Create combined cell
    defs_g_trieq_cc_size_x = 1.5 * defs_g_trieq_size_x
    defs_g_trieq_cc_size_y = defs_g_trieq_size_y
    defs_g_trieq_cc = dwg.defs.add(dwg.g(id='defs_g_trieq_cc'))
    defs_g_trieq_cc.add(dwg.use(defs_g_trieq, insert=(-defs_g_trieq_size_x / 4.0, defs_g_trieq_size_y / 2.0 - defs_g_trieq_centre)))
    defs_g_trieq_cc.add(dwg.use(defs_g_trieq_m, insert=(defs_g_trieq_size_x / 4.0, -(defs_g_trieq_size_y / 2.0 - defs_g_trieq_centre))))
    # Create rotations of combined cell
    defs_g_trieq_cc_120 = dwg.defs.add(dwg.g(id='defs_g_trieq_cc_120'))
    defs_g_trieq_cc_120.add(dwg.use(defs_g_trieq_cc, insert=(0, 0), fill='mediumblue'))
    defs_g_trieq_cc_120.rotate(120, center=(0, 0))
    defs_g_trieq_cc_m120 = dwg.defs.add(dwg.g(id='defs_g_trieq_cc_m120'))
    defs_g_trieq_cc_m120.add(dwg.use(defs_g_trieq_cc, insert=(0, 0), fill='navy'))
    defs_g_trieq_cc_m120.rotate(-120, center=(0, 0))
    # Create pattern from rotations of combined cell
    defs_g_trieq_pattern_size_x = 2 * defs_g_trieq_size_x
    defs_g_trieq_pattern_size_y = 2 * defs_g_trieq_size_y
    defs_g_trieq_pattern = dwg.defs.add(dwg.g(id='defs_g_trieq_pattern'))
    defs_g_trieq_pattern.add(dwg.use(defs_g_trieq_cc, insert=(-defs_g_trieq_size_x / 4.0, -defs_g_trieq_cc_size_y / 2.0)))
    defs_g_trieq_pattern.add(dwg.use(defs_g_trieq_cc_120, insert=(defs_g_trieq_size_x / 2.0, 0)))
    defs_g_trieq_pattern.add(dwg.use(defs_g_trieq_cc_m120, insert=(-defs_g_trieq_size_x / 4.0, defs_g_trieq_cc_size_y / 2.0)))
    # ####################
    # p31m - Three rotations and a mirror
    # - A Kite shape, half hexagon, and half of a 60 degree diamond will all work for this
    # symmetry. This one will use a kite.
    # 30, 60, 90 angle triangle
    # The length of the sides are 1:sqrt(3):2 2 is the hypotenuse
    # invsqrt2 = 1/sqrt2
    # invsqrt2_2 = invsqrt2 * invsqrt2 = 1/2 = .5 by definition
    # sin and cos(45 degrees) is 1/sqrt2 = 0.707106781187
    # cos(30 degrees) is sqrt3/2
    # sin(30 degrees) is 1/2
    # tan(30) = 1/sqrt(3)
    # The height of equilateral triangle h = sqrt(3)/2.0 * length of a side
    # The centre of equilateral triangle is sqrt(3)/6.0 * length of a side
    defs_g_kite_size_x = square_size
    defs_g_kite_size_y = defs_g_kite_size_x * sqrt3 / 2.0 + defs_g_kite_size_x * sqrt3 / 6.0
    # Having a clip path seems to increase the visibility of the lines between the tiles.
    # A clipping path may be necessary if the shapes go outside the triangle.
    # defs_g_kite = dwg.defs.add(dwg.g(id='defs_g_kite', clip_path='url(#clipkite)'))
    defs_g_kite = dwg.defs.add(dwg.g(id='defs_g_kite'))
    defs_g_kite.add(dwg.polygon([(0, 0),
                                 (defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0)),
                                 (0, defs_g_kite_size_y),
                                 (-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none'))
    #defs_g_kite.add(dwg.polygon([(0, 0),
    #                             (defs_g_kite_size_x / 4.0, (defs_g_kite_size_y + defs_g_kite_size_x / (sqrt3 * 2.0)) / 2.0),
    #                             (-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none', fill='yellow'))
    defs_g_kite.add(dwg.polygon([(0, 0),
                                 (defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0)),
                                 (0, defs_g_kite_size_y / 12.0),
                                 (-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none',
                                fill='black'))
    defs_g_kite.add(dwg.polygon([(0, defs_g_kite_size_y),
                                 (defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0)),
                                 (0, defs_g_kite_size_y * 8.0 / 12.0),
                                 (-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none',
                                fill='green'))
    # Create rotations of the kite.
    defs_g_kite_120 = dwg.defs.add(dwg.g(id='defs_g_kite_120'))
    defs_g_kite_120.add(dwg.use(defs_g_kite, insert=(0, 0)))
    defs_g_kite_120.rotate(120, center=(0, 0))
    defs_g_kite_m120 = dwg.defs.add(dwg.g(id='defs_g_kite_m120'))
    defs_g_kite_m120.add(dwg.use(defs_g_kite, insert=(0, 0)))
    defs_g_kite_m120.rotate(-120, center=(0, 0))
    # Now use the cell, rotated cells to create the combined cell.
    # The height of equilateral triangle h = sqrt(3) / 2.0 * length of a side
    defs_g_kite_cc_size_x = 2 * defs_g_kite_size_x
    defs_g_kite_cc_size_y = defs_g_kite_size_x * sqrt3  # 2*(sqrt(3)/2.0)
    defs_g_kite_cc = dwg.defs.add(dwg.g(id='defs_g_kite_cc'))
    defs_g_kite_cc.add(dwg.use(defs_g_kite, insert=(0, 0)))
    defs_g_kite_cc.add(dwg.use(defs_g_kite_120, insert=(0, 0)))
    defs_g_kite_cc.add(dwg.use(defs_g_kite_m120, insert=(0, 0)))
    # Now use the combined cell to create a mirrored combined cell
    defs_g_kite_mcc = dwg.defs.add(dwg.g(id='defs_g_kite_mcc'))
    defs_g_kite_mcc.add(dwg.use(defs_g_kite_cc, insert=(0, 0)))
    defs_g_kite_mcc.scale(-1, -1)
    # Now use the combined cell, and mirrored combined cell to create a pattern
    defs_g_kite_pattern_size_x = 1.5 * defs_g_kite_cc_size_x
    defs_g_kite_pattern_size_y = defs_g_kite_cc_size_y
    defs_g_kite_pattern = dwg.defs.add(dwg.g(id='defs_g_kite_pattern'))
    defs_g_kite_pattern.add(dwg.use(defs_g_kite_cc, insert=(-defs_g_kite_cc_size_x / 4.0, -sqrt3 / 12.0 * defs_g_kite_cc_size_x)))
    defs_g_kite_pattern.add(dwg.use(defs_g_kite_mcc, insert=(defs_g_kite_cc_size_x / 4.0, sqrt3 / 12.0 * defs_g_kite_cc_size_x)))
    # ####################
    # p6m - Kaleidoscope Either of the two long sides of the primary triangle is mirrored. The
    # resulting shape is rotated six times.
    # 30, 60, 90 angle triangle
    # The length of the sides are 1:sqrt(3):2 2 is the hypotenuse
    # invsqrt2 = 1/sqrt2
    # invsqrt2_2 = invsqrt2 * invsqrt2 = 1/2 = .5 by definition
    # sin and cos(45 degrees) is 1/sqrt2 = 0.707106781187
    # cos(30 degrees) is sqrt3/2
    # sin(30 degrees) is 1/2
    # tan(30) = 1/sqrt(3)
    # # The height of equilateral triangle h = sqrt(3) / 2.0 * length of a side
    # # The centre of equilateral triangle is sqrt(3) / 6.0 * length of a side
    defs_g_kale_tri_size_x = square_size
    defs_g_kale_tri_size_y = defs_g_kale_tri_size_x * 4.0 / sqrt3
    # Having a clip path seems to increase the visibility of the lines between the tiles.
    # A clipping path may be necessary if the shapes go outside the triangle.
    # defs_g_kale_tri = dwg.defs.add(dwg.g(id='defs_g_kale_tri', clip_path='url(#clipkale)'))
    defs_g_kale_tri = dwg.defs.add(dwg.g(id='defs_g_kale_tri'))
    defs_g_kale_tri.add(dwg.polygon([(0, -defs_g_kale_tri_size_y), (0, 0), (-defs_g_kale_tri_size_x, defs_g_kale_tri_size_x / sqrt3
                                     - defs_g_kale_tri_size_y)], stroke='none'))
    defs_g_kale_tri.add(dwg.polygon([(-defs_g_kale_tri_size_x, defs_g_kale_tri_size_x / sqrt3 - defs_g_kale_tri_size_y), (0, 2.0
                                     * defs_g_kale_tri_size_x / sqrt3 - defs_g_kale_tri_size_y), (0, 3.0 * defs_g_kale_tri_size_x / sqrt3
                                     - defs_g_kale_tri_size_y)], stroke='none', fill='yellow'))
    # Create mirror of the kale.
    defs_g_kale_tri_m = dwg.defs.add(dwg.g(id='defs_g_kale_tri_m'))
    defs_g_kale_tri_m.add(dwg.use(defs_g_kale_tri, insert=(0, 0)))
    defs_g_kale_tri_m.scale(-1, 1)
    # Now use the tri, rotated tri to create the combined cell.
    defs_g_kale_cc_size_x = 2 * defs_g_kale_tri_size_x
    defs_g_kale_cc_size_y = defs_g_kale_tri_size_y
    defs_g_kale_cc = dwg.defs.add(dwg.g(id='defs_g_kale_cc'))
    defs_g_kale_cc.add(dwg.use(defs_g_kale_tri, insert=(0, 0)))
    defs_g_kale_cc.add(dwg.use(defs_g_kale_tri_m, insert=(0, 0)))
    # Now rotate the combined cell.
    defs_g_kale_cc_60 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_60'))
    defs_g_kale_cc_60.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
    defs_g_kale_cc_60.rotate(60, center=(0, 0))
    defs_g_kale_cc_120 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_120'))
    defs_g_kale_cc_120.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
    defs_g_kale_cc_120.rotate(120, center=(0, 0))
    defs_g_kale_cc_180 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_180'))
    defs_g_kale_cc_180.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
    defs_g_kale_cc_180.rotate(180, center=(0, 0))
    defs_g_kale_cc_m60 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_m60'))
    defs_g_kale_cc_m60.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
    defs_g_kale_cc_m60.rotate(-60, center=(0, 0))
    defs_g_kale_cc_m120 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_m120'))
    defs_g_kale_cc_m120.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
    defs_g_kale_cc_m120.rotate(-120, center=(0, 0))
    # Now use the cell and five rotated cells to create the pattern.
    defs_g_kale_pattern_size_x = 2 * defs_g_kale_cc_size_x
    defs_g_kale_pattern_size_y = 2 * defs_g_kale_cc_size_y
    defs_g_kale_pattern = dwg.defs.add(dwg.g(id='defs_g_kale_pattern'))
    defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
    defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_60, insert=(0, 0)))
    defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_120, insert=(0, 0)))
    defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_180, insert=(0, 0)))
    defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_m60, insert=(0, 0)))
    defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_m120, insert=(0, 0)))
    # ########################
    # Background will be dark but not black so the background does not overwhelm the colors.
    dwg.add(dwg.rect(insert=(0, 0), size=('100%', '100%'), rx=None, ry=None, fill='grey'))
    # Give the name of the example and a title.
    y = font_size + 5
    dwg.add(dwg.text(title1, insert=(0, y), font_family='serif', font_size=font_size, fill='white'))
    y = y + font_size
    # p3m1 - Mirror and three rotations
    title2 = 'Mirror and three rotations, math name: p3m1'
    dwg.add(dwg.text(title2, insert=(50, y), font_family='serif', font_size=font_size, fill='white'))
    y = y + font_size + defs_g_trieq_size_x
    cell_created = dwg.use(defs_g_trieq, insert=(50 + defs_g_trieq_size_x, y), fill='lightblue')
    dwg.add(cell_created)
    dwg.add(dwg.circle(center=(50 + defs_g_trieq_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    cc_created = dwg.use(defs_g_trieq_cc, insert=(150 + defs_g_trieq_cc_size_x, y), fill='lightblue')
    dwg.add(cc_created)
    dwg.add(dwg.circle(center=(150 + defs_g_trieq_cc_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    pattern_created = dwg.use(defs_g_trieq_pattern, insert=(250 + defs_g_trieq_cc_size_x, y), fill='lightblue')
    dwg.add(pattern_created)
    dwg.add(dwg.circle(center=(250 + defs_g_trieq_cc_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    y += defs_g_trieq_pattern_size_y
    # Tile the p3m1 pattern; odd rows are offset half a pattern to interlock.
    for i in range(8):
        y += defs_g_trieq_pattern_size_y / 2.0
        for j in range(6):
            if i % 2:
                x = 50 + j * 1.5 * defs_g_trieq_pattern_size_x
            else:
                x = 50 + 1.5 * defs_g_trieq_size_x + j * 1.5 * defs_g_trieq_pattern_size_x
            pattern_created = dwg.use(defs_g_trieq_pattern, fill='lightblue')
            pattern_created.translate(x, y)
            dwg.add(pattern_created)
    y += defs_g_trieq_pattern_size_y
    #
    # p31m sample cell, combined cell and tile
    title2 = 'Kite rotated and mirrored, math name: p31m'
    dwg.add(dwg.text(title2, insert=(50, y), font_family='serif', font_size=font_size, fill='white'))
    y = y + font_size + defs_g_kite_size_y
    cell_created = dwg.use(defs_g_kite, insert=(50 + defs_g_kite_size_x / 2.0, y), fill='navy')
    dwg.add(cell_created)
    dwg.add(dwg.circle(center=(50 + defs_g_kite_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    cc_created = dwg.use(defs_g_kite_cc, insert=(150 + defs_g_kite_size_x / 2.0, y), fill='navy')
    dwg.add(cc_created)
    dwg.add(dwg.circle(center=(150 + defs_g_kite_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    mcc_created = dwg.use(defs_g_kite_mcc, insert=(250 + defs_g_kite_cc_size_x / 2, y), fill='navy')
    dwg.add(mcc_created)
    dwg.add(dwg.circle(center=(250 + defs_g_kite_cc_size_x / 2, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    pattern_created = dwg.use(defs_g_kite_pattern, insert=(350 + defs_g_kite_cc_size_x, y), fill='navy')
    dwg.add(pattern_created)
    dwg.add(dwg.circle(center=(350 + defs_g_kite_cc_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    y += defs_g_kite_pattern_size_y
    # Tile the p31m pattern; odd rows shift half a combined-cell width.
    for i in range(6):
        y += defs_g_kite_pattern_size_y
        for j in range(8):
            if i % 2:
                x = 100 + (j + 0.5) * defs_g_kite_cc_size_x
            else:
                x = 100 + j * defs_g_kite_cc_size_x
            pattern_created = dwg.use(defs_g_kite_pattern, fill='navy')
            pattern_created.translate(x, y)
            dwg.add(pattern_created)
    y += defs_g_kite_pattern_size_y
    # ##
    # p6m kaleidoscope
    title2 = 'Kaleidoscope 30, 60, 90 triangle mirrored and rotated, math name: p6m'
    dwg.add(dwg.text(title2, insert=(50, y), font_family='serif', font_size=font_size, fill='white'))
    y = y + font_size
    y += defs_g_kale_tri_size_y
    cell_created = dwg.use(defs_g_kale_tri, insert=(50 + defs_g_kale_tri_size_x, y), fill='navy')
    dwg.add(cell_created)
    dwg.add(dwg.circle(center=(50 + defs_g_kale_tri_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    cc_created = dwg.use(defs_g_kale_cc, insert=(150 + defs_g_kale_cc_size_x / 2.0, y), fill='navy')
    dwg.add(cc_created)
    dwg.add(dwg.circle(center=(150 + defs_g_kale_cc_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    pattern_created = dwg.use(defs_g_kale_pattern, insert=(250 + defs_g_kale_pattern_size_x / 2.0, y), fill='navy')
    dwg.add(pattern_created)
    dwg.add(dwg.circle(center=(250 + defs_g_kale_pattern_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
    y += defs_g_kale_pattern_size_y / 2.0
    # Tile the p6m pattern; odd rows are offset by a combined-cell width.
    for i in range(4):
        y += defs_g_kale_pattern_size_y - defs_g_kale_pattern_size_x / (sqrt3 * 2)
        for j in range(6):
            if i % 2:
                x = 100 + j * defs_g_kale_pattern_size_x
            else:
                x = 100 + defs_g_kale_cc_size_x + j * defs_g_kale_pattern_size_x
            pattern_created = dwg.use(defs_g_kale_pattern, fill='navy')
            pattern_created.translate(x, y)
            dwg.add(pattern_created)
    y += defs_g_kale_pattern_size_y
    # All items have been added so save the svg to a the file.
    dwg.save()
# Script entry point: write the SVG next to the script, named after it.
if __name__ == '__main__':
    create_svg(PROGNAME + '.svg')
|
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.urls import reverse
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from django.contrib import messages
from util.filterspecs import Filter, FilterBar
from reports.models import Report
@csrf_exempt
def upload(request):
    """Accept a POSTed report from a client host.

    Creates a Report row and parses the POST payload plus request metadata
    into it; optionally queues asynchronous processing behind
    settings.USE_ASYNC_PROCESSING. When the client sent report=1, a
    plain-text summary is rendered back; otherwise the reply is
    204 No Content. Non-POST requests raise Http404.

    CSRF is intentionally exempt: the endpoint is called by machines,
    not browsers.
    """
    if request.method == 'POST':
        data = request.POST.copy()
        meta = request.META.copy()
        # Create + parse atomically so a half-parsed report is never visible.
        with transaction.atomic():
            report = Report.objects.create()
            report.parse(data, meta)
        if settings.USE_ASYNC_PROCESSING:
            # Deferred import so the task machinery is only needed when enabled.
            from reports.tasks import process_report
            process_report.delay(report.id)
        if 'report' in data and data['report'] == '1':
            packages = []
            repos = []
            if 'packages' in data:
                # One package per line; quotes stripped, fields space-split.
                for p in data['packages'].splitlines():
                    packages.append(p.replace('\'', '').split(' '))
            if 'repos' in data:
                # NOTE(review): unlike 'packages', 'repos' is passed through
                # unsplit -- confirm the template expects the raw string.
                repos = data['repos']
            return render(request,
                          'reports/report.txt',
                          {'data': data,
                           'packages': packages,
                           'repos': repos},
                          content_type='text/plain')
        else:
            return HttpResponse(status=204)
    else:
        raise Http404
@login_required
def report_list(request):
    """List reports with optional host, processed-state, and free-text search
    filters, paginated 50 per page."""
    reports = Report.objects.select_related()

    if 'host_id' in request.GET:
        # NOTE(review): filters the `hostname` field by a numeric host id --
        # presumably `hostname` is a FK to a Host model; confirm.
        reports = reports.filter(hostname=int(request.GET['host_id']))

    if 'processed' in request.GET:
        processed = request.GET['processed'] == 'True'
        reports = reports.filter(processed=processed)

    if 'search' in request.GET:
        terms = request.GET['search'].lower()
        query = Q()
        # AND together a host-substring match for every space-separated term.
        for term in terms.split(' '):
            q = Q(host__icontains=term)
            query = query & q
        reports = reports.filter(query)
    else:
        terms = ''

    page_no = request.GET.get('page')
    paginator = Paginator(reports, 50)
    try:
        page = paginator.page(page_no)
    except PageNotAnInteger:
        # Missing or non-numeric page -> first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page -> last page.
        page = paginator.page(paginator.num_pages)

    filter_list = []
    filter_list.append(Filter(request, 'processed',
                              {False: 'No', True: 'Yes'}))
    filter_bar = FilterBar(request, filter_list)

    return render(request,
                  'reports/report_list.html',
                  {'page': page,
                   'filter_bar': filter_bar,
                   'terms': terms}, )
@login_required
def report_detail(request, report_id):
    """Render the detail page for a single report (404 when it is missing)."""
    context = {'report': get_object_or_404(Report, id=report_id)}
    return render(request, 'reports/report_detail.html', context)
@login_required
def report_process(request, report_id):
    """Process a single report, then render its detail page.

    NOTE(review): this mutates state on a GET request; consider requiring
    POST so link prefetchers cannot trigger re-processing.
    """
    report = get_object_or_404(Report, id=report_id)
    report.process()
    return render(request,
                  'reports/report_detail.html',
                  {'report': report}, )
@login_required
def report_delete(request, report_id):
    """Confirm-and-delete view for a report.

    GET renders the confirmation page; POST with 'delete' removes the
    report and redirects to the list, POST with 'cancel' returns to the
    report's own page.
    """
    report = get_object_or_404(Report, id=report_id)
    if request.method == 'POST':
        if 'delete' in request.POST:
            report.delete()
            text = 'Report {0!s} has been deleted'.format(report)
            messages.info(request, text)
            return redirect(reverse('reports:report_list'))
        if 'cancel' in request.POST:
            return redirect(report.get_absolute_url())
    return render(request, 'reports/report_delete.html', {'report': report})
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}
# ansible-doc DOCUMENTATION block. Fixes vs. the original: the garbled
# sentence "configures the link to actively information" now reads
# "actively transmit information", and the documented default for `mode`
# is 'on', matching the argument_spec default in main().
DOCUMENTATION = """
---
module: junos_linkagg
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage link aggregation groups on Juniper JUNOS network devices
description:
  - This module provides declarative management of link aggregation groups
    on Juniper JUNOS network devices.
options:
  name:
    description:
      - Name of the link aggregation group.
    required: true
  mode:
    description:
      - Mode of the link aggregation group. A value of C(on) will enable LACP in C(passive) mode.
        C(active) configures the link to actively transmit information about the state of the link,
        or it can be configured in C(passive) mode ie. send link state information only when
        received them from another link. A value of C(off) will disable LACP.
    default: on
    choices: ['on', 'off', 'active', 'passive']
  members:
    description:
      - List of members interfaces of the link aggregation group. The value can be
        single interface or list of interfaces.
    required: true
  min_links:
    description:
      - Minimum members that should be up
        before bringing up the link aggregation group.
  device_count:
    description:
      - Number of aggregated ethernet devices that can be configured.
        Acceptable integer value is between 1 and 128.
  description:
    description:
      - Description of Interface.
  state:
    description:
      - State of the link aggregation group.
    default: present
    choices: ['present', 'absent', 'up', 'down']
  active:
    description:
      - Specifies whether or not the configuration is active or deactivated
    default: True
    choices: [True, False]
requirements:
  - ncclient (>=v0.5.2)
notes:
  - This module requires the netconf system service be enabled on
    the remote device being managed
"""
# ansible-doc EXAMPLES block. Fixes vs. the original: the examples used the
# nonexistent option `lacp:` (the module option is `mode`) and the invalid
# `state: delete` (valid choices are present/absent/up/down).
EXAMPLES = """
- name: configure link aggregation
  junos_linkagg:
    name: ae11
    members:
      - ge-0/0/5
      - ge-0/0/6
      - ge-0/0/7
    mode: active
    device_count: 4
    state: present

- name: delete link aggregation
  junos_linkagg:
    name: ae11
    members:
      - ge-0/0/5
      - ge-0/0/6
      - ge-0/0/7
    mode: active
    device_count: 4
    state: absent

- name: deactivate link aggregation
  junos_linkagg:
    name: ae11
    members:
      - ge-0/0/5
      - ge-0/0/6
      - ge-0/0/7
    mode: active
    device_count: 4
    state: present
    active: False

- name: Activate link aggregation
  junos_linkagg:
    name: ae11
    members:
      - ge-0/0/5
      - ge-0/0/6
      - ge-0/0/7
    mode: active
    device_count: 4
    state: present
    active: True

- name: Disable link aggregation
  junos_linkagg:
    name: ae11
    state: down

- name: Enable link aggregation
  junos_linkagg:
    name: ae11
    state: up
"""
RETURN = """
diff:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit interfaces]
+ ge-0/0/6 {
+ ether-options {
+ 802.3ad ae0;
+ }
+ }
[edit interfaces ge-0/0/7]
+ ether-options {
+ 802.3ad ae0;
+ }
[edit interfaces]
+ ae0 {
+ description "configured by junos_linkagg";
+ aggregated-ether-options {
+ lacp {
+ active;
+ }
+ }
+ }
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.junos import commit_configuration, discard_changes, locked_config, get_configuration
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
# Marker telling Ansible this module uses a persistent (netconf) connection.
USE_PERSISTENT_CONNECTION = True
# Default interface description applied when the user supplies none.
DEFAULT_COMMENT = 'configured by junos_linkagg'
def validate_device_count(value, module):
    """Fail the module unless device_count is within the documented 1..128 range.

    A value of None (parameter not supplied) is accepted unchanged.
    """
    # BUG FIX: the original tested truthiness (`if value and ...`), which
    # skipped validation entirely for device_count=0 even though 0 is out of
    # the documented range.
    if value is not None and not 1 <= value <= 128:
        module.fail_json(msg='device_count must be between 1 and 128')
def validate_min_links(value, module):
    """Fail the module unless min_links is within the valid 1..8 range.

    A value of None (parameter not supplied) is accepted unchanged.
    """
    # BUG FIX: the original tested truthiness (`if value and ...`), which
    # skipped validation for min_links=0 even though 0 is out of range.
    if value is not None and not 1 <= value <= 8:
        module.fail_json(msg='min_links must be between 1 and 8')
def validate_param_values(module, obj):
    """Run validate_<param>(value, module) for every key of `obj` that has a
    matching validator in this module's globals; keys without one are skipped."""
    for param in obj:
        checker = globals().get('validate_%s' % param)
        if callable(checker):
            checker(module.params.get(param), module)
def configure_lag_params(module, warnings):
    """Build and load the aggregated-interface (aeX) configuration.

    Maps module params to JUNOS XML via map_params_to_obj/map_obj_to_ele and
    replaces the interface subtree; when device_count is set it additionally
    replaces chassis/aggregated-devices/ethernet. Returns the diff from the
    last load_config call.
    """
    top = 'interfaces/interface'
    param_lag_to_xpath_map = collections.OrderedDict()
    param_lag_to_xpath_map.update([
        ('name', {'xpath': 'name', 'is_key': True}),
        ('description', 'description'),
        ('min_links', {'xpath': 'minimum-links', 'top': 'aggregated-ether-options'}),
        ('disable', {'xpath': 'disable', 'tag_only': True}),
        # The mode value itself becomes the XML tag (active/passive) under lacp.
        ('mode', {'xpath': module.params['mode'], 'tag_only': True, 'top': 'aggregated-ether-options/lacp'}),
    ])

    validate_param_values(module, param_lag_to_xpath_map)

    want = map_params_to_obj(module, param_lag_to_xpath_map)
    ele = map_obj_to_ele(module, want, top)

    diff = load_config(module, tostring(ele), warnings, action='replace')

    if module.params['device_count']:
        top = 'chassis/aggregated-devices/ethernet'
        device_count_to_xpath_map = {'device_count': {'xpath': 'device-count', 'leaf_only': True}}

        validate_param_values(module, device_count_to_xpath_map)

        want = map_params_to_obj(module, device_count_to_xpath_map)
        ele = map_obj_to_ele(module, want, top)

        # NOTE(review): this overwrites the interface diff above, so only the
        # chassis diff is returned when device_count is set -- confirm intended.
        diff = load_config(module, tostring(ele), warnings, action='replace')

    return diff
def configure_member_params(module, warnings, diff=None):
    """Assign (or, for state=absent, unassign) member links to the bundle.

    Temporarily rewrites module.params['name'] to each member interface while
    mapping; 'bundle' carries the aggregate name. Returns the diff of the last
    member load, or the incoming `diff` unchanged when there are no members.
    """
    top = 'interfaces/interface'
    members = module.params['members']

    if members:
        member_to_xpath_map = collections.OrderedDict()
        member_to_xpath_map.update([
            ('name', {'xpath': 'name', 'is_key': True, 'parent_attrib': False}),
            ('bundle', {'xpath': 'bundle', 'leaf_only': True, 'top': 'ether-options/ieee-802.3ad', 'is_key': True}),
        ])

        # link aggregation bundle assigned to member
        module.params['bundle'] = module.params['name']

        for member in members:

            if module.params['state'] == 'absent':
                # if link aggregate bundle is not assigned to member, trying to
                # delete it results in rpc-reply error, hence if is not assigned
                # skip deleting it and continue to next member.
                resp = get_configuration(module)
                bundle = resp.xpath("configuration/interfaces/interface[name='%s']/ether-options/"
                                    "ieee-802.3ad[bundle='%s']" % (member, module.params['bundle']))
                if not bundle:
                    continue

            # Name of member to be assigned to link aggregation bundle
            module.params['name'] = member

            validate_param_values(module, member_to_xpath_map)

            want = map_params_to_obj(module, member_to_xpath_map)
            ele = map_obj_to_ele(module, want, top)

            diff = load_config(module, tostring(ele), warnings)

    return diff
def main():
    """ main entry point for module execution
    """
    argument_spec = dict(
        name=dict(required=True),
        mode=dict(default='on', type='str', choices=['on', 'off', 'active', 'passive']),
        members=dict(type='list'),
        min_links=dict(type='int'),
        device_count=dict(type='int'),
        description=dict(default=DEFAULT_COMMENT),
        state=dict(default='present', choices=['present', 'absent', 'up', 'down']),
        active=dict(default=True, type='bool')
    )

    argument_spec.update(junos_argument_spec)

    # NOTE(review): 'collection' is not defined in argument_spec, so these
    # constraints only ever see 'name' -- likely leftovers; confirm.
    required_one_of = [['name', 'collection']]
    mutually_exclusive = [['name', 'collection']]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False}

    if warnings:
        result['warnings'] = warnings

    state = module.params.get('state')
    # 'up'/'down' map onto state=present plus the 'disable' interface flag.
    module.params['disable'] = True if state == 'down' else False

    if state in ('present', 'up', 'down'):
        module.params['state'] = 'present'
    else:
        # NOTE(review): for state=absent this sets disable=True rather than
        # deleting the bundle here -- confirm this matches the documented
        # 'absent' semantics (member removal happens in configure_member_params).
        module.params['disable'] = True

    if module.params.get('mode') == 'off':
        module.params['mode'] = ''
    elif module.params.get('mode') == 'on':
        # 'on' means LACP enabled in passive mode (see DOCUMENTATION).
        module.params['mode'] = 'passive'

    with locked_config(module):
        diff = configure_lag_params(module, warnings)
        diff = configure_member_params(module, warnings, diff)

        commit = not module.check_mode
        if diff:
            if commit:
                commit_configuration(module)
            else:
                # Check mode: roll the candidate configuration back.
                discard_changes(module)
            result['changed'] = True

            if module._diff:
                result['diff'] = {'prepared': diff}

    module.exit_json(**result)
# Module entry point.
if __name__ == "__main__":
    main()
|
import matlab
import numpy as np
import opennft.config as c
class FD:
    """Accumulates head-motion metrics -- framewise displacement (FD) and
    micro displacement (MD) -- from 6-DOF realignment parameter rows and
    renders the motion-correction plots.

    Rows of `data` are [X, Y, Z, pitch, roll, yaw]; rotation differences are
    scaled by `radius` (mm) when folded into FD. Units of the incoming
    parameters are assumed mm / radians -- TODO confirm with the producer.
    """

    def __init__(self, xmax, module=None):
        """`xmax` is the plot x-axis extent (number of volumes); `module` is
        an optional owner object, stored but not used in this class."""
        self.module = module
        # names of the dofs
        self.names = ['X', 'Y', 'Z', 'pitch', 'roll', 'yaw', 'FD']
        # accepted aliases for each plotting mode
        self.mode = {
            'tr': ['tr', 'translational', 'tr_sa'],
            'rot': ['rot', 'rotational', 'rot_sa'],
            'fd': ['FD', 'fd', 'FD_sa']
        }
        self.plotBgColor = c.PLOT_BACKGROUND_COLOR

        # Seed row of near-zero motion parameters.
        k = np.array(list(matlab.double([[1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])))
        # BUG FIX: np.float was removed in NumPy 1.24; the builtin `float`
        # is the exact equivalent of the old alias.
        self.data = np.array(k).astype(float)

        self.radius = c.DEFAULT_FD_RADIUS
        # threshold[0] -> MD limit, threshold[1:3] -> the two FD levels.
        self.threshold = c.DEFAULT_FD_THRESHOLDS
        self.xmax = xmax

        self.FD = np.array([])
        self.meanFD = 0
        self.MD = np.array([])
        self.meanMD = 0
        # NOTE(review): blockIter is reset to 1 on new DCM blocks but never
        # incremented here, so the running means track the latest sample --
        # confirm whether the caller advances it externally.
        self.blockIter = 1
        self.excFD = [0, 0]
        self.excVD = 0
        # -1 sentinels mean "no threshold-exceeding volume recorded yet".
        self.excFDIndexes_1 = np.array([-1])
        self.excFDIndexes_2 = np.array([-1])
        self.excMDIndexes = np.array([-1])
        self.rsqDispl = np.array([0])

    # FD computation
    def _di(self, i):
        """Translation triple (X, Y, Z) of sample i."""
        return np.array(self.data[i][0:3])

    def _ri(self, i):
        """Rotation triple (pitch, roll, yaw) of sample i."""
        return np.array(self.data[i][3:6])

    def _ij_FD(self, i, j):  # displacement from i to j
        """Framewise displacement between samples i and j: L1 distance of
        translations plus radius-scaled L1 distance of rotations."""
        return sum(np.absolute(self._di(j) - self._di(i))) + \
            sum(np.absolute(self._ri(j) - self._ri(i))) * self.radius

    def all_fd(self):
        """Append the FD of the newest sample, update the running mean, and
        record volumes exceeding the two FD thresholds."""
        i = len(self.data) - 1
        if not self.isNewDCMBlock:
            self.FD = np.append(self.FD, self._ij_FD(i - 1, i))
            self.meanFD = self.meanFD + (self.FD[-1] - self.meanFD) / self.blockIter
        else:
            # First volume of a new DCM block: restart FD accumulation.
            self.FD = np.append(self.FD, 0)
            self.meanFD = 0

        if self.FD[-1] >= self.threshold[1]:
            self.excFD[0] += 1
            if self.excFDIndexes_1[-1] == -1:
                self.excFDIndexes_1 = np.array([i - 1])
            else:
                self.excFDIndexes_1 = np.append(self.excFDIndexes_1, i - 1)

        if self.FD[-1] >= self.threshold[2]:
            self.excFD[1] += 1
            if self.excFDIndexes_2[-1] == -1:
                self.excFDIndexes_2 = np.array([i - 1])
            else:
                self.excFDIndexes_2 = np.append(self.excFDIndexes_2, i - 1)

    def micro_displacement(self):
        """Append the micro displacement (change of the translation-vector
        norm) of the newest sample and track threshold violations."""
        n = len(self.data) - 1
        sqDispl = 0
        if not self.isNewDCMBlock:
            for i in range(3):
                sqDispl += self.data[n, i] ** 2
            self.rsqDispl = np.append(self.rsqDispl, np.sqrt(sqDispl))
            self.MD = np.append(self.MD, abs(self.rsqDispl[-2] - self.rsqDispl[-1]))
            self.meanMD = self.meanMD + (self.MD[-1] - self.meanMD) / self.blockIter
        else:
            self.MD = np.append(self.MD, 0)
            self.meanMD = 0

        if self.MD[-1] >= self.threshold[0]:
            self.excVD += 1
            if self.excMDIndexes[-1] == -1:
                self.excMDIndexes = np.array([n - 1])
            else:
                self.excMDIndexes = np.append(self.excMDIndexes, n - 1)

    def calc_mc_plots(self, data, isNewDCMBlock):
        """Append a new 6-DOF sample row and refresh the MD/FD series."""
        self.isNewDCMBlock = isNewDCMBlock
        self.data = np.vstack((self.data, data))
        self.micro_displacement()
        self.all_fd()
        if isNewDCMBlock:
            self.blockIter = 1

    def draw_mc_plots(self, mdFlag, trPlotitem, rotPlotitem, fdPlotitem):
        """Redraw the translation, rotation, and MD-or-FD plot items."""
        x = np.arange(1, self.data.shape[0] + 1, dtype=np.float64)

        trPlotitem.clear()
        rotPlotitem.clear()
        fdPlotitem.clear()

        for i in range(0, 3):
            trPlotitem.plot(x=x, y=self.data[:, i], pen=c.PLOT_PEN_COLORS[i], name=self.names[i])

        for i in range(3, 6):
            # Rotations are scaled x50 so they are visible on the same axis.
            rotPlotitem.plot(x=x, y=self.data[:, i] * 50, pen=c.PLOT_PEN_COLORS[i], name=self.names[i])

        x = np.arange(1, self.data.shape[0], dtype=np.float64)

        if mdFlag:
            fdPlotitem.setLabel('left', "MD [mm]")
            fdPlotitem.plot(x=x, y=self.MD, pen=c.PLOT_PEN_COLORS[0], name='MD')
            fdPlotitem.plot(x=np.arange(0, self.xmax, dtype=np.float64), y=self.threshold[0] * np.ones(self.xmax),
                            pen=c.PLOT_PEN_COLORS[2], name='thr')
        else:
            fdPlotitem.setLabel('left', "FD [mm]")
            fdPlotitem.plot(x=x, y=self.FD, pen=c.PLOT_PEN_COLORS[0], name='FD')
            thresholds = self.threshold[1:3]
            for i, t in enumerate(thresholds):
                fdPlotitem.plot(x=np.arange(0, self.xmax, dtype=np.float64), y=float(t) * np.ones(self.xmax),
                                pen=c.PLOT_PEN_COLORS[i + 1], name='thr' + str(i))
|
from scipy.linalg import expm, norm
import numpy as np
def rot_mat(axis, theta):
    """Return the 3x3 matrix rotating by `theta` radians about `axis`
    (matrix exponential of the scaled cross-product matrix)."""
    unit_axis = axis / norm(axis)
    skew = np.cross(np.eye(3), unit_axis * theta)
    return expm(skew)
def rotate_vector(v, axis, theta):
    """Apply the rotation of `theta` radians about `axis` to the stacked
    row vectors in `v` (tensordot form of the matrix product)."""
    rotation = rot_mat(axis, theta)
    rotated = np.tensordot(rotation, v, axes=([0], [1]))
    return rotated.T
def rotate_around_z(v, theta):
    """Convenience wrapper: rotate `v` about the +z axis by `theta` radians."""
    z_axis = np.array([0., 0., 1.])
    return rotate_vector(v, z_axis, theta)
def is_odd(num):
    """Return 1 when `num` is odd, 0 when it is even."""
    return num % 2
def is_inside_hexagon(x, y, d=None, x0=0., y0=0.):
    """Boolean mask selecting the (x, y) points that lie inside a regular
    hexagon of size `d` centred at (x0, y0).  When `d` is None it is
    inferred from the vertical extent of `y` (plus a tiny epsilon)."""
    tiny = 10.*np.finfo(float).eps
    if d is None:
        d = y.max() - y.min() + tiny
    rel_x = np.abs(x - x0)/d
    rel_y = np.abs(y - y0)/d
    half = 0.25 * np.sqrt(3.0)
    within_sides = rel_x <= half
    within_caps = half*rel_y + 0.25*rel_x <= 0.5*half
    return np.logical_and(within_sides, within_caps)
def get_hex_plane(plane_idx, inradius, z_height, z_center, np_xy,
                  np_z):
    """Return an (N, 3) array of sample points for one cut plane of a
    hexagonal prism.

    plane_idx 0..5 selects a vertical plane rotated by plane_idx*30 degrees
    about z; plane_idx 6 is the horizontal cross-section at z_center,
    clipped to the hexagon.  np_xy points sample the in-plane direction;
    np_z == 'auto' derives the vertical sampling density from np_xy.
    Raises ValueError for any other plane_idx.
    """
    # We use 10* float machine precision to correct the coordinates
    # to avoid leaving the computational domain due to precision
    # problems
    p_eps = 10.*np.finfo(float).eps
    ri = inradius # short for inradius
    rc = inradius/np.sqrt(3.)*2. # short for circumradius
    if np_z == 'auto':
        np_z = int(np.round(float(np_xy)/2./rc*z_height))
    # XY-plane (no hexagonal shape!)
    if plane_idx == 6:
        X = np.linspace(-ri+p_eps, ri-p_eps, np_xy)
        Y = np.linspace(-rc+p_eps, rc-p_eps, np_xy)
        XY = np.meshgrid(X,Y)
        XYrs = np.concatenate((XY[0][..., np.newaxis],
                               XY[1][..., np.newaxis]),
                              axis=2)
        Z = np.ones((np_xy, np_xy, 1))*z_center
        pl = np.concatenate((XYrs, Z), axis=2)
        # Flatten the grid to a plain point list.
        pl = pl.reshape(-1, pl.shape[-1])
        # Restrict to hexagon
        idx_hex = is_inside_hexagon(pl[:,0], pl[:,1])
        return pl[idx_hex]
    # Vertical planes
    elif plane_idx < 6:
        # Odd planes span circumradius (vertex to vertex), even planes
        # span inradius (edge to edge).
        r = rc if is_odd(plane_idx) else ri
        r = r-p_eps
        xy_line = np.empty((np_xy,2))
        xy_line[:,0] = np.linspace(-r, r, np_xy)
        xy_line[:,1] = 0.
        z_points = np.linspace(0.+p_eps, z_height-p_eps, np_z)
        # Construct the plane
        plane = np.empty((np_xy*np_z, 3))
        for i, xy in enumerate(xy_line):
            for j, z in enumerate(z_points):
                idx = i*np_z + j
                plane[idx, :2] = xy
                plane[idx, 2] = z
        # Rotate the plane
        return rotate_around_z(plane, plane_idx*np.pi/6.)
    else:
        raise ValueError('`plane_idx` must be in [0...6].')
def get_hex_planes_point_list(inradius, z_height, z_center, np_xy, np_z,
                              plane_indices=[0,1,2,3,6]):
    """Stack the requested hex planes into a single point array.

    Returns (points, lengths): `points` is the vertical concatenation of
    every plane's (N_i, 3) sample array and `lengths` holds each N_i so
    the individual planes can be sliced back out (see plane_idx_iter).
    """
    planes = [get_hex_plane(idx, inradius, z_height, z_center, np_xy, np_z)
              for idx in plane_indices]
    plane_sizes = [len(plane) for plane in planes]
    return np.vstack(planes), np.array(plane_sizes)
def hex_planes_point_list_for_keys(keys, plane_indices=[0,1,2,3,6]):
    """Build the hex-plane point list from a simulation `keys` dict.

    Bug fixes:
      * `np_z` was only assigned when 'hex_np_z' was absent, so actually
        supplying 'hex_np_z' raised NameError; it is now read from the dict.
      * `plane_indices` was accepted but never forwarded to
        get_hex_planes_point_list; it is now passed through.

    Note: mutates *keys* by inserting a default 'uol' of 1e-9 when missing
    (unit of length; presumably metres, i.e. nanometre scale -- TODO confirm).
    This side effect is preserved from the original behaviour.

    :return: (points, lengths) as produced by get_hex_planes_point_list
    """
    if not 'uol' in keys:
        keys['uol'] = 1.e-9
    inradius = keys['p'] * keys['uol'] / 2.
    z_height = (keys['h'] + keys['h_sub'] + keys['h_sup']) * keys['uol']
    z_center = (keys['h_sub'] + keys['h']/2.) * keys['uol']
    np_xy = keys['hex_np_xy']
    # 'auto' lets get_hex_plane derive the vertical sampling from np_xy.
    np_z = keys.get('hex_np_z', 'auto')
    return get_hex_planes_point_list(inradius, z_height, z_center, np_xy,
                                     np_z, plane_indices=plane_indices)
def plane_idx_iter(lengths_):
    """Yield (plane_index, idx_i, idx_f) for each plane, where
    pointlist[idx_i:idx_f] is the slice of the flattened point list
    that belongs to that plane.
    """
    for plane_no in range(len(lengths_)):
        lower = lengths_[:plane_no].sum()
        upper = lengths_[:plane_no + 1].sum()
        yield plane_no, lower, upper
def plot_planes(pointlist, lengths):
    """Scatter-plot each plane of a flattened point list in 3D.

    `pointlist` and `lengths` are the outputs of get_hex_planes_point_list;
    each plane gets its own husl-palette color and a legend entry.
    Imports are local so matplotlib/seaborn are only required when plotting.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    colors = sns.color_palette('husl', len(lengths))
    for i, idx_i, idx_f in plane_idx_iter(lengths):
        pl = pointlist[idx_i:idx_f]
        ax.scatter(pl[:,0], pl[:,1], pl[:,2], s=10., c=colors[i],
                   label='plane {}'.format(i+1), linewidth=0.)
    _ = plt.legend(loc='upper left')
|
import requests
import json
class NoComposites(Exception):
    """Domain error exposing the message via a `.message` attribute.

    Fix: forward the message to Exception.__init__ so that str(exc) and
    exc.args carry it too (previously str(exc) was empty).
    """

    def __init__(self, message):
        super(NoComposites, self).__init__(message)
        self.message = message
class NotFoundError(Exception):
    """Domain error exposing the message via a `.message` attribute.

    Fix: forward the message to Exception.__init__ so that str(exc) and
    exc.args carry it too (previously str(exc) was empty).
    """

    def __init__(self, message):
        super(NotFoundError, self).__init__(message)
        self.message = message
class AuthenticationError(Exception):
    """Raised when the API rejects our credentials; exposes `.message`.

    Fix: forward the message to Exception.__init__ so that str(exc) and
    exc.args carry it too (previously str(exc) was empty).
    """

    def __init__(self, message):
        super(AuthenticationError, self).__init__(message)
        self.message = message
class KatelloConnection(object):
    """Thin wrapper around the Katello/Foreman REST APIs.

    Holds an authenticated requests.Session and exposes most collections
    dynamically through __getattr__ (e.g. ``conn.content_views``,
    ``conn.organizations``), plus explicit helpers for publish/promote/
    delete/compute-profile operations.  All helpers return parsed JSON.
    """

    def __init__(self, base_url, username, password, verify, organization):
        """
        :param base_url: Url to connect to
        :type base_url: str
        :param username: Username for querying
        :type username: str
        :param password: Password for querying
        :type password: str
        :param verify: Whether to accept self-signed ssl
        :type verify: bool
        :param organization: Organization to use
        :type organization: str
        :returns: KatelloConnection object
        :rtype: KatelloConnection
        """
        self.organization = organization
        self.base_url = base_url
        self.session = requests.Session()
        self.session.auth = (username, password)
        self.session.verify = verify
        self.post_headers = {'Content-Type': 'application/json'}
        if not verify:
            # Caller explicitly disabled verification; silence the
            # per-request self-signed-certificate warnings.
            from requests.packages.urllib3.exceptions import InsecureRequestWarning
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        # Check if we can authenticate
        if 'results' not in self.session.get('%s/katello/api/v2/organizations' % self.base_url).json():
            raise AuthenticationError('Authentication failed')

    def _query_api(self, url):
        """
        Succesively query the API, appending "?page=<int>" to the url, each time incrementing the int by one.
        When we get no results, return the collected data.
        :param url: Url that needs to be concatenated to the base_url
        :type url: str
        :return: List of dictionaries as returned from the api ['results']
        :rtype: list
        """
        if url.endswith('/'):
            url = url[:-1]
        counter = 1
        results = list()
        while True:
            # Pages are 1-based; an empty 'results' page marks the end.
            page_result = self.session.get(url, params={'page': counter}).json()['results']
            if not page_result:
                break
            results += page_result
            counter += 1
        return results

    def __getattr__(self, item):
        """
        Dynamically defer the requested attribute to the API
        :param item: The attribute we want (content_views, organizations, etc)
        :type item: str
        :return: List of dictionaries as returned from the api call ['results']
        :rtype: list
        """
        if item == 'orgid':
            # Memoized: resolved once via the organizations list, then
            # stored on the instance (so __getattr__ is bypassed next time).
            if 'orgid' not in self.__dict__:
                self.orgid = self._get_orgid()
            return self.orgid
        if item == 'foreman_tasks':
            return self._query_api('%s/foreman_tasks/api/tasks' % self.base_url)
        # Try the Katello API first; fall back to the Foreman API when the
        # response is not JSON-decodable as a Katello collection.
        try:
            return self._get_katello_dict('%s/' % item)
        except ValueError:
            return self._get_foreman_dict('%s/' % item)

    def _get_katello_dict(self, uri, clean=False):
        """
        Call the katello api (https://url/katello/api/v2)
        :param uri: Url to query
        :type uri: str
        :param clean: When true, we do not want to use the organization part in the url
        :type clean: bool
        :return: List of dictionaries as returned from the api call ['results']
        :rtype: list
        """
        if not uri == 'organizations/' and not clean:
            # Prefer the organization-scoped endpoint; fall back to the
            # unscoped one when the response cannot be decoded.
            try:
                return self._query_api('%s/katello/api/v2/organizations/%s/%s' % (self.base_url, self.orgid, uri))
            except ValueError:
                pass
        return self._query_api('%s/katello/api/v2/%s' % (self.base_url, uri))

    def _get_foreman_dict(self, uri):
        """
        Call the foreman api (https://url/api/v2)
        :param uri: Url to query
        :return: List of dictionaries as returned from the api call ['results']
        :rtype: list
        """
        return self._query_api('%s/api/v2/%s' % (self.base_url, uri))

    def _get_orgid(self):
        """
        Get the organizational id of our organization
        :rtype: int
        """
        # NOTE: returns None implicitly when the organization is not found.
        for org in self.organizations:
            if org['name'] == self.organization:
                return org['id']

    def get_version_info(self, version_id):
        # Single-object endpoint: returns the raw JSON dict (not paged).
        return self.session.get(
            '%s/katello/api/content_view_versions/%s' % (self.base_url, version_id),
            headers=self.post_headers,
        ).json()

    def get_compute_profiles(self, profile_id):
        """
        Get the compute profiles
        :param profile_id: the compute profile id
        :type profile_id: int or str
        :return: List of compute profile dicts
        :rtype: list
        """
        return self.session.get(
            '%s/api/compute_profiles/%s' % (self.base_url, profile_id),
            headers=self.post_headers,
        ).json()

    def create_compute_profile(self, json_data=None):
        """
        Create a compute profile
        :param json_data: the json data structure object
        :type json_data: str
        :return: List of newly created compute profile dicts
        :rtype: list
        """
        return self.session.post(
            '%s/api/compute_profiles' % self.base_url,
            data=json_data,
            headers=self.post_headers,
        ).json()

    def add_compute_attributes(self, cr_id, cp_id, data=None):
        """
        Add the compute attributes to the compute profiles
        :param cr_id: compute resource id
        :type cr_id: int or str
        :param cp_id: compute profile id
        :type cp_id: int or str
        :param data: Additional post data
        :type data: dict
        :return: List of compute profile dicts
        :rtype: list
        """
        return self.session.post(
            '%s/api/compute_resources/%s/compute_profiles/%s/compute_attributes' % (self.base_url, cr_id, cp_id),
            data=json.dumps(data),
            headers=self.post_headers,
        ).json()

    def get_collection_contents(self, collection_name):
        """
        Get the hosts that belong to a host_collection
        :param collection_name: Name of the host_collection to query
        :type collection_name: str
        :return: List of host dicts
        :rtype: list
        """
        # NOTE: raises IndexError when the collection name does not exist.
        col_id = [h['id'] for h in self.host_collections if h['name'] == collection_name][0]
        return self._get_katello_dict('host_collections/%s/systems' % col_id, clean=True)

    def publish_view(self, c_id, data=None):
        """
        Publish a new version of a content view.
        :param c_id: content view id
        :type c_id: int or str
        :param data: Additional post data
        :type data: dict
        :return: json output of the api query
        :rtype: dict
        """
        return self.session.post(
            '%s/katello/api/content_views/%s/publish' % (self.base_url, c_id),
            data=json.dumps(data),
            headers=self.post_headers,
        ).json()

    def update_view(self, c_id, data=None):
        """
        Update a content view's attributes.
        :param c_id: content view id
        :type c_id: int or str
        :param data: Additional post data
        :type data: dict
        :return: json output of the api query
        """
        return self.session.put(
            '%s/katello/api/content_views/%s' % (self.base_url, c_id),
            data=json.dumps(data),
            headers=self.post_headers,
        ).json()

    def remove_view_version(self, v_id):
        """
        Delete a content view version.
        :param v_id: content view version id
        :type v_id: int or str
        :return: json output of the api query
        """
        return self.session.delete(
            '%s/katello/api/content_view_versions/%s' % (self.base_url, v_id),
            headers=self.post_headers,
        ).json()

    def promote_view(self, v_id, data=None):
        # Promote a content view version to the environment(s) given in data.
        return self.session.post(
            '%s/katello/api/content_view_versions/%s/promote' % (self.base_url, v_id),
            data=json.dumps(data),
            headers=self.post_headers,
        ).json()
def get_components(datalist, index):
    """
    Return the first dictionary in *datalist* whose value for key
    ``index[0]`` equals ``index[1]``.

    Given datalist =
    [{'name': 'name1', 'val': 'val1'}, {'name': 'name2', 'val': 'val2'}]
    we can select the second item with:
    get_components(datalist, ('name', 'name2'))

    :param datalist: List of dictionaries to search in
    :type datalist: list
    :param index: Tuple to search for. Index0 is the key, Index1 the value
    :type index: tuple
    :return: The first matching dictionary; an EMPTY dict (not None) when
             nothing matches -- callers rely on getting a dict back.
    :rtype: dict
    """
    search_key, search_val = index
    for structure in datalist:
        # Entries that lack the key are simply skipped.
        try:
            if structure[search_key] == search_val:
                return structure
        except KeyError:
            pass
    return dict()
def get_latest_cv_version(versionlist):
    """
    Return the content-view version dict with the numerically highest
    'version' value.

    Fix: the previous implementation round-tripped the version through
    str(float(...)) and looked the dict up again by string, so a version
    of '2' became '2.0', matched nothing, and an empty dict was silently
    returned.  Selecting with max(key=float) keeps the same numeric
    ordering while returning the matching dict directly.

    :param versionlist: list of dicts, each carrying a 'version' key
    :type versionlist: list
    :return: the dict with the largest version
    :rtype: dict
    :raises ValueError: when versionlist is empty (previously IndexError)
    """
    return max(versionlist, key=lambda v: float(v['version']))
|
""" This module deals with relative dates (2d, 5y, Monday, today, etc.) """
from datetime import date, timedelta
import calendar
import re
def _add_months(p_sourcedate, p_months):
"""
Adds a number of months to the source date.
Takes into account shorter months and leap years and such.
https://stackoverflow.com/questions/4130922/how-to-increment-datetime-month-in-python
"""
month = p_sourcedate.month - 1 + p_months
year = p_sourcedate.year + month // 12
month = month % 12 + 1
day = min(p_sourcedate.day, calendar.monthrange(year, month)[1])
return date(year, month, day)
def _convert_pattern(p_length, p_periodunit, p_offset=None):
"""
Converts a pattern in the form [0-9][dwmy] and returns a date from the
offset with the period of time added to it.
"""
result = None
p_offset = p_offset or date.today()
p_length = int(p_length)
if p_periodunit == 'd':
result = p_offset + timedelta(p_length)
elif p_periodunit == 'w':
result = p_offset + timedelta(weeks=p_length)
elif p_periodunit == 'm':
result = _add_months(p_offset, p_length)
elif p_periodunit == 'y':
result = _add_months(p_offset, p_length * 12)
return result
def _convert_weekday_pattern(p_weekday):
"""
Converts a weekday name to an absolute date.
When today's day of the week is entered, it will return today and not next
week's.
"""
day_value = {
'mo': 0,
'tu': 1,
'we': 2,
'th': 3,
'fr': 4,
'sa': 5,
'su': 6
}
target_day_string = p_weekday[:2].lower()
target_day = day_value[target_day_string]
day = date.today().weekday()
shift = (target_day - day) % 7
return date.today() + timedelta(shift)
def relative_date_to_date(p_date, p_offset=None):
    """
    Transform a relative date string into a date object, or None when the
    string is not recognized.  Understood formats:
    * [0-9][dwmy] (optionally negative)
    * 'today' or 'tomorrow'
    * days of the week (full or abbreviated)
    """
    p_date = p_date.lower()
    p_offset = p_offset or date.today()

    relative = re.match('(?P<length>-?[0-9]+)(?P<period>[dwmy])$', p_date, re.I)
    if relative:
        return _convert_pattern(relative.group('length'),
                                relative.group('period'), p_offset)

    weekday_patterns = [
        'mo(n(day)?)?$',
        'tu(e(sday)?)?$',
        'we(d(nesday)?)?$',
        'th(u(rsday)?)?$',
        'fr(i(day)?)?$',
        'sa(t(urday)?)?$',
        'su(n(day)?)?$',
    ]
    weekday = re.match('|'.join(weekday_patterns), p_date)
    if weekday:
        return _convert_weekday_pattern(weekday.group(0))

    if re.match('tod(ay)?$', p_date):
        return _convert_pattern('0', 'd')
    if re.match('tom(orrow)?$', p_date):
        return _convert_pattern('1', 'd')
    return None
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds an integer 'likes' counter (default 0) to the rango 'tapas' model."""

    dependencies = [
        ('rango', '0005_bares_views'),
    ]
    operations = [
        migrations.AddField(
            model_name='tapas',
            name='likes',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
|
# Django settings for the votainteligente project (Python 2 era: djcelery,
# py2 except-syntax further below).  Mostly flat constant assignments.
import sys
import os
import djcelery
from django.conf import settings
# Must run before Celery tasks are imported so djcelery can hook the loader.
djcelery.setup_loader()
from datetime import timedelta
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEFAULT_CANDIDATE_EXTRA_INFO = {
    "portrait_photo": "http://votainteligente.cl/static/img/candidate-default.jpg",
    'custom_ribbon': 'ribbon text'
}
DEFAULT_ELECTION_EXTRA_INFO = {
    "extra": "Extra extra extra"
}
# True when running under `manage.py test`.
TESTING = 'test' in sys.argv
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    'djcelery',
    'django_nose',
    'django.contrib.sitemaps',
    'candidator',
    'taggit',
    'haystack',
    'elections',
    'popolo',
    'markdown_deux',
    'django_extensions',
    'pagination',
    'sorl.thumbnail',
    'django_admin_bootstrapped',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    'tinymce',
    'mathfilters',
    'newsletter',
    'rest_framework',
    'popolorest',
    'writeit',
    # Uncomment the next line to enable admin documentation:
    #'django.contrib.admindocs',
)
SITE_ID = 1
NEWSLETTER_CONFIRM_EMAIL = False
# NOTE(review): pickle-based session serialization executes arbitrary code if
# the session store is ever compromised -- JSONSerializer is the safer default.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
EMAIL_HOST = 'cuttlefish.oaf.org.au'
EMAIL_PORT = 2525
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
THUMBNAIL_DEBUG = True
CANDIDEITORG_URL = 'http://localhost:3002/api/v2/'
CANDIDEITORG_USERNAME = 'admin'
CANDIDEITORG_API_KEY = 'a'
# NOTE(review): this LOGGING dict is overridden by the second LOGGING
# assignment near the end of this module and therefore has no effect.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
BROKER_URL = 'amqp://guest:guest@localhost:5672/'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
# Eager mode: tasks run synchronously in-process (no worker needed).
CELERY_ALWAYS_EAGER = True
CELERYBEAT_SCHEDULE = {
    'pushing-to-writeit-every-2-minutes': {
        'task': 'elections.tasks.send_mails_using_writeit',
        'schedule': timedelta(minutes=2),
    },
}
CELERY_TIMEZONE = 'UTC'
TINYMCE_JS_URL = os.path.join(settings.STATIC_URL, 'js/tiny_mce/tiny_mce.js')
TINYMCE_JS_ROOT = os.path.join(settings.STATIC_URL, 'js/tiny_mce')
TINYMCE_DEFAULT_CONFIG = {
    'plugins': "table,spellchecker,paste,searchreplace",
    'theme': "advanced",
    'cleanup_on_startup': True,
    'custom_undo_redo_levels': 10,
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Separate search index for tests so test runs never clobber real data.
if TESTING:
    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
            'URL': 'http://127.0.0.1:9200/',
            'INDEX_NAME': 'votainteligente_test',
        },
    }
else:
    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
            'URL': 'http://127.0.0.1:9200/',
            'INDEX_NAME': 'votainteligente',
        },
    }
EXTRA_APPS = ()
NAV_BAR = ('profiles', 'questionary', 'soulmate', 'facetoface', 'ask', 'ranking')
WEBSITE_METADATA = {
    'author': u'Name of the author',
    'description': u'A description for the site',
    'keywords': u'some,tags,separated,by,comma'
}
WEBSITE_OGP = {
    'title': u'Title page for Facebook OGP',
    'type': 'website',
    'url': 'http://www.mi-domain.org/',
    'image': 'img/votai-196.png'
}
WEBSITE_DISQUS = {
    'enabled': True,
    'shortname': 'shortname_disqus',
    'dev_mode': 0
}
WEBSITE_GA = {
    'code': 'UA-XXXXX-X',
    'name': 'ga_name',
    'gsite-verification': 'BCyMskdezWX8ObDCMsm_1zIQAayxYzEGbLve8MJmxHk'
}
WEBSITE_IMGUR = {
    # example client_id, only works with 50 pic a day
    'client_id': 'eb18642b5b220484864483b8e21386c3',
}
WEBSITE_GENERAL_SETTINGS = {
    'home_title': 'Lorem ipsum dolor sit amet, consectetur adipisicing elit.'
}
WEBSITE_TWITTER = {
    'hashtags': 'votainteligente'
}
CACHE_MINUTES = 0
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
# This second LOGGING assignment silently replaces the one defined above.
LOGGING = {'version': 1,
           'disable_existing_loggers': True,
           'formatters': {'simple': {'format': '%(asctime)s %(levelname)s %(message)s'}},
           'handlers': {'console': {'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple'},
                        'null': {'level': 'DEBUG',
                                 'class': 'logging.NullHandler',
                                 },
                        },
           'loggers': {'django.db.backends': {'level': 'DEBUG', 'handlers': ['null'], 'propagate': False}}
           }
WRITEIT_NAME = 'votainteligente'
# Placeholder values below are meant to be overridden by local_settings.
INSTANCE_URL = "/api/v1/instance/<THE_INSTANCE_ID>/"
WRITEIT_ENDPOINT = 'http://writeit.ciudadanointeligente.org'
WRITEIT_USERNAME = '<YOUR_USERNAME>'
WRITEIT_KEY = '<YOUR_API_KEY>'
NEW_ANSWER_ENDPOINT = 'NEW_ANSWER_ENDPOINT'
THEME = None
from django.conf import settings
from django_nose import NoseTestSuiteRunner
class DisableMigrations(object):
    """Stand-in for Django's MIGRATION_MODULES mapping: pretends to contain
    every app label and maps each one to the sentinel module name
    "notmigrations", which makes Django skip migrations during tests."""

    def __contains__(self, app_label):
        return True

    def __getitem__(self, app_label):
        return "notmigrations"
class Runner(NoseTestSuiteRunner):
    """Nose test runner that disables Django migrations before running.

    Fix: the override previously dropped the parent's return value; Django's
    test command uses that value (the number of failures) to decide the
    process exit code, so failing tests could exit as success.
    """

    def run_tests(self, test_labels, extra_tests=None):
        settings.MIGRATION_MODULES = DisableMigrations()
        return super(Runner, self).run_tests(test_labels, extra_tests=extra_tests)
# Opt-in switch: set DONT_USE_MIGRATIONS=1 in the environment to run tests
# with the migration-disabling Runner defined above.
DONT_USE_MIGRATIONS = 'DONT_USE_MIGRATIONS' in os.environ.keys() and os.environ['DONT_USE_MIGRATIONS'] == '1'
if DONT_USE_MIGRATIONS:
    TEST_RUNNER = 'votainteligente.votainteligente_settings.Runner'
# Optional machine-local overrides; note the Python 2 'except ..., e' form --
# this module targets Python 2 (bound name e is unused).
try:
    from local_settings import *
except ImportError, e:
    pass
|
import autophy
import sys
# Python 2 script (print statement / raw_input): interactively confirm, then
# wipe every table except taxonomy from the given PHLAWD source database.
if len(sys.argv) < 2:
    print "usage: clean_phlawd_sourcedb.py <sourcedb_filename>"
    sys.exit(0);
db = autophy.Database(sys.argv[1])
response = raw_input("Are you sure you want to wipe the database? All tables except taxonomy will be erased?\n" \
                     "enter yes or no: ")
# Keep prompting until the user gives an explicit yes/no/quit answer.
while response != "yes" and response != "q" and response != "quit" and response != "no":
    response = raw_input("type 'yes' to erase, or 'q', 'quit', or 'no' to quit: ")
if response == "yes":
    db.wipe()
|
'''
Outputs an HTML webpage to stdout to visualize a set of colors
'''
def mkColor(name, color):
    """Build a CSS rule assigning `color` (any CSS color value) to the
    class selector `.color-<name>`."""
    return '.color-' + name + ' {\n\tcolor: ' + color + '\n}\n'
# Terminal-color-name -> CSS color mapping used to build the preview table.
CSS_COLORS = {
    'white': 'Beige',
    'black': 'DarkSlateGrey',
    'blue': 'DarkSlateBlue',
    'green': 'ForestGreen',
    'lightred': 'Tomato',
    'red': 'Crimson',
    'magenta': 'MediumVioletRed',
    'brown': 'Chocolate',
    'yellow': 'GoldenRod',
    'lightgreen': 'LightGreen',
    'cyan': 'LightSeaGreen',
    'lightcyan': 'LightSkyBlue',
    'lightblue': 'RoyalBlue',
    'lightmagenta': 'HotPink',
    'darkgray': 'DimGrey',
    'gray': 'LightSlateGrey',
}
# One .color-<name> rule per entry, plus shared table styling.
css = ''
for color in CSS_COLORS:
    css += mkColor(color, CSS_COLORS[color])
css += '''
table {
width: 40%;
padding: 30px;
}
'''
# One table row per color: plain label, colored text, and a swatch cell.
content = ''
for color in CSS_COLORS:
    content += ('<tr><td>{}</td><td class="color-{}">{}</td>' +
                '<td style="background-color: {}; padding-left:200px;">' +
                '</td></tr>\n').format(
        color, color, color, CSS_COLORS[color])
# Render the same rows twice: once on a white table, once on a black one.
content = '''<table style="background-color: white; color: black; float: left">
{}
</table>
<table style="background-color: black; color: white;">
{}
</table>
'''.format(content, content)
page = '''
<html>
<head>
<style>
''' + css + '''
</style>
</head>
<body>
''' + content + '''
</html>'''
print(page)
|
import boto3
# Regions swept by the Lambda handler (AWS China partition).
regions_name=['cn-northwest-1', 'cn-north-1']
# Only stopped instances explicitly tagged AutoStart=true are started.
start_filter = [
    {'Name': 'tag:AutoStart', 'Values': ['true', 'True', 'TRUE']},
    {'Name': 'instance-state-name', 'Values': ['stopped']}
]
def regional_start_ec2(region_name='cn-northwest-1'):
    """Start every stopped EC2 instance in `region_name` that matches the
    module-level `start_filter` (AutoStart-tagged, currently stopped)."""
    print("Auto Start EC2 in region {}".format(region_name))
    ec2_resource = boto3.resource('ec2', region_name=region_name)
    matching = ec2_resource.instances.filter(Filters=start_filter)
    instance_ids = [inst.id for inst in matching]
    # Guard clause: nothing to do when no instance matched the filter.
    if not instance_ids:
        print("No EC2 instances found to start")
        return
    print("AutoStart Instances:{}".format(instance_ids))
    start_response = matching.start()
    print("Start Response:{}".format(start_response))
def lambda_handler(event, context):
    """AWS Lambda entry point: run the auto-start sweep over every
    configured region (event/context are unused)."""
    for target_region in regions_name:
        regional_start_ec2(target_region)
    print('Auto Start EC2 Done')
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(974, 502)
MainWindow.setAutoFillBackground(False)
MainWindow.setStyleSheet("background-color:#301010;")
MainWindow.setDocumentMode(False)
MainWindow.setUnifiedTitleAndToolBarOnMac(True)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.frameTab = QtWidgets.QWidget()
self.frameTab.setObjectName("frameTab")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frameTab)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.framelayout = QtWidgets.QHBoxLayout()
self.framelayout.setObjectName("framelayout")
self.videoframe = QtWidgets.QFrame(self.frameTab)
self.videoframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.videoframe.setFrameShadow(QtWidgets.QFrame.Raised)
self.videoframe.setObjectName("videoframe")
self.framelayout.addWidget(self.videoframe)
self.horizontalLayout_3.addLayout(self.framelayout)
self.tabWidget.addTab(self.frameTab, "")
self.playlistTab = QtWidgets.QWidget()
self.playlistTab.setObjectName("playlistTab")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.playlistTab)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.playlistTable = QtWidgets.QTableWidget(self.playlistTab)
self.playlistTable.setSizeIncrement(QtCore.QSize(100, 0))
self.playlistTable.setStyleSheet("background-color:#481616; color:white;")
self.playlistTable.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.playlistTable.setObjectName("playlistTable")
self.playlistTable.setColumnCount(0)
self.playlistTable.setRowCount(0)
self.playlistTable.horizontalHeader().setStretchLastSection(True)
self.playlistTable.verticalHeader().setVisible(False)
self.horizontalLayout_7.addWidget(self.playlistTable)
self.musicTable = QtWidgets.QTableWidget(self.playlistTab)
self.musicTable.setStyleSheet("background-color:#481616; color:white;")
self.musicTable.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.musicTable.setObjectName("musicTable")
self.musicTable.setColumnCount(0)
self.musicTable.setRowCount(0)
self.musicTable.horizontalHeader().setStretchLastSection(True)
self.musicTable.verticalHeader().setVisible(False)
self.horizontalLayout_7.addWidget(self.musicTable)
self.horizontalLayout_5.addLayout(self.horizontalLayout_7)
self.tabWidget.addTab(self.playlistTab, "")
self.verticalLayout.addWidget(self.tabWidget)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.currentTime = QtWidgets.QLabel(self.centralwidget)
self.currentTime.setMinimumSize(QtCore.QSize(57, 0))
self.currentTime.setMaximumSize(QtCore.QSize(57, 16777215))
self.currentTime.setStyleSheet("color:white")
self.currentTime.setObjectName("currentTime")
self.horizontalLayout_8.addWidget(self.currentTime)
self.positionSlider = QtWidgets.QSlider(self.centralwidget)
self.positionSlider.setOrientation(QtCore.Qt.Horizontal)
self.positionSlider.setObjectName("positionSlider")
self.horizontalLayout_8.addWidget(self.positionSlider)
self.totalTime = QtWidgets.QLabel(self.centralwidget)
self.totalTime.setMinimumSize(QtCore.QSize(57, 0))
self.totalTime.setMaximumSize(QtCore.QSize(57, 16777215))
self.totalTime.setStyleSheet("color:white")
self.totalTime.setObjectName("totalTime")
self.horizontalLayout_8.addWidget(self.totalTime)
self.verticalLayout_2.addLayout(self.horizontalLayout_8)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.playButton = QtWidgets.QPushButton(self.centralwidget)
self.playButton.setMinimumSize(QtCore.QSize(80, 0))
self.playButton.setMaximumSize(QtCore.QSize(80, 16777215))
self.playButton.setStyleSheet("color:white")
self.playButton.setObjectName("playButton")
self.horizontalLayout_4.addWidget(self.playButton)
self.stopButton = QtWidgets.QPushButton(self.centralwidget)
self.stopButton.setMinimumSize(QtCore.QSize(80, 0))
self.stopButton.setMaximumSize(QtCore.QSize(80, 16777215))
self.stopButton.setStyleSheet("color:white")
self.stopButton.setObjectName("stopButton")
self.horizontalLayout_4.addWidget(self.stopButton)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setMinimumSize(QtCore.QSize(50, 0))
self.label_2.setText("")
self.label_2.setObjectName("label_2")
self.horizontalLayout_4.addWidget(self.label_2)
self.volumeSlider = QtWidgets.QSlider(self.centralwidget)
self.volumeSlider.setMinimumSize(QtCore.QSize(50, 0))
self.volumeSlider.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.volumeSlider.setOrientation(QtCore.Qt.Horizontal)
self.volumeSlider.setObjectName("volumeSlider")
self.horizontalLayout_4.addWidget(self.volumeSlider)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.horizontalLayout.addLayout(self.horizontalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 974, 22))
self.menubar.setDefaultUp(True)
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuBad_Player = QtWidgets.QMenu(self.menubar)
self.menuBad_Player.setAutoFillBackground(False)
self.menuBad_Player.setObjectName("menuBad_Player")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionAddPlaylist = QtWidgets.QAction(MainWindow)
self.actionAddPlaylist.setObjectName("actionAddPlaylist")
self.actionAddMedia = QtWidgets.QAction(MainWindow)
self.actionAddMedia.setObjectName("actionAddMedia")
self.actionAddMusicFromYoutube = QtWidgets.QAction(MainWindow)
self.actionAddMusicFromYoutube.setObjectName("actionAddMusicFromYoutube")
self.actionAddMusicFromSoundcloud = QtWidgets.QAction(MainWindow)
self.actionAddMusicFromSoundcloud.setObjectName("actionAddMusicFromSoundcloud")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionOpenDownloader = QtWidgets.QAction(MainWindow)
self.actionOpenDownloader.setObjectName("actionOpenDownloader")
self.actionParameters = QtWidgets.QAction(MainWindow)
self.actionParameters.setCheckable(False)
self.actionParameters.setObjectName("actionParameters")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionAddPlaylist)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionAddMedia)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionOpenDownloader)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuBad_Player.addAction(self.actionParameters)
self.menuBad_Player.addAction(self.actionAbout)
self.menubar.addAction(self.menuBad_Player.menuAction())
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply all user-visible strings to the widgets built by setupUi().

        Generated by pyuic5. Qt keeps this separate from setupUi() so a
        running application can re-apply translations (e.g. after a locale
        change) without rebuilding the widget tree.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        # tab captions (looked up by widget, not by fixed index)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.frameTab), _translate("MainWindow", "Player"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.playlistTab), _translate("MainWindow", "Playlists"))
        # transport controls and time readouts
        self.currentTime.setText(_translate("MainWindow", "00:00:00"))
        self.totalTime.setText(_translate("MainWindow", "00:00:00"))
        self.playButton.setText(_translate("MainWindow", "Play"))
        self.stopButton.setText(_translate("MainWindow", "Stop"))
        # menu bar titles and actions
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuBad_Player.setTitle(_translate("MainWindow", "Bad Player"))
        self.actionOpen.setText(_translate("MainWindow", "Open"))
        self.actionAddPlaylist.setText(_translate("MainWindow", "Add playlist"))
        self.actionAddMedia.setText(_translate("MainWindow", "Add music from file or url"))
        self.actionAddMusicFromYoutube.setText(_translate("MainWindow", "Add music from Youtube"))
        self.actionAddMusicFromSoundcloud.setText(_translate("MainWindow", "Add music from Soundcloud"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
        self.actionOpenDownloader.setText(_translate("MainWindow", "Open downloader"))
        self.actionParameters.setText(_translate("MainWindow", "Parameters"))
        self.actionAbout.setText(_translate("MainWindow", "About"))
if __name__ == "__main__":
    # standalone preview: build the generated UI on a bare QMainWindow and
    # run the Qt event loop until the window is closed
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
"""
Create cluster job files to align a batch of reads to a genome, using bwa.
"""
from sys import argv,stdin,stdout,stderr,exit
from re import compile
from math import ceil
def usage(s=None):
    """Print the command-line help (optionally preceded by the error
    message s) and terminate the program by raising SystemExit.

    Fixes relative to the previous revision: the "(by default ..." note was
    missing its closing parenthesis; the "--id" line of the example ended
    with a single backslash, which Python treats as a string line
    continuation and so silently glued two lines of the example together;
    and "on" was corrected to "one" in the --subblocks description.
    """
    message = """
usage: make_bwa_jobs [options]
  --base=<path>             path prefix; other filenames can use "{base}" to
                            refer to this path
  --ref=<filename>          (mandatory) name of reference fasta file
                            we also expect a bwa reference index in
                            <filename>.bwt, <filename>.pac, <filename>.ann,
                            <filename>.amb, and <filename>.sa
  --reads=<filename>        (mandatory,cumulative) name of query fastq file(s)
                            these must contain {mate} to indicate "1" or "2",
                            and may optionally contain a short identifier as a
                            prefix ending in ":"; e.g.
                              W1:reads/MQ854_K763WCJIIW_s_1_{mate}.fastq
  --phred64to33             convert fastq files from phred+64 to phred+33 'on
                            the fly'
  --readgroupinfo=<string>  read group info string to shove into the sam header,
                            e.g. "ID:SRR748091 SM:SAMN01920490 PL:Illumina LB:lib1 PU:unknown"
                            (this is only supported for sampe)
  --id=<string>             unique identifier for this project
  --job=<path>              (mandatory) where to create job files
  --results=<path>          (mandatory) where to write alignment results
  --subblocks=[<L>:]<filespec>  create several jobs for each reads file, with
                            each job dealing with one "block" of the reads;
                            <filespec> is the name of a file suitable for use
                            with extract_blocks (a tabulated index); <L> is
                            the number of lines (in that index) to process in
                            each job; if there is more than one reads file,
                            <filespec> must contain "{reads}" which is
                            replaced by the reads file name; a typical
                            <filespec> is:
                              {reads}.tabulated
  --exe:bwa=<filename>      location of bwa executable
  --aln=<params>            (cumulative) parameters to pass to bwa aln
  --sampe=<params>          (cumulative) parameters to pass to bwa sampe
  --mem[=<params>]          (cumulative) parameters to pass to bwa mem
                            (by default we run "bwa aln" and "bwa sampe")
  --mem:unpaired[=<params>] (cumulative) parameters to pass to bwa mem, and
                            we're aligning reads as unpaired
  --sort=<params>           (cumulative) parameters to pass to samtools sort
  --memory:aln=<bytes>      run-time memory limit for aln (e.g. "5.6G")
                            add ":ulimit" to enforce this in the shell script
  --memory:sampe=<bytes>    run-time memory limit for sampe (e.g. "5.6G")
                            add ":ulimit" to enforce this in the shell script
  --memory:mem=<bytes>      run-time memory limit for mem (e.g. "5.6G")
                            add ":ulimit" to enforce this in the shell script
  --threads:aln=<number>    number of threads for aln
  --threads:sampe=<number>  number of threads for sampe
  --threads:mem=<number>    number of threads for mem
  --initialize=<text>       (cumulative) shell command to add to job beginning
                            "shebang:bash" is mapped "#!/usr/bin/env bash"
                            other commands are copied "as is"
  --nojobnames              inhibit job names in submission list
  --outputas=sam            output in sam format (this is the default)
  --outputas=bam            output in bam format
  --outputas=bamsorted      output in sorted bam format

Typical command line:
  make_bwa_jobs \\
    --base=" /home/username/projects/orange" \\
    --ref=" {base}/reference/MalusDomestica0.fa" \\
    --id=" apple" \\
    --reads=" {id}1:reads/MQ854_K763WCJIIW_s_1_{mate}.fastq" \\
    --job=" {base}/jobs" \\
    --results="{base}/results" \\
    --aln=" -I -q 15 -l 35 -k 2 -n 0.04 -o 2 -e 6" \\
    --sampe=" -P -n 100 -N 100" \\
    --memory:aln=" 5.6G" \\
    --memory:sampe="6.2G" """
    if (s == None): exit (message)
    else:           exit ("%s%s" % (s,message))
def main():
    """Parse the command line, then write one shell-script job file per
    alignment task and a master job-list file naming them all.

    All parsed settings are published as module-level globals so that the
    create_*_job() writers can read them.
    """
    global basePath,refFilename,readsFilespecs,jobDirectory,resultsDirectory
    global bwaProgramName,commandParams,phred64to33
    global bashInitializers,maxMemory,numThreads,readGroupInfo
    global jobId,giveJobsNames,jobNumber
    global outputAs
    global debug
    # parse the command line
    basePath = None
    refFilename = None
    readsFilespecs = None
    phred64to33 = False
    readGroupInfo = None
    jobId = None
    jobDirectory = None
    resultsDirectory = None
    subsampleFileSpec = None
    subsampleBlockSize = None
    bwaProgramName = None
    aligner = "aln"    # "aln" means the two-step "bwa aln" + "bwa sampe" pipeline
    commandParams = {}
    maxMemory = {}
    numThreads = {}
    bashInitializers = ["set -eu"]
    giveJobsNames = True
    outputAs = "sam"
    debug = []
    for arg in argv[1:]:
        # argVal is only meaningful for "--option=value" style arguments
        if ("=" in arg):
            argVal = arg.split("=",1)[1].strip()
        if (arg.startswith("--base=")) or (arg.startswith("--basepath=")) or (arg.startswith("--path=")):
            basePath = argVal
        elif (arg.startswith("--reference=")) or (arg.startswith("--ref=")):
            refFilename = argVal
        elif (arg.startswith("--exe:bwa=")) or (arg.startswith("--bwaexe=")):
            bwaProgramName = argVal
        elif (arg.startswith("--reads=")):
            if (readsFilespecs == None): readsFilespecs = []
            readsFilespecs += argVal.split(",")
        elif (arg == "--phred64to33"):
            phred64to33 = True
        elif (arg.startswith("--readgroupinfo=")):
            # collapse whitespace to literal "\t" so bwa's -r receives tabs
            readGroupInfo = "\\t".join(argVal.split())
        elif (arg.startswith("--id=")):
            jobId = argVal
        elif (arg.startswith("--job=")):
            jobDirectory = argVal
        elif (arg.startswith("--results=")):
            resultsDirectory = argVal
        elif (arg.startswith("--subblocks=")):
            # we allow --subblocks=spec or --subblocks=L:spec
            subsampleFileSpec = subsampleBlockSize = None
            if (":" not in argVal):
                subsampleBlockSize = 1
                subsampleFileSpec = argVal
            else:
                (subsampleBlockSize,subsampleFileSpec) = argVal.split(":",1)
                subsampleBlockSize = int(subsampleBlockSize)
                assert (subsampleBlockSize >= 1)
        elif (arg.startswith("--aln=")) or (arg.startswith("--params:aln=")):
            params = argVal
            if ("aln" not in commandParams): commandParams["aln"] = []
            commandParams["aln"] += [params]
        elif (arg.startswith("--sampe=")) or (arg.startswith("--params:sampe=")):
            params = argVal
            if ("sampe" not in commandParams): commandParams["sampe"] = []
            commandParams["sampe"] += [params]
        elif (arg.startswith("--mem=")) or (arg.startswith("--params:mem=")):
            params = argVal
            if ("mem" not in commandParams): commandParams["mem"] = []
            commandParams["mem"] += [params]
            aligner = "mem"
        elif (arg == "--mem"):
            aligner = "mem"
        elif (arg.startswith("--mem:unpaired=")) or (arg.startswith("--params:memunpaired=")):
            params = argVal
            if ("mem" not in commandParams): commandParams["mem"] = []
            commandParams["mem"] += [params]
            aligner = "mem unpaired"
        elif (arg == "--mem:unpaired"):
            aligner = "mem unpaired"
        elif (arg.startswith("--sort=")) or (arg.startswith("--params:sort=")):
            params = argVal
            if ("sort" not in commandParams): commandParams["sort"] = []
            commandParams["sort"] += [params]
        elif (arg.startswith("--memory:aln=")):
            # the ":ulimit" suffix additionally makes the job script enforce
            # the limit via "ulimit -v"
            if (argVal.endswith(":ulimit")):
                argVal = argVal[:-len(":ulimit")]
                maxMemory["aln"] = argVal
                maxMemory["aln:ulimit"] = True
            else:
                maxMemory["aln"] = argVal
        elif (arg.startswith("--memory:sampe=")):
            if (argVal.endswith(":ulimit")):
                argVal = argVal[:-len(":ulimit")]
                maxMemory["sampe"] = argVal
                maxMemory["sampe:ulimit"] = True
            else:
                maxMemory["sampe"] = argVal
        elif (arg.startswith("--memory:mem=")):
            if (argVal.endswith(":ulimit")):
                argVal = argVal[:-len(":ulimit")]
                maxMemory["mem"] = argVal
                maxMemory["mem:ulimit"] = True
            else:
                maxMemory["mem"] = argVal
        elif (arg.startswith("--threads:aln=")):
            val = int(argVal)
            assert (val > 0)
            if (val > 1): numThreads["aln"] = val
        elif (arg.startswith("--threads:sampe=")):
            val = int(argVal)
            assert (val > 0)
            if (val > 1): numThreads["sampe"] = val
        elif (arg.startswith("--threads:mem=")):
            val = int(argVal)
            assert (val > 0)
            if (val > 1): numThreads["mem"] = val
        elif (arg.startswith("--initialize=")) or (arg.startswith("--init=")):
            if (argVal == "shebang:bash"):
                argVal = "#!/usr/bin/env bash"
            # an explicit "set -eu" replaces the default one rather than
            # appearing twice
            if (argVal == "set -eu"):
                bashInitializers = [x for x in bashInitializers if (x != "set -eu")]
            bashInitializers += [argVal]
        elif (arg == "--nojobnames"):
            giveJobsNames = False
        elif (arg.startswith("--outputas=")) and (argVal == "sam"):
            outputAs = "sam"
        elif (arg.startswith("--outputas=")) and (argVal == "bam"):
            outputAs = "bam"
        elif (arg.startswith("--outputas=")) and (argVal in ["bamsorted","sortedbam"]):
            outputAs = "sorted bam"
        elif (arg == "--debug"):
            debug += ["debug"]
        elif (arg.startswith("--debug=")):
            debug += argVal.split(",")
        elif (arg.startswith("--")):
            usage("unrecognized option: %s" % arg)
        else:
            usage("unrecognized option: %s" % arg)
    # validate params
    assert (refFilename != None)
    assert (readsFilespecs != None)
    assert (jobDirectory != None)
    assert (resultsDirectory != None)
    if (len(readsFilespecs) > 1) and (subsampleFileSpec != None):
        assert ("{reads}" in subsampleFileSpec)
    # perform filename substitution
    if (basePath != None) and (basePath.endswith("/")):
        basePath = basePath[:-1]
    refFilename = do_filename_substitutition(refFilename)
    for (ix,filename) in enumerate(readsFilespecs):
        readsFilespecs[ix] = do_filename_substitutition(filename)
        if (aligner != "mem unpaired"):
            # paired alignment requires a {mate} placeholder in each spec
            assert ("{mate}" in readsFilespecs[ix])
    if (subsampleFileSpec != None):
        subsampleFileSpec = do_filename_substitutition(subsampleFileSpec)
    jobDirectory = do_filename_substitutition(jobDirectory)
    if (jobDirectory.endswith("/")):
        jobDirectory = jobDirectory[:-1]
    resultsDirectory = do_filename_substitutition(resultsDirectory)
    if (resultsDirectory.endswith("/")):
        resultsDirectory = resultsDirectory[:-1]
    if (bwaProgramName == None):
        bwaProgramName = "bwa"
    else:
        bwaProgramName = do_filename_substitutition(bwaProgramName)
    for command in commandParams:
        for (ix,param) in enumerate(commandParams[command]):
            commandParams[command][ix] = do_filename_substitutition(param)
    for (ix,bashInitializer) in enumerate(bashInitializers):
        bashInitializer = do_filename_substitutition(bashInitializer)
        bashInitializers[ix] = bashInitializer
    # reject options that belong to the aligner mode NOT selected
    if (aligner == "aln"):
        assert ("mem" not in commandParams)
        assert ("mem" not in maxMemory)
        assert ("mem" not in numThreads)
    elif (aligner == "mem"):
        assert ("aln" not in commandParams)
        assert ("aln" not in maxMemory)
        assert ("aln" not in numThreads)
        assert ("sampe" not in commandParams)
        assert ("sampe" not in maxMemory)
        assert ("sampe" not in numThreads)
    elif (aligner == "mem unpaired"):
        assert ("aln" not in commandParams)
        assert ("aln" not in maxMemory)
        assert ("aln" not in numThreads)
        assert ("sampe" not in commandParams)
        assert ("sampe" not in maxMemory)
        assert ("sampe" not in numThreads)
    if (phred64to33):
        assert (aligner in ["mem","mem unpaired"])
    # convert memory specifications
    # a "{threads}*" (or "*{threads}") factor scales the per-thread limit by
    # the configured thread count before the ulimit is computed
    if ("aln:ulimit" in maxMemory):
        multiplier = 1
        val = maxMemory["aln"]
        if (val.startswith("{threads}*")):
            if ("aln" in numThreads): multiplier = numThreads["aln"]
            val = val.replace("{threads}*","")
        elif (val.endswith("*{threads}")):
            if ("aln" in numThreads): multiplier = numThreads["aln"]
            val = val.replace("*{threads}","")
        maxMemory["aln"] = int_with_unit(val) * multiplier
    elif ("aln" in maxMemory):
        val = maxMemory["aln"]
        maxMemory["aln"] = int_with_unit(val)
    if ("sampe:ulimit" in maxMemory):
        multiplier = 1
        val = maxMemory["sampe"]
        if (val.startswith("{threads}*")):
            if ("sampe" in numThreads): multiplier = numThreads["sampe"]
            val = val.replace("{threads}*","")
        elif (val.endswith("*{threads}")):
            if ("sampe" in numThreads): multiplier = numThreads["sampe"]
            val = val.replace("*{threads}","")
        maxMemory["sampe"] = int_with_unit(val) * multiplier
    elif ("sampe" in maxMemory):
        val = maxMemory["sampe"]
        maxMemory["sampe"] = int_with_unit(val)
    if ("mem:ulimit" in maxMemory):
        multiplier = 1
        val = maxMemory["mem"]
        if (val.startswith("{threads}*")):
            if ("mem" in numThreads): multiplier = numThreads["mem"]
            val = val.replace("{threads}*","")
        elif (val.endswith("*{threads}")):
            if ("mem" in numThreads): multiplier = numThreads["mem"]
            val = val.replace("*{threads}","")
        maxMemory["mem"] = int_with_unit(val) * multiplier
    elif ("mem" in maxMemory):
        val = maxMemory["mem"]
        maxMemory["mem"] = int_with_unit(val)
    # read the sub-block tabulation files, to determine their lengths
    if ("subblocks" in debug):
        # NOTE(review): if --debug=subblocks is given without --subblocks,
        # the %d below is applied to None and will raise -- confirm intended
        print >>stderr, "subsampleFileSpec = %s" % subsampleFileSpec
        print >>stderr, "subsampleBlockSize = %d" % subsampleBlockSize
    readsToTabulation = None
    if (subsampleFileSpec == None):
        subsampleN = None
    else:
        readsToTabulation = {}
        subsampleN = {}
        subsampleW = 1
        for readsFilespec in readsFilespecs:
            # nota bene: we assume the tabulation files for both mates have the
            # same number of lines
            (_,_,readsMatespec) = interpret_filespec(readsFilespec,replaceMate=False)
            subsampleFilename = subsampleFileSpec.replace("{reads}",readsMatespec)
            subsampleFilename = subsampleFilename.replace("{mate}", "1")
            numLines = number_of_lines_in(subsampleFilename)
            # NOTE(review): readsToTabulation is populated here but the
            # create_*_job() writers hardcode "<reads>.tabulated" instead of
            # consulting it -- verify these agree for non-default --subblocks
            readsToTabulation[readsFilespec] = subsampleFilename
            subLinesList = []
            for startLine in xrange(0,numLines,subsampleBlockSize):
                endLine = min(startLine+subsampleBlockSize,numLines)
                subLinesList += ["%d..%d" % (startLine+1,endLine)]
            subsampleN[readsFilespec] = subLinesList
            # subsampleW is the digit width used to zero-pad block numbers
            subsampleW = max(subsampleW,len(str(len(subLinesList))))
            if ("subblocks" in debug):
                print >>stderr, "subsampleN[%s] = #%d [%s]" \
                    % (readsFilespec,len(subLinesList),",".join(subLinesList))
    # write the jobs
    jobs = []
    jobNumber = -1
    if (aligner == "aln"):
        # mates 1 and 2 each get an aln job; "P" is the sampe job that
        # depends on both of them
        for jobSpec in job_specs(readsFilespecs,mateSet=[1,2,"P"],subsampleN=subsampleN):
            jobNumber += 1
            (readsFilespec,mate,subK,subLines) = jobSpec
            if (subK != None): subK = "%0*d" % (subsampleW,subK)
            if (mate == 1): dependencies = [jobNumber]
            elif (mate != "P"): dependencies += [jobNumber]
            if (type(mate) == int):
                jobInfo = create_aln_job(readsFilespec,mate,subK,subLines)
            elif (mate == "P"):
                jobInfo = create_sampe_job(readsFilespec,subK,subLines,dependencies)
            else:
                assert (False), "Internal Error: mate = \"%s\"" % mate
            jobs += [jobInfo]
    elif (aligner == "mem"):
        for jobSpec in job_specs(readsFilespecs,mateSet=["P"],subsampleN=subsampleN):
            jobNumber += 1
            (readsFilespec,mate,subK,subLines) = jobSpec
            if (subK != None): subK = "%0*d" % (subsampleW,subK)
            jobInfo = create_mem_job(readsFilespec,subK,subLines)
            jobs += [jobInfo]
    elif (aligner == "mem unpaired"):
        for jobSpec in job_specs(readsFilespecs,mateSet=[None],subsampleN=subsampleN):
            jobNumber += 1
            (readsFilespec,mate,subK,subLines) = jobSpec
            if (subK != None): subK = "%0*d" % (subsampleW,subK)
            jobInfo = create_mem_unpaired_job(readsFilespec,subK,subLines)
            jobs += [jobInfo]
    # print the jobs list file
    fn =  [jobDirectory,"/"]
    if (jobId != None): fn += [jobId]
    fn += [".bwa_map"]
    fn += [".job_list"]
    fn += [".txt"]
    fn =  "".join(fn)
    print >>stderr, "writing \"%s\"" % fn
    f = file(fn, "wt")
    print >>f, "\n".join(jobs)
    f.close()
def create_aln_job(readsFilespec,mate,subK,subLines):
    """Write one "bwa aln" job script for a single mate of a reads file.

    subK is the zero-padded sub-block number (or None), subLines the
    "start..end" line range for extract_block (or None).  Returns the
    job-list entry string for this job.
    """
    (dataset,readsId,_) = interpret_filespec(readsFilespec)
    readsFilename = do_reads_filename_substitutition(readsFilespec,mate)
    # determine job name
    jobName = []
    if (jobId != None): jobName += [jobId]
    jobName += [dataset]
    if (subK != None): jobName += [subK]
    jobName += [str(mate)]
    jobName = ".".join(jobName)
    resultsFilename = resultsDirectory + "/" + jobName + ".sai"
    if (giveJobsNames):
        shortJobName = []
        if (readsId != None): shortJobName += [readsId]
        else: shortJobName += [str(jobNumber)]
        if (subK != None): shortJobName += [subK]
        shortJobName += [str(mate)]
        shortJobName = "_".join(shortJobName)
    # create the job file, and the entry for the jobs list
    fn = jobDirectory + "/" + jobName + ".sh"
    print >>stderr, "writing \"%s\"" % fn
    jobF = file(fn,"wt")
    jobInfo = []
    if (giveJobsNames): jobInfo += ["%d %s %s" % (jobNumber,shortJobName,fn)]
    else: jobInfo += ["%d %s" % (jobNumber,fn)]
    # memory is reported to the scheduler in (rounded-up) gigabytes
    if ("aln" in maxMemory): jobInfo += ["--memory=%d" % int(ceil(maxMemory["aln"]/1000000000.0))]
    if ("aln" in numThreads): jobInfo += ["--cores=%d" % numThreads["aln"]]
    # write the job file
    if (bashInitializers == None): myBashInitializers = []
    else: myBashInitializers = [x for x in bashInitializers]
    if ("aln" in maxMemory):
        # ulimit -v takes kilobytes, hence the /1024
        memPages = int(ceil(maxMemory["aln"]/1024.0))
        myBashInitializers += ["ulimit -v %d # %s bytes" \
            % (memPages,commatize(memPages*1024))]
    if (myBashInitializers != []):
        print >>jobF, "\n".join(myBashInitializers)
        print >>jobF
    pipe = []
    if (subLines != None):
        # NOTE(review): assumes the tabulation index lives at
        # <reads>.tabulated, independent of the --subblocks filespec --
        # confirm against main()'s readsToTabulation
        command = ["extract_block"]
        command += ["--block=%s.tabulated:%s" % (readsFilename,subLines)]
        command += ["--path=%s" % readsFilename]
        pipe += [" \\\n ".join(command)]
    command = ["%s aln " % bwaProgramName]
    if ("aln" in commandParams): command += [" ".join(commandParams["aln"])]
    if ("aln" in numThreads): command += ["-t %d" % numThreads["aln"]]
    command += [refFilename]
    if (pipe == []): command += [readsFilename]
    else: command += ["/dev/stdin"]
    if (pipe == []): pipe += [" \\\n ".join(command)]
    else: pipe += [" | %s" % "\\\n ".join(command)]
    pipe += [" > %s" % resultsFilename]
    print >>jobF, "time " + " \\\n".join(pipe)
    jobF.close()
    return " ".join(jobInfo)
def create_sampe_job(readsFilespec,subK,subLines,dependencies):
    """Write one "bwa sampe" job script combining the two mates' .sai files.

    dependencies is the list of job numbers of the two aln jobs this job
    must wait for.  Returns the job-list entry string for this job.
    """
    (dataset,readsId,_) = interpret_filespec(readsFilespec)
    readsFilename1 = do_reads_filename_substitutition(readsFilespec,1)
    readsFilename2 = do_reads_filename_substitutition(readsFilespec,2)
    if (dependencies == []): dependencies = None
    # determine job name
    jobName = []
    if (jobId != None): jobName += [jobId]
    jobName += [dataset]
    if (subK != None): jobName += [subK]
    jobName += ["{mate}"]
    jobName = ".".join(jobName)
    if (outputAs == "bam"): resultsExt = ".bam"
    elif (outputAs == "sorted bam"): resultsExt = "" # (samtools will add ".bam")
    else: resultsExt = ".sam"
    resultsFilename1 = resultsDirectory + "/" + jobName.replace(".{mate}",".1") + ".sai"
    resultsFilename2 = resultsDirectory + "/" + jobName.replace(".{mate}",".2") + ".sai"
    resultsFilename = resultsDirectory + "/" + jobName.replace(".{mate}","") + resultsExt
    jobName = jobName.replace(".{mate}",".P")
    if (giveJobsNames):
        shortJobName = []
        if (readsId != None): shortJobName += [readsId]
        else: shortJobName += [str(jobNumber)]
        if (subK != None): shortJobName += [subK]
        shortJobName += ["P"]
        shortJobName = "_".join(shortJobName)
    # create the job file, and the entry for the jobs list
    fn = jobDirectory + "/" + jobName + ".sh"
    print >>stderr, "writing \"%s\"" % fn
    jobF = file(fn,"wt")
    jobInfo = []
    if (giveJobsNames): jobInfo += ["%d %s %s" % (jobNumber,shortJobName,fn)]
    else: jobInfo += ["%d %s" % (jobNumber,fn)]
    if (dependencies != None): jobInfo += ["--depend=%s" % ",".join([str(x) for x in dependencies])]
    if ("sampe" in maxMemory): jobInfo += ["--memory=%d" % int(ceil(maxMemory["sampe"]/1000000000.0))]
    if ("sampe" in numThreads): jobInfo += ["--cores=%d" % numThreads["sampe"]]
    # write the job file
    if (bashInitializers == None): myBashInitializers = []
    else: myBashInitializers = [x for x in bashInitializers]
    if ("sampe" in maxMemory):
        # ulimit -v takes kilobytes, hence the /1024
        memPages = int(ceil(maxMemory["sampe"]/1024.0))
        myBashInitializers += ["ulimit -v %d # %s bytes" \
            % (memPages,commatize(memPages*1024))]
    if (myBashInitializers != []):
        print >>jobF, "\n".join(myBashInitializers)
        print >>jobF
    pipe = []
    command = ["%s sampe" % bwaProgramName]
    if ("sampe" in commandParams): command += [" ".join(commandParams["sampe"])]
    if (readGroupInfo != None): command += ["-r \"@RG\\t%s\"" % readGroupInfo]
    # NOTE(review): "bwa sampe" has no documented -t option -- confirm
    # before relying on --threads:sampe
    if ("sampe" in numThreads): command += ["-t %d" % numThreads["sampe"]]
    command += [refFilename]
    command += [resultsFilename1]
    command += [resultsFilename2]
    if (subLines == None):
        command += [readsFilename1]
        command += [readsFilename2]
    else:
        # bash process substitution feeds only the wanted block of each mate
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename1,subLines)]
        command += ["   --path=%s)" % readsFilename1]
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename2,subLines)]
        command += ["   --path=%s)" % readsFilename2]
    pipe += [" \\\n ".join(command)]
    if (outputAs == "bam"):
        pipe += [" | samtools view -Sb /dev/stdin"]
        pipe += [" > %s" % resultsFilename]
    elif (outputAs == "sorted bam"):
        pipe += [" | samtools view -Su -"]
        sortParams = ""
        if ("sort" in commandParams):
            jobIdForSort = jobId
            if (subK != None): jobIdForSort += "." + subK
            sortParams = " ".join(commandParams["sort"])
            sortParams = sortParams.replace("{base}",basePath)
            sortParams = sortParams.replace("{id}",jobIdForSort)
        pipe += [" | samtools sort %s - -o %s.bam" % (sortParams,resultsFilename)]
    else:
        pipe += [" > %s" % resultsFilename]
    print >>jobF, "time " + " \\\n".join(pipe)
    jobF.close()
    return " ".join(jobInfo)
def create_mem_job(readsFilespec,subK,subLines):
    """Write one paired-end "bwa mem" job script for a reads file.

    The input branch ladder covers the eight combinations of
    (sub-block extraction) x (phred+64-to-33 conversion) x (gzip input).
    Returns the job-list entry string for this job.
    """
    (dataset,readsId,_) = interpret_filespec(readsFilespec)
    readsFilename1 = do_reads_filename_substitutition(readsFilespec,1)
    readsFilename2 = do_reads_filename_substitutition(readsFilespec,2)
    # determine job name
    jobName = []
    if (jobId != None): jobName += [jobId]
    jobName += [dataset]
    if (subK != None): jobName += [subK]
    jobName += ["{mate}"]
    jobName = ".".join(jobName)
    if (outputAs == "bam"): resultsExt = ".bam"
    elif (outputAs == "sorted bam"): resultsExt = "" # (samtools will add ".bam")
    else: resultsExt = ".sam"
    resultsFilename = resultsDirectory + "/" + jobName.replace(".{mate}","") + resultsExt
    jobName = jobName.replace(".{mate}",".P")
    if (giveJobsNames):
        shortJobName = []
        if (readsId != None): shortJobName += [readsId]
        else: shortJobName += [str(jobNumber)]
        if (subK != None): shortJobName += [subK]
        shortJobName += ["P"]
        shortJobName = "_".join(shortJobName)
    # create the job file, and the entry for the jobs list
    fn = jobDirectory + "/" + jobName + ".sh"
    print >>stderr, "writing \"%s\"" % fn
    jobF = file(fn,"wt")
    jobInfo = []
    if (giveJobsNames): jobInfo += ["%d %s %s" % (jobNumber,shortJobName,fn)]
    else: jobInfo += ["%d %s" % (jobNumber,fn)]
    if ("mem" in maxMemory): jobInfo += ["--memory=%d" % int(ceil(maxMemory["mem"]/1000000000.0))]
    if ("mem" in numThreads): jobInfo += ["--cores=%d" % numThreads["mem"]]
    # write the job file
    if (bashInitializers == None): myBashInitializers = []
    else: myBashInitializers = [x for x in bashInitializers]
    if ("mem" in maxMemory):
        # ulimit -v takes kilobytes, hence the /1024
        memPages = int(ceil(maxMemory["mem"]/1024.0))
        myBashInitializers += ["ulimit -v %d # %s bytes" \
            % (memPages,commatize(memPages*1024))]
    if (myBashInitializers != []):
        print >>jobF, "\n".join(myBashInitializers)
        print >>jobF
    # mate 1's extension decides gzip handling for both mates
    mustUnzip = (readsFilename1.endswith(".gz") or readsFilename1.endswith(".gzip"))
    pipe = []
    command = ["%s mem" % bwaProgramName]
    if ("mem" in commandParams): command += [" ".join(commandParams["mem"])]
    if ("mem" in numThreads): command += ["-t %d" % numThreads["mem"]]
    command += [refFilename]
    if (subLines == None) and (not phred64to33) and (not mustUnzip):
        command += [readsFilename1]
        command += [readsFilename2]
    elif (subLines == None) and (phred64to33) and (not mustUnzip):
        command += ["<(cat %s" % readsFilename1]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
        command += ["<(cat %s" % readsFilename2]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    # NOTE(review): this "if" restarts the elif chain; the conditions are
    # mutually exclusive with the two branches above, so behavior is
    # unaffected, but it was presumably meant to be "elif"
    if (subLines == None) and (not phred64to33) and (mustUnzip):
        command += ["<(gzip -dc %s)" % readsFilename1]
        command += ["<(gzip -dc %s)" % readsFilename2]
    elif (subLines == None) and (phred64to33) and (mustUnzip):
        command += ["<(gzip -dc %s" % readsFilename1]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
        command += ["<(gzip -dc %s" % readsFilename2]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    elif (subLines != None) and (not phred64to33) and (not mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename1,subLines)]
        command += ["   --path=%s)" % readsFilename1]
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename2,subLines)]
        command += ["   --path=%s)" % readsFilename2]
    elif (subLines != None) and (phred64to33) and (not mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename1,subLines)]
        command += ["   --path=%s" % readsFilename1]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename2,subLines)]
        command += ["   --path=%s" % readsFilename2]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    elif (subLines != None) and (not phred64to33) and (mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename1,subLines)]
        command += ["   --path=%s" % readsFilename1]
        command += ["  | gzip -dc)"]
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename2,subLines)]
        command += ["   --path=%s" % readsFilename2]
        command += ["  | gzip -dc)"]
    elif (subLines != None) and (phred64to33) and (mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename1,subLines)]
        command += ["   --path=%s" % readsFilename1]
        command += ["  | gzip -dc"]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename2,subLines)]
        command += ["   --path=%s" % readsFilename2]
        command += ["  | gzip -dc"]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    pipe += [" \\\n ".join(command)]
    if (outputAs == "bam"):
        pipe += [" | samtools view -Sb /dev/stdin"]
        pipe += [" > %s" % resultsFilename]
    elif (outputAs == "sorted bam"):
        pipe += [" | samtools view -Su -"]
        sortParams = ""
        if ("sort" in commandParams):
            jobIdForSort = jobId
            if (subK != None): jobIdForSort += "." + subK
            sortParams = " ".join(commandParams["sort"])
            sortParams = sortParams.replace("{base}",basePath)
            sortParams = sortParams.replace("{id}",jobIdForSort)
        pipe += [" | samtools sort %s - -o %s.bam" % (sortParams,resultsFilename)]
    else:
        pipe += [" > %s" % resultsFilename]
    print >>jobF, "time " + " \\\n".join(pipe)
    jobF.close()
    return " ".join(jobInfo)
def create_mem_unpaired_job(readsFilespec,subK,subLines):
    """Write one single-end "bwa mem" job script for a reads file.

    Same structure as create_mem_job() but with a single (unpaired) input.
    Returns the job-list entry string for this job.
    """
    (dataset,readsId,_) = interpret_filespec(readsFilespec,replaceMate=False)
    readsFilename = do_reads_filename_substitutition(readsFilespec,None)
    # determine job name
    jobName = []
    if (jobId != None): jobName += [jobId]
    jobName += [dataset]
    if (subK != None): jobName += [subK]
    jobName = ".".join(jobName)
    if (outputAs == "bam"): resultsExt = ".bam"
    elif (outputAs == "sorted bam"): resultsExt = "" # (samtools will add ".bam")
    else: resultsExt = ".sam"
    resultsFilename = resultsDirectory + "/" + jobName + resultsExt
    if (giveJobsNames):
        shortJobName = []
        if (readsId != None): shortJobName += [readsId]
        else: shortJobName += [str(jobNumber)]
        if (subK != None): shortJobName += [subK]
        shortJobName = "_".join(shortJobName)
    # create the job file, and the entry for the jobs list
    fn = jobDirectory + "/" + jobName + ".sh"
    print >>stderr, "writing \"%s\"" % fn
    jobF = file(fn,"wt")
    jobInfo = []
    if (giveJobsNames): jobInfo += ["%d %s %s" % (jobNumber,shortJobName,fn)]
    else: jobInfo += ["%d %s" % (jobNumber,fn)]
    if ("mem" in maxMemory): jobInfo += ["--memory=%d" % int(ceil(maxMemory["mem"]/1000000000.0))]
    if ("mem" in numThreads): jobInfo += ["--cores=%d" % numThreads["mem"]]
    # write the job file
    if (bashInitializers == None): myBashInitializers = []
    else: myBashInitializers = [x for x in bashInitializers]
    if ("mem" in maxMemory):
        # ulimit -v takes kilobytes, hence the /1024
        memPages = int(ceil(maxMemory["mem"]/1024.0))
        myBashInitializers += ["ulimit -v %d # %s bytes" \
            % (memPages,commatize(memPages*1024))]
    if (myBashInitializers != []):
        print >>jobF, "\n".join(myBashInitializers)
        print >>jobF
    mustUnzip = (readsFilename.endswith(".gz") or readsFilename.endswith(".gzip"))
    pipe = []
    command = ["%s mem" % bwaProgramName]
    if ("mem" in commandParams): command += [" ".join(commandParams["mem"])]
    if ("mem" in numThreads): command += ["-t %d" % numThreads["mem"]]
    command += [refFilename]
    if (subLines == None) and (not phred64to33) and (not mustUnzip):
        command += [readsFilename]
    elif (subLines == None) and (phred64to33) and (not mustUnzip):
        command += ["<(cat %s" % readsFilename]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    # NOTE(review): this "if" restarts the elif chain; the conditions are
    # mutually exclusive with the two branches above, so behavior is
    # unaffected, but it was presumably meant to be "elif"
    if (subLines == None) and (not phred64to33) and (mustUnzip):
        command += ["<(gzip -dc %s)" % readsFilename]
    elif (subLines == None) and (phred64to33) and (mustUnzip):
        command += ["<(gzip -dc %s" % readsFilename]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    elif (subLines != None) and (not phred64to33) and (not mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename,subLines)]
        command += ["   --path=%s)" % readsFilename]
    elif (subLines != None) and (phred64to33) and (not mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename,subLines)]
        command += ["   --path=%s" % readsFilename]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    elif (subLines != None) and (not phred64to33) and (mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename,subLines)]
        command += ["   --path=%s" % readsFilename]
        command += ["  | gzip -dc)"]
    elif (subLines != None) and (phred64to33) and (mustUnzip):
        command += ["<(extract_block"]
        command += ["   --block=%s.tabulated:%s" % (readsFilename,subLines)]
        command += ["   --path=%s" % readsFilename]
        command += ["  | gzip -dc"]
        command += ["  | fastq_convert_phred --from=phred+64 --to=phred+33)"]
    pipe += [" \\\n ".join(command)]
    if (outputAs == "bam"):
        pipe += [" | samtools view -Sb /dev/stdin"]
        pipe += [" > %s" % resultsFilename]
    elif (outputAs == "sorted bam"):
        pipe += [" | samtools view -Su -"]
        sortParams = ""
        if ("sort" in commandParams):
            jobIdForSort = jobId
            if (subK != None): jobIdForSort += "." + subK
            sortParams = " ".join(commandParams["sort"])
            sortParams = sortParams.replace("{base}",basePath)
            sortParams = sortParams.replace("{id}",jobIdForSort)
        pipe += [" | samtools sort %s - -o %s.bam" % (sortParams,resultsFilename)]
    else:
        pipe += [" > %s" % resultsFilename]
    print >>jobF, "time " + " \\\n".join(pipe)
    jobF.close()
    return " ".join(jobInfo)
def do_filename_substitutition(s):
    """Expand the "{base}" placeholder in s using the global basePath."""
    if ("{base}" not in s):
        return s
    assert (basePath != None)
    return s.replace("{base}",basePath)
def do_reads_filename_substitutition(filespec,mate):
    """Strip any "id:" prefix from a reads filespec and substitute the mate
    number for "{mate}".  mate=None means no mate substitution is wanted."""
    if (":" in filespec):
        fullSpec = filespec
        pieces = filespec.split(":")
        assert (len(pieces) == 2)
        filespec = pieces[1]
        if (filespec == ""):
            # nothing after the colon; treat the whole spec as the filename
            filespec = fullSpec
    if (mate is None):
        return filespec
    assert ("{mate}" in filespec)
    return filespec.replace("{mate}",str(mate))
fileSpecRe = compile("(?P<mate>_*\{mate\}_*)")

def interpret_filespec(filespec,replaceMate=True):
    """Decompose a reads filespec into (dataset,readsId,filespec).

    readsId is the optional short identifier before a ":" (None if absent;
    "{id}" within it is expanded from the global jobId).  dataset is the
    file's base name with any .fastq/.fq extension removed and, when
    replaceMate is true, the single required "{mate}" placeholder (plus
    flanking underscores) collapsed away.  The returned filespec is the
    input with the id prefix stripped.
    """
    readsId = None
    if (":" in filespec):
        fullSpec = filespec
        pieces = filespec.split(":")
        assert (len(pieces) == 2)
        (readsId,filespec) = (pieces[0],pieces[1])
        if (readsId == ""): readsId = None
        if (filespec == ""): (readsId,filespec) = (None,fullSpec)
    if (readsId != None):
        if (jobId != None): readsId = readsId.replace("{id}",jobId)
    saveFilespec = filespec
    name = filespec.split("/")[-1]          # base name only
    for extension in (".fastq",".fq"):
        if (name.endswith(extension)):
            name = name[:name.rfind(extension)]
            break
    dataset = name
    if (replaceMate):
        m = fileSpecRe.search(name)          # exactly one {mate} is required
        assert (m != None)
        (sIx,eIx) = (m.start(),m.end())
        assert (fileSpecRe.search(name[eIx:]) == None)
        joiner = "_" if (sIx != 0) and (eIx != len(name)) else ""
        dataset = name[:sIx] + joiner + name[eIx:]
        while (dataset[-1] in [".","_"]):
            dataset = dataset[:-1]
    return (dataset,readsId,saveFilespec)
def job_specs(readsFilespecs,mateSet=2,subsampleN=None):
    """Yield (filespec, mate, subsampleNumber, subsampleLines) job tuples.

    mateSet may be a positive int (1 means unmated, so mate is None;
    larger values enumerate mates 1..mateSet) or an explicit non-empty
    list of mate identifiers.  When subsampleN maps each filespec to a
    list of line counts, one job is produced per (subsample, mate)
    pair, with 1-based subsample numbers.
    """
    if type(mateSet) == int:
        assert mateSet > 0
        mates = [None] if (mateSet == 1) else xrange(1, mateSet+1)
    else:
        assert mateSet != []
        mates = mateSet
    if subsampleN is not None:
        assert type(subsampleN) == dict
    for readFilespec in readsFilespecs:
        if subsampleN is None:
            for mate in mates:
                yield (readFilespec, mate, None, None)
        else:
            for (subK, subLines) in enumerate(subsampleN[readFilespec]):
                for mate in mates:
                    yield (readFilespec, mate, subK+1, subLines)
def number_of_lines_in(filename):
    """Return the number of lines in the named text file.

    Uses open() in a context manager (instead of the legacy, Python-2-only
    file() constructor) so the handle is closed even if reading raises.
    """
    with open(filename, "rt") as f:
        return sum(1 for _ in f)
def int_with_unit(s):
    """Convert a numeric string with an optional K/M/G suffix to an int.

    The suffix scales by powers of 1000; fractional values are rounded
    up after scaling (e.g. "0.5K" -> 500, "1.0005K" -> 1001).
    """
    multiplier = 1
    for (suffix, scale) in (("K", 10**3), ("M", 10**6), ("G", 10**9)):
        if s.endswith(suffix):
            (s, multiplier) = (s[:-1], scale)
            break
    try:
        return int(s) * multiplier
    except ValueError:
        # not a plain integer; scale the float and round up
        return int(ceil(float(s) * multiplier))
def commatize(val):
    """Return the integer val as a string with thousands separators.

    Negative values keep their sign (e.g. -1234 -> "-1,234").  Uses the
    format-spec "," presentation type instead of slicing digit groups
    by hand.
    """
    return "{:,}".format(val)
if __name__ == "__main__": main()  # script entry point; main() is defined earlier in this file
|
"""
This modules provides classes and functions for using the kombine sampler
packages for parameter estimation.
"""
import numpy
from pycbc.inference.sampler_base import BaseMCMCSampler
class KombineSampler(BaseMCMCSampler):
    """This class is used to construct the MCMC sampler from the kombine
    package.
    Parameters
    ----------
    likelihood_evaluator : likelihood class
        An instance of the likelihood class from the
        pycbc.inference.likelihood module.
    nwalkers : int
        Number of walkers to use in sampler.
    transd : bool
        If True, the sampler will operate across parameter spaces using a
        kombine.clustered_kde.TransdimensionalKDE proposal distribution. In
        this mode a masked array with samples in each of the possible sets of
        dimensions must be given for the initial ensemble distribution.
    processes : {None, int}
        Number of processes to use with multiprocessing. If None, all available
        cores are used.
    min_burn_in : {None, int}
        Set the minimum number of burn in iterations to use. If None,
        `burn_in_iterations` will be initialized to `0`.
    update_interval : {None, int}
        Make the sampler update the proposal densities every `update_interval`
        iterations.
    """
    # identifier for this sampler (presumably used to select it by name
    # elsewhere in pycbc -- not visible from this file)
    name = "kombine"
    def __init__(self, likelihood_evaluator, nwalkers, transd=False,
                 min_burn_in=None, pool=None, likelihood_call=None,
                 update_interval=None):
        # import lazily so the rest of the package works without kombine
        try:
            import kombine
        except ImportError:
            raise ImportError("kombine is not installed.")
        if likelihood_call is None:
            likelihood_call = likelihood_evaluator
        # construct sampler for use in KombineSampler
        ndim = len(likelihood_evaluator.waveform_generator.variable_args)
        # NOTE(review): pool.count raises AttributeError when pool is None
        # (the default) -- confirm that callers always supply a pool here.
        sampler = kombine.Sampler(nwalkers, ndim, likelihood_call,
                                  transd=transd, pool=pool,
                                  processes=pool.count)
        # initialize
        super(KombineSampler, self).__init__(sampler, likelihood_evaluator,
                                             min_burn_in=min_burn_in)
        self._nwalkers = nwalkers
        self.update_interval = update_interval
    @classmethod
    def from_cli(cls, opts, likelihood_evaluator, pool=None, likelihood_call=None):
        """Create an instance of this sampler from the given command-line
        options.
        Parameters
        ----------
        opts : ArgumentParser options
            The options to parse.  Reads opts.nwalkers, opts.min_burn_in
            and opts.update_interval.
        likelihood_evaluator : LikelihoodEvaluator
            The likelihood evaluator to use with the sampler.
        Returns
        -------
        KombineSampler
            A kombine sampler initialized based on the given arguments.
        """
        return cls(likelihood_evaluator, opts.nwalkers, likelihood_call=likelihood_call,
                   min_burn_in=opts.min_burn_in, pool=pool, update_interval=opts.update_interval)
    def run(self, niterations, **kwargs):
        """Advance the sampler for a number of samples.
        Parameters
        ----------
        niterations : int
            Number of samples to get from sampler.
        Returns
        -------
        p : numpy.array
            An array of current walker positions with shape (nwalkers, ndim).
        lnpost : numpy.array
            The list of log posterior probabilities for the walkers at
            positions p, with shape (nwalkers, ndim).
        lnprop : numpy.array
            The list of log proposal densities for the walkers at positions p,
            with shape (nwalkers, ndim).
        """
        blob0 = None
        if self.niterations == 0:
            # first time running, use the initial positions
            p0 = self.p0
            # evaluate the likelihood once per walker to obtain the
            # metadata ("blob") for the starting positions
            if self.likelihood_evaluator.return_meta:
                blob0 = [self.likelihood_evaluator(p0[wi, :])[1]
                         for wi in range(self.nwalkers)]
        else:
            p0 = None
            # kombine requires blob data to be specified
            blob0 = self._currentblob
        kwargs['blob0'] = blob0
        if 'update_interval' not in kwargs:
            # use the internal update interval
            kwargs['update_interval'] = self.update_interval
        res = self._sampler.run_mcmc(niterations, p0=p0, **kwargs)
        p, lnpost, lnprop = res[0], res[1], res[2]
        # update the positions
        self._pos = p
        # remember the latest blobs so the next run() call can resume
        if self.likelihood_evaluator.return_meta:
            self._currentblob = self._sampler.blobs[-1]
        return p, lnpost, lnprop
    @property
    def lnpost(self):
        """ Get the natural logarithm of the likelihood as an
        nwalkers x niterations array.
        """
        # kombine returns niterations x nwaklers
        return self._sampler.lnpost.transpose()
    @property
    def chain(self):
        """Get all past samples as an nwalker x niterations x ndim array."""
        # kombine returns niterations x nwalkers x ndim
        return self._sampler.chain.transpose((1, 0, 2))
    def clear_chain(self):
        """Clears the chain and blobs from memory.
        """
        # store the iteration that the clear is occuring on
        self._lastclear = self.niterations
        # kombine stores its chain as niterations x nwalkers x ndim
        current_shape = self._sampler._chain.shape
        new_shape = (0, current_shape[1], current_shape[2])
        # masked arrays cannot be resized in place; plain arrays can
        if isinstance(self._sampler._chain, numpy.ma.MaskedArray):
            self._sampler._chain = numpy.ma.resize(self._sampler._chain,
                                                   new_shape)
        else:
            self._sampler._chain.resize(new_shape)
        self._sampler.stored_iterations = 0
        # clear the blobs
        self._sampler._blobs = []
    def burn_in(self):
        """Use kombine's `burnin` routine to advance the sampler.
        If a minimum number of burn-in iterations was specified, this will run
        the burn-in until it has advanced at least as many steps as desired.
        The initial positions (p0) must be set prior to running.
        For more details, see `kombine.sampler.burnin`.
        Returns
        -------
        p : numpy.array
            An array of current walker positions with shape (nwalkers, ndim).
        lnpost : numpy.array
            The list of log posterior probabilities for the walkers at
            positions p, with shape (nwalkers, ndim).
        lnprop : numpy.array
            The list of log proposal densities for the walkers at positions p,
            with shape (nwalkers, ndim).
        """
        # check that we haven't already burned in
        if self.pos is not None:
            raise ValueError("burn-in already run")
        # run once
        p0 = self.p0
        if self.likelihood_evaluator.return_meta:
            blob0 = [self.likelihood_evaluator(p0[wi, :])[1]
                     for wi in range(self.nwalkers)]
        else:
            blob0 = None
        res = self._sampler.burnin(self.p0, blob0=blob0)
        p, post, q = res[0], res[1], res[2]
        # continue running until minimum burn in is satisfied
        while self.niterations < self.burn_in_iterations:
            p0 = p
            # NOTE(review): these continuation calls pass no blob0 --
            # confirm kombine carries blob state across burnin() calls.
            res = self._sampler.burnin(p0)
            p, post, q = res[0], res[1], res[2]
            # update position
            self._pos = p
        self._currentblob = self._sampler.blobs[-1]
        self.burn_in_iterations = self.niterations
        return p, post, q
|
# Python 2 toy script: read a value, count it up to 10 echoing each
# step, then echo one more input.  Python 2 only (print statement;
# input() eval()s the typed text, which is also a code-injection risk).
i = input()
# NOTE(review): if the entered value is greater than 10 (or not an
# int), this loop never reaches 10 and runs forever -- confirm intent.
while i != 10:
    print i
    i = i + 1
print input()
|
import sys, os, threading, time, logging, select, Queue
import perf
log = logging.getLogger("ashd.serve")
# Monotonically increasing request serial number, shared across threads.
seq = 1
seqlk = threading.Lock()
def reqseq():
    """Return the next request sequence number (thread-safe)."""
    global seq
    with seqlk:
        current = seq
        seq += 1
    return current
class closed(IOError):
    """Raised when the client closes the connection mid-request."""
    def __init__(self):
        message = "The client has closed the connection."
        super(closed, self).__init__(message)
class reqthread(threading.Thread):
    """Thread subclass that auto-names itself "Request handler <n>"."""
    def __init__(self, name=None, **kw):
        # allocate a serial-numbered default name only when none is given
        super(reqthread, self).__init__(
            name=(name if name is not None
                  else "Request handler %i" % reqseq()),
            **kw)
class wsgirequest(object):
    """Base class for one WSGI request/response cycle.

    Subclasses supply the transport-specific pieces (handlewsgi, fileno,
    writehead, flush); this class tracks response state and buffers
    outgoing body data.
    """
    def __init__(self, handler):
        # status/headers stay unset until startreq() is called
        self.status = None
        self.headers = []
        # True once the status line and headers have been written out
        self.respsent = False
        self.handler = handler
        # pending response body bytes not yet flushed to the client
        self.buffer = bytearray()
    def handlewsgi(self):
        # abstract: run the WSGI application for this request
        raise Exception()
    def fileno(self):
        # abstract: descriptor for select/poll on the client connection
        raise Exception()
    def writehead(self, status, headers):
        # abstract: transmit the response status line and headers
        raise Exception()
    def flush(self):
        # abstract: push buffered body data towards the client
        raise Exception()
    def close(self):
        pass
    def writedata(self, data):
        self.buffer.extend(data)
    def flushreq(self):
        # send status line and headers exactly once, before any body data
        if not self.respsent:
            if not self.status:
                raise Exception("Cannot send response body before starting response.")
            self.respsent = True
            self.writehead(self.status, self.headers)
    def write(self, data):
        # WSGI write() callable: buffer the data and block until flushed
        if not data:
            return
        self.flushreq()
        self.writedata(data)
        self.handler.ckflush(self)
    def startreq(self, status, headers, exc_info=None):
        # WSGI start_response() callable; returns the write() function.
        # A second call is only legal with exc_info, and re-raises the
        # error once headers have already gone out (PEP 333 semantics).
        if self.status:
            if exc_info:
                try:
                    if self.respsent:
                        raise exc_info[1]
                finally:
                    # break the traceback reference cycle
                    exc_info = None
            else:
                raise Exception("Can only start responding once.")
        self.status = status
        self.headers = headers
        return self.write
class handler(object):
    """Abstract base class for request dispatch strategies."""
    def handle(self, request):
        """Dispatch one request; subclasses must override."""
        raise Exception()
    def ckflush(self, req):
        """Block until req's output buffer has been fully written out."""
        poller = select.poll()
        poller.register(req, select.POLLOUT)
        while req.buffer:
            poller.poll()
            req.flush()
    def close(self):
        """Release handler resources; the default is a no-op."""
        pass
    @classmethod
    def parseargs(cls, **args):
        """Validate configuration arguments; the base class accepts none."""
        if args:
            raise ValueError("unknown handler argument: " + iter(args).next())
        return {}
class single(handler):
    """Handler that serves each request synchronously on the calling thread."""
    cname = "single"
    def handle(self, req):
        try:
            env = req.mkenv()
            with perf.request(env) as reqevent:
                # run the WSGI app and stream its response body out
                respiter = req.handlewsgi(env, req.startreq)
                for data in respiter:
                    req.write(data)
                if req.status:
                    reqevent.response([req.status, req.headers])
                # ensure headers go out even for an empty body
                req.flushreq()
                self.ckflush(req)
        except closed:
            # the client went away mid-response; nothing more to do
            pass
        except:
            log.error("exception occurred when handling request", exc_info=True)
        finally:
            req.close()
class freethread(handler):
    """Handler that spawns one thread per request, optionally capped.

    With max set, new requests wait for a free slot; with timeout also
    set, a request that cannot get a slot within timeout seconds aborts
    the whole process (os.abort) as a last-resort watchdog.
    """
    cname = "free"
    def __init__(self, max=None, timeout=None, **kw):
        super(freethread, self).__init__(**kw)
        # threads currently serving requests, guarded by self.lk
        self.current = set()
        self.lk = threading.Lock()
        self.tcond = threading.Condition(self.lk)
        self.max = max
        self.timeout = timeout
    @classmethod
    def parseargs(cls, max=None, abort=None, **args):
        # config keys: max -> thread cap, abort -> watchdog timeout
        ret = super(freethread, cls).parseargs(**args)
        if max:
            ret["max"] = int(max)
        if abort:
            ret["timeout"] = int(abort)
        return ret
    def handle(self, req):
        with self.lk:
            if self.max is not None:
                if self.timeout is not None:
                    now = start = time.time()
                    # wait for a slot, aborting the process if the wait
                    # exceeds the configured timeout
                    while len(self.current) >= self.max:
                        self.tcond.wait(start + self.timeout - now)
                        now = time.time()
                        if now - start > self.timeout:
                            os.abort()
                else:
                    while len(self.current) >= self.max:
                        self.tcond.wait()
            th = reqthread(target=self.run, args=[req])
            th.registered = False
            th.start()
            # hand-off: wait until the worker has added itself to
            # self.current so the max-slot accounting stays correct
            while not th.registered:
                self.tcond.wait()
    def run(self, req):
        try:
            th = threading.current_thread()
            with self.lk:
                self.current.add(th)
                th.registered = True
                self.tcond.notify_all()
            try:
                env = req.mkenv()
                with perf.request(env) as reqevent:
                    respiter = req.handlewsgi(env, req.startreq)
                    for data in respiter:
                        req.write(data)
                    if req.status:
                        reqevent.response([req.status, req.headers])
                    req.flushreq()
                    self.ckflush(req)
            except closed:
                pass
            except:
                log.error("exception occurred when handling request", exc_info=True)
            finally:
                # free our slot and wake any waiters in handle()/close()
                with self.lk:
                    self.current.remove(th)
                    self.tcond.notify_all()
        finally:
            req.close()
    def close(self):
        # drain: join worker threads one at a time until none remain
        while True:
            with self.lk:
                if len(self.current) > 0:
                    th = iter(self.current).next()
                else:
                    return
            th.join()
class resplex(handler):
    """Handler that multiplexes all response writing onto one thread.

    Each request's WSGI application runs on its own thread (handle1),
    but the finished response iterators are handed over a queue plus a
    self-pipe to a single response thread (handle2) that select()s over
    all client connections and drains their iterators incrementally.
    """
    cname = "rplex"
    def __init__(self, max=None, **kw):
        super(resplex, self).__init__(**kw)
        # application threads currently running, guarded by self.lk
        self.current = set()
        self.lk = threading.Lock()
        self.tcond = threading.Condition(self.lk)
        self.max = max
        # bounded queue of (request, response-iterator) pairs
        self.cqueue = Queue.Queue(5)
        # self-pipe used to wake the response thread when pairs arrive
        self.cnpipe = os.pipe()
        self.rthread = reqthread(name="Response thread", target=self.handle2)
        self.rthread.start()
    @classmethod
    def parseargs(cls, max=None, **args):
        ret = super(resplex, cls).parseargs(**args)
        if max:
            ret["max"] = int(max)
        return ret
    def ckflush(self, req):
        # write() would block the app thread; only the response thread
        # may push data in this handler
        raise Exception("resplex handler does not support the write() function")
    def handle(self, req):
        with self.lk:
            if self.max is not None:
                while len(self.current) >= self.max:
                    self.tcond.wait()
            th = reqthread(target=self.handle1, args=[req])
            th.registered = False
            th.start()
            # wait for the worker to register itself (slot accounting)
            while not th.registered:
                self.tcond.wait()
    def handle1(self, req):
        """Run the WSGI app, then hand the iterator to the response thread."""
        try:
            th = threading.current_thread()
            with self.lk:
                self.current.add(th)
                th.registered = True
                self.tcond.notify_all()
            try:
                env = req.mkenv()
                respobj = req.handlewsgi(env, req.startreq)
                respiter = iter(respobj)
                if not req.status:
                    log.error("request handler returned without calling start_request")
                    if hasattr(respiter, "close"):
                        respiter.close()
                    return
                else:
                    # queue the pair and poke the self-pipe to wake handle2
                    self.cqueue.put((req, respiter))
                    os.write(self.cnpipe[1], " ")
                    # ownership transferred; do not close req here
                    req = None
            finally:
                with self.lk:
                    self.current.remove(th)
                    self.tcond.notify_all()
        except closed:
            pass
        except:
            log.error("exception occurred when handling request", exc_info=True)
        finally:
            if req is not None:
                req.close()
    def handle2(self):
        """Response thread: drain every active response iterator via select()."""
        try:
            rp = self.cnpipe[0]
            # maps request -> its iterator (None once exhausted)
            current = {}
            def closereq(req):
                # close iterator and request, then forget the entry
                respiter = current[req]
                try:
                    if respiter is not None and hasattr(respiter, "close"):
                        respiter.close()
                except:
                    log.error("exception occurred when closing iterator", exc_info=True)
                try:
                    req.close()
                except:
                    log.error("exception occurred when closing request", exc_info=True)
                del current[req]
            def ckiter(req):
                # pull one chunk from the iterator into req's buffer;
                # retire the request once both iterator and buffer are done
                respiter = current[req]
                if respiter is not None:
                    rem = False
                    try:
                        data = respiter.next()
                    except StopIteration:
                        rem = True
                        try:
                            req.flushreq()
                        except:
                            log.error("exception occurred when handling response data", exc_info=True)
                    except:
                        rem = True
                        log.error("exception occurred when iterating response", exc_info=True)
                    if not rem:
                        if data:
                            try:
                                req.flushreq()
                                req.writedata(data)
                            except:
                                log.error("exception occurred when handling response data", exc_info=True)
                                rem = True
                    if rem:
                        current[req] = None
                        try:
                            if hasattr(respiter, "close"):
                                respiter.close()
                        except:
                            log.error("exception occurred when closing iterator", exc_info=True)
                        respiter = None
                if respiter is None and not req.buffer:
                    closereq(req)
            while True:
                # wait for: new work on the pipe, or writability of any
                # request that has buffered data pending
                bufl = list(req for req in current.iterkeys() if req.buffer)
                rls, wls, els = select.select([rp], bufl, [rp] + bufl)
                if rp in rls:
                    ret = os.read(rp, 1024)
                    if not ret:
                        # pipe closed by close(): shut the thread down
                        os.close(rp)
                        return
                    try:
                        while True:
                            req, respiter = self.cqueue.get(False)
                            current[req] = respiter
                            ckiter(req)
                    except Queue.Empty:
                        pass
                for req in wls:
                    try:
                        req.flush()
                    except closed:
                        closereq(req)
                    except:
                        log.error("exception occurred when writing response", exc_info=True)
                        closereq(req)
                    else:
                        # refill from the iterator while the buffer is small
                        if len(req.buffer) < 65536:
                            ckiter(req)
        except:
            log.critical("unexpected exception occurred in response handler thread", exc_info=True)
            os.abort()
    def close(self):
        # join application threads, then signal the response thread via
        # pipe EOF and wait for it to exit
        while True:
            with self.lk:
                if len(self.current) > 0:
                    th = iter(self.current).next()
                else:
                    break
            th.join()
        os.close(self.cnpipe[1])
        self.rthread.join()
# Registry mapping each handler's cname to its class, built by scanning
# this module's globals for handler subclasses that declare a cname.
names = dict((cls.cname, cls) for cls in globals().itervalues() if
             isinstance(cls, type) and
             issubclass(cls, handler) and
             hasattr(cls, "cname"))
def parsehspec(spec):
    """Parse a handler spec "name:key=val,flag,..." into (name, argdict).

    A spec without a colon is just a name with no arguments.  The part
    after the colon is comma-separated; each item may carry an =value,
    defaulting to the empty string for bare flags.
    """
    if ":" not in spec:
        return spec, {}
    nm, rest = spec.split(":", 1)
    args = {}
    while rest:
        part, _, rest = rest.partition(",")
        key, _, val = part.partition("=")
        args[key] = val
    return nm, args
|
from enum import Enum, unique
import math


@unique
class Direction(Enum):
    """The six cardinal directions on a hexagonal grid.

    Named as <axis>_<sign>, so Q_POS is in the positive direction
    on the q axis.
    """
    Q_POS = 0
    R_POS = 1
    S_POS = 2
    Q_NEG = 3
    R_NEG = 4
    S_NEG = 5


class HexCell:
    """A single cell in a hexagonal grid, in cube coordinates.

    Attributes:
        q, r, s - The coordinates of the hex (q + r + s == 0).
    """

    # Per-direction deltas applied to (q, r, s) to reach a neighbor.
    _direction_coord_change = {
        Direction.Q_POS: (+1, +0, -1),
        Direction.R_POS: (+0, +1, -1),
        Direction.S_POS: (+1, -1, +0),
        Direction.Q_NEG: (-1, +0, +1),
        Direction.R_NEG: (+0, -1, +1),
        Direction.S_NEG: (-1, +1, +0)
    }

    def __init__(self, q, r):
        self.q = q
        self.r = r
        self.s = -q - r  # third cube axis is implied by the other two

    def __eq__(self, other):
        return self.has_same_coordinates(other)

    def has_same_coordinates(self, other):
        """Same as __eq__, but accessible for derived classes."""
        return all(
            self._are_equal_or_nan(getattr(self, axis), getattr(other, axis))
            for axis in ("q", "r", "s"))

    def _are_equal_or_nan(self, first, second):
        # NaN never compares equal to itself; treat two NaNs as a match.
        return (math.isnan(first) and math.isnan(second)) or first == second

    def __hash__(self):
        return hash((self.q, self.r, self.s))

    def __str__(self):
        return "({},{},{})".format(self.q, self.r, self.s)

    def __repr__(self):
        return str(self)

    def __sub__(self, other):
        return HexCell(self.q - other.q, self.r - other.r)

    def __add__(self, other):
        return HexCell(self.q + other.q, self.r + other.r)

    def rotate_clockwise_about_origin(self):
        """Return this cell rotated one step clockwise about the origin."""
        return HexCell(-self.s, -self.q)

    def rotate_counterclockwise_about_origin(self):
        """Return this cell rotated one step counterclockwise about the origin."""
        return HexCell(-self.r, -self.s)

    def get_neighbors(self, hex_grid):
        """Get the cells adjacent to hex_cell in hex_grid in Direction order."""
        return (self.get_neighbor(hex_grid, direction)
                for direction in Direction)

    def get_neighbor(self, hex_grid, direction):
        """Get the HexCell adjacent to hex_cell in the specified direction."""
        dq, dr, ds = self._direction_coord_change[direction]
        q, r, s = self.q + dq, self.r + dr, self.s + ds
        assert q + r + s == 0
        return hex_grid.get_cell(q, r)

    def get_offset_coords(self):
        """Convert cube coordinates to row-parity offset (col, row) coordinates."""
        return (self.q + (self.r - (self.r & 1)) // 2, self.r)

    @staticmethod
    def from_offset_cords(offset_cords):
        """Build a HexCell from row-parity offset (col, row) coordinates."""
        col, row = offset_cords
        return HexCell(col - (row - (row & 1)) // 2, row)
|
import re
import serial.tools.list_ports
def main():
    """List all serial ports with config.ini-ready deviceN_port lines.

    Python 2 only (print statements).  For ports exposing a hardware id,
    also prints a stable hwgrep:// URL (regex-escaped) that survives
    device-name reshuffling across reboots.
    """
    print "Listing all serial devices, pick the one you want and use the line in config.ini.\n"
    for (port, name, hwid) in serial.tools.list_ports.comports():
        print "\ndeviceN_port={0}".format(port)
        if hwid not in ["n/a", None]:
            print "or use the permanent URL:\ndeviceN_port=hwgrep://{0}".format(re.escape(hwid))
if __name__ == "__main__":
    main()
|
"""
hdnet
~~~~~
Hopfield denoising network
:copyright: Copyright 2014, Christopher Hillar, Felix Effenberger
:license: GPLv3, see LICENSE for details.
"""
"""hdnet init code"""
__version__ = 'v0.1'
from .data import *
from .hopfield import *
from .learner import *
from .maths import *
from .patterns import *
from .sampling import *
from .spikes_model import *
from .spikes import *
from .stats import *
from .stimulus import *
from .util import *
from .visualization import *
from .spikes_model_validation import *
|
import numpy as np
import pystan
import statsmodels.api as sm
from scipy.stats import uniform, bernoulli, poisson
def ztp(N, lambda_):
    """Zero truncated Poisson distribution"""
    # Inverse-CDF trick: for each rate, draw U uniformly above P(X=0),
    # then map back through the Poisson quantile function so zero is
    # impossible while the positive-count probabilities are preserved.
    # N is unused but kept for interface compatibility with callers.
    zero_mass = [poisson.pmf(0, rate) for rate in lambda_]
    draws = [uniform.rvs(loc=p0, scale=1 - p0) for p0 in zero_mass]
    counts = [int(poisson.ppf(u, rate)) for (u, rate) in zip(draws, lambda_)]
    return np.array(counts)
np.random.seed(141) # set seed to replicate example
nobs= 750 # number of obs in model
x1 = uniform.rvs(size=nobs)
xb = 1.0 + 4.0 * x1 # linear predictor, xb
exb = np.exp(xb)
# zero-truncated Poisson counts for the positive component
poy = ztp(nobs, exb)
xc = -1.0 + 3.5 * x1 # construct filter
pi = 1.0/(1.0 + np.exp(xc))
# Bernoulli gate deciding which observations become structural zeros
bern = [bernoulli.rvs(1-pi[i]) for i in range(nobs)]
poy = [poy[i]*bern[i] for i in range(nobs)] # Add structural zeros
X = np.transpose(x1)
X = sm.add_constant(X)
mydata = {} # build data dictionary
mydata['Y'] = poy # response variable
mydata['N'] = nobs # sample size
mydata['Xb'] = X # predictors
mydata['Xc'] = X
mydata['Kb'] = X.shape[1] # number of coefficients
mydata['Kc'] = X.shape[1]
# Hurdle model in Stan: zeros come from a Bernoulli gate, positive
# counts from a Poisson truncated at 1 (T[1,]).
stan_code = """
data{
    int<lower=0> N;
    int<lower=0> Kb;
    int<lower=0> Kc;
    matrix[N, Kb] Xb;
    matrix[N, Kc] Xc;
    int<lower=0> Y[N];
}
parameters{
    vector[Kc] beta;
    vector[Kb] gamma;
    real<lower=0, upper=5.0> r;
}
transformed parameters{
    vector[N] mu;
    vector[N] Pi;
    mu = exp(Xc * beta);
    for (i in 1:N) Pi[i] = inv_logit(Xb[i] * gamma);
}
model{
    for (i in 1:N) {
        (Y[i] == 0) ~ bernoulli(1-Pi[i]);
        if (Y[i] > 0) Y[i] ~ poisson(mu[i]) T[1,];
    }
}
"""
fit = pystan.stan(model_code=stan_code, data=mydata, iter=7000, chains=3,
                  warmup=4000, n_jobs=3)
nlines = 10 # number of lines in screen output
output = str(fit).split('\n')
for item in output[:nlines]:
    print(item)
|
from dec import dec
from file2txt import file2txt
from dec2str import dec2str
from date2str import date2str
from dbsqlite import Db
def isoz(accounts, diax='.'):
    """Aggregate account rows into a trial-balance dictionary.

    accounts is a list of rows such as [['20.00.00', 0, 15], ...] whose
    first element is the account code and the rest are amounts.  Each
    row's amounts are converted with dec() and added into every level
    of the account's hierarchy (see lmohier), keyed by level code.
    """
    totals = {}
    ncols = len(accounts[0]) - 1
    for row in accounts:
        amounts = [dec(value) for value in row[1:]]
        for level in lmohier(row[0], diax):
            sums = totals.setdefault(level, [dec(0) for _ in range(ncols)])
            for i, amount in enumerate(amounts):
                sums[i] += amount
    return totals
def lmohier(lmo, diax='.'):
    """Return the hierarchy chain of an account code.

    For the account 38.00.00 the result is
    ['total', '3', '38', '38.00', '38.00.00']: the grand-total key,
    the one-character account class, then every prefix of the code.
    """
    parts = lmo.split(diax)
    chain = ['total', parts[0][0]]
    chain.extend(diax.join(parts[:depth + 1]) for depth in range(len(parts)))
    return chain
def dathier(dat):
    """Return the hierarchy chain of an ISO date string.

    For 2016-10-28 the result is ['2016', '2016-10', '2016-10-28'].
    """
    year, month, _day = dat.split('-')
    return [year, '%s-%s' % (year, month), dat]
def isozpr(lmodic):
    """Print a trial balance: one line per account key, sorted by key.

    Each line shows the key followed by its amounts formatted through
    dec2str, in fixed-width right-aligned columns.
    """
    for key in sorted(lmodic):
        amounts = lmodic[key]
        cells = [key] + [dec2str(amount) for amount in amounts]
        template = '{:16} ' + ('{:>16} ' * len(amounts))
        print(template.format(*cells))
def tst():
    """Smoke test: build and print a 2015 trial balance from a SQLite db.

    Hard-codes a local database path and query template, so this only
    runs on the original author's machine.
    """
    dapo = '2015-01-01'
    deos = '2015-12-31'
    sqlfile = './sql/selectp_isozygio1.sql'
    db = Db('/home/tedlaz/tedfiles/prj/samaras15/sam2015.sql3')
    # read the SQL template and fill in the date range placeholders
    sqlp = file2txt(sqlfile)
    sql = sqlp.format(apo=dapo, eos=deos)
    rws = db.rows(sql)
    isz = isoz(rws)
    print('Ισοζύγιο από %s έως %s' % (date2str(dapo), date2str(deos)))
    isozpr(isz)
if __name__ == '__main__':
    # Ad-hoc demo: aggregate a small fixed ledger and print the balance.
    isz = isoz([['38.00.00', 10.22, 11.45, 0, 1],
                ['38.01.00', 2.36, 2.48, 1, 0],
                ['20.00.00', 34.45, 0, 0, 1],
                ['20.00.01', 22.45, 0, 0, 1],
                ['20.00.02', 34.89, 0, 0, 12.45],
                ['24.00.00', 1650.88, 0, 0, 13.76],
                ['24.00.01', 351.42, 0, 0, 0],
                ['40.00.02', 15, 0, 0, 0],
                ['40.00.00', 15, 0, 0, 0],
                ['40.00.01', 15, 0, 0, 0]
                ])
    isozpr(isz)
    print(dathier('2016-01-01'))
    # tst()
|
import os
from GangaCore.Runtime.GPIexport import exportToGPI
from GangaCore.GPIDev.Base.Proxy import GPIProxyClassFactory
from GangaLHCb.Lib.Applications import AppsBaseUtils
from GangaCore.Utility.logging import getLogger
from .GaudiPython import GaudiPython
from .Bender import Bender
from .BenderScript import BenderScript
from .Ostap import Ostap
from .BenderBox import BenderModule, BenderRun, OstapRun
from .GaudiExec import GaudiExec
from GangaCore.Utility.Config import getConfig
logger = getLogger()
logger.debug("User Added Apps")
# Register any user-configured LHCb application names from the config.
config = getConfig('LHCb')
user_added = config['UserAddedApplications']
user_apps = user_added.split(':')
# NOTE(review): user_apps (a list) can never compare equal to
# user_added (a string), so this branch looks dead -- confirm whether
# the intent was to handle a spec with no ':' separator.
if user_apps == user_added and len(user_added) > 0:
    AppsBaseUtils.addNewLHCbapp(user_apps)
for app in user_apps:
    if len(app) > 0:
        AppsBaseUtils.addNewLHCbapp(app)
logger.debug("Constructing AppsBase Apps")
# Read the AppsBase.py template and stamp out one class per known app
# by textual substitution of the AppName placeholder.
f = open(os.path.join(os.path.dirname(__file__), 'AppsBase.py'), 'r')
cls = f.read()
f.close()
all_apps = ''
for app in AppsBaseUtils.available_apps():
    # skip apps already defined/imported in this module's namespace
    if app in dir():
        continue
    app = str(app)
    this_app = cls.replace('AppName', app)
    all_apps = all_apps + str('\n\n') + this_app
    #exec(this_exec, all_global, all_local)
    logger.debug("Adding %s" % str(app))
logger.debug("Adding apps")
# Compile and execute the concatenated class definitions so the
# generated app classes become attributes of this module.
modules= compile(all_apps, '<string>', 'exec')
exec(modules)
logger.debug("Fin")
|
"""
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| void (kind=V)
|
| str_ (string_, bytes_) (kind=S) [Python 2]
| unicode_ (kind=U) [Python 2]
|
| bytes_ (string_) (kind=S) [Python 3]
| str_ (unicode_) (kind=U) [Python 3]
|
\\-> object_ (not used much) (kind=O)
"""
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data','datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
from numpy.core.multiarray import typeinfo, ndarray, array, \
empty, dtype, datetime_data, datetime_as_string, \
busday_offset, busday_count, is_busday, busdaycalendar
import types as _types
import sys
from builtins import bool, int, int, float, complex, object, str, str
from numpy.compat import bytes
if sys.version_info[0] >= 3:
    # Py3K compatibility shim: Python 3 has no `long` builtin, so define
    # a local stand-in that later code in this module can reference.
    class long(int):
        # Placeholder class -- this will not escape outside numerictypes.py
        pass
_all_chars = [chr(code) for code in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
# 256-entry translation tables mapping A-Z to a-z (and vice versa)
# while leaving every other byte value untouched, independent of locale.
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])

def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.
    This is an internal utility function to replace calls to str.lower() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
    Parameters
    ----------
    s : str
    Returns
    -------
    lowered : str
    Examples
    --------
    >>> from numpy.core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    return s.translate(LOWER_TABLE)

def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.
    This is an internal utility function to replace calls to str.upper() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
    Parameters
    ----------
    s : str
    Returns
    -------
    uppered : str
    Examples
    --------
    >>> from numpy.core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)

def english_capitalize(s):
    """ Apply English case rules to convert the first character of an ASCII
    string to upper case.
    This is an internal utility function to replace calls to str.capitalize()
    such that we can avoid changing behavior with changing locales.
    Parameters
    ----------
    s : str
    Returns
    -------
    capitalized : str
    Examples
    --------
    >>> from numpy.core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
sctypeDict = {}      # Contains all leaf-node scalar types with aliases
sctypeNA = {}        # Contains all leaf-node types -> numarray type equivalences
allTypes = {}        # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
def bitname(obj):
    """Return a bit-width name for a given type object"""
    # Returns (base, bits, char): e.g. a 32-bit signed int type yields
    # ('int', 32, 'i4').  C-named types are resolved through typeinfo;
    # bit-width-named types fall back to parsing the name itself.
    name = obj.__name__
    base = ''
    char = ''
    try:
        if name[-1] == '_':
            newname = name[:-1]
        else:
            newname = name
        info = typeinfo[english_upper(newname)]
        assert(info[-1] == obj)  # sanity check
        bits = info[2]
    except KeyError:     # bit-width name
        base, bits = _evalname(name)
        char = base[0]
    # special-case the types whose base/char cannot be derived above
    if name == 'bool_':
        char = 'b'
        base = 'bool'
    elif name=='void':
        char = 'V'
        base = 'void'
    elif name=='object_':
        char = 'O'
        base = 'object'
        bits = 0
    elif name=='datetime64':
        char = 'M'
    elif name=='timedelta64':
        char = 'm'
    # string/unicode naming differs between Python 2 and 3
    if sys.version_info[0] >= 3:
        if name=='bytes_':
            char = 'S'
            base = 'bytes'
        elif name=='str_':
            char = 'U'
            base = 'str'
    else:
        if name=='string_':
            char = 'S'
            base = 'string'
        elif name=='unicode_':
            char = 'U'
            base = 'unicode'
    # append the byte width to the char code (e.g. 'i' -> 'i4')
    bytes = bits // 8
    if char != '' and bytes != 0:
        char = "%s%d" % (char, bytes)
    return base, bits, char
def _add_types():
    """Populate allTypes/sctypeDict from the C typeinfo registry.

    Concrete scalar types (tuple entries in typeinfo) are registered
    under their lowercased name plus their type char and type number;
    abstract base classes (non-tuple entries) get only the name.
    """
    for a in list(typeinfo.keys()):
        name = english_lower(a)
        if isinstance(typeinfo[a], tuple):
            typeobj = typeinfo[a][-1]
            # define C-name and insert typenum and typechar references also
            allTypes[name] = typeobj
            sctypeDict[name] = typeobj
            sctypeDict[typeinfo[a][0]] = typeobj
            sctypeDict[typeinfo[a][1]] = typeobj
        else:  # generic class
            allTypes[name] = typeinfo[a]
_add_types()
def _add_aliases():
    """Register bit-width aliases (e.g. float64, Complex32) for each type.

    Integer types are skipped here; they are handled separately by
    _add_integer_aliases.  Also fills sctypeNA with capitalized
    numarray-style names and registers the sized char code.
    """
    for a in list(typeinfo.keys()):
        name = english_lower(a)
        if not isinstance(typeinfo[a], tuple):
            continue
        typeobj = typeinfo[a][-1]
        # insert bit-width version for this class (if relevant)
        base, bit, char = bitname(typeobj)
        if base[-3:] == 'int' or char[0] in 'ui': continue
        if base != '':
            myname = "%s%d" % (base, bit)
            # don't let longdouble/clongdouble overwrite an existing
            # same-width alias
            if (name != 'longdouble' and name != 'clongdouble') or \
                   myname not in list(allTypes.keys()):
                allTypes[myname] = typeobj
                sctypeDict[myname] = typeobj
                # numarray convention: complex widths are halved and the
                # base name is capitalized
                if base == 'complex':
                    na_name = '%s%d' % (english_capitalize(base), bit//2)
                elif base == 'bool':
                    na_name = english_capitalize(base)
                    sctypeDict[na_name] = typeobj
                else:
                    na_name = "%s%d" % (english_capitalize(base), bit)
                    sctypeDict[na_name] = typeobj
                sctypeNA[na_name] = typeobj
                sctypeDict[na_name] = typeobj
                sctypeNA[typeobj] = na_name
                sctypeNA[typeinfo[a][0]] = na_name
        if char != '':
            sctypeDict[char] = typeobj
            sctypeNA[char] = na_name
_add_aliases()
def _add_integer_aliases():
    """Register sized aliases for the C integer types.

    Walks the C integer kinds from widest to narrowest and registers
    int<bits>/uint<bits>, numarray-style Int<bits>/UInt<bits>, and
    i<bytes>/u<bytes> codes for the first (largest) type of each width.
    """
    _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
    for ctype in _ctypes:
        val = typeinfo[ctype]
        bits = val[2]
        charname = 'i%d' % (bits//8,)
        ucharname = 'u%d' % (bits//8,)
        intname = 'int%d' % bits
        UIntname = 'UInt%d' % bits
        Intname = 'Int%d' % bits
        uval = typeinfo['U'+ctype]
        typeobj = val[-1]
        utypeobj = uval[-1]
        # only the first C type seen for a given width wins
        if intname not in list(allTypes.keys()):
            uintname = 'uint%d' % bits
            allTypes[intname] = typeobj
            allTypes[uintname] = utypeobj
            sctypeDict[intname] = typeobj
            sctypeDict[uintname] = utypeobj
            sctypeDict[Intname] = typeobj
            sctypeDict[UIntname] = utypeobj
            sctypeDict[charname] = typeobj
            sctypeDict[ucharname] = utypeobj
            sctypeNA[Intname] = typeobj
            sctypeNA[UIntname] = utypeobj
            sctypeNA[charname] = typeobj
            sctypeNA[ucharname] = utypeobj
        # reverse lookups are refreshed for every C kind of this width
        sctypeNA[typeobj] = Intname
        sctypeNA[utypeobj] = UIntname
        sctypeNA[val[0]] = Intname
        sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# Module-level references to the void and generic scalar types.
void = allTypes['void']
generic = allTypes['generic']
def _set_up_aliases():
    """Create the historical alias names (``float_``, ``int_``, ...) and
    then remove the aliases that would shadow Python builtins or stdlib
    module names from ``allTypes``/``sctypeDict``.
    """
    type_pairs = [('complex_', 'cdouble'),
                  ('int0', 'intp'),
                  ('uint0', 'uintp'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('singlecomplex', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('longcomplex', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('unicode_', 'unicode'),
                  ('object_', 'object')]
    # str_/bytes_/string_ map differently on Python 2 vs Python 3
    if sys.version_info[0] >= 3:
        type_pairs.extend([('bytes_', 'string'),
                           ('str_', 'unicode'),
                           ('string_', 'string')])
    else:
        type_pairs.extend([('str_', 'string'),
                           ('string_', 'string'),
                           ('bytes_', 'string')])
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]
    # Remove aliases overriding python types and modules
    to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
                 'complex', 'bool', 'string', 'datetime', 'timedelta']
    if sys.version_info[0] >= 3:
        # Py3K: 'bytes' and 'str' are builtins there; 'unicode'/'long' are not
        to_remove.append('bytes')
        to_remove.append('str')
        to_remove.remove('unicode')
        to_remove.remove('long')
    for t in to_remove:
        try:
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            # alias was never created on this platform/version
            pass
_set_up_aliases()
# Maps each concrete scalar type object to its single-character type code.
_sctype2char_dict = {}
def _construct_char_code_lookup():
    """Populate _sctype2char_dict from typeinfo, skipping the pointer-sized
    'p'/'P' codes (those duplicate a fixed-width integer type)."""
    for name in list(typeinfo.keys()):
        tup = typeinfo[name]
        if isinstance(tup, tuple):
            if tup[0] not in ['p','P']:
                _sctype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
# Concrete scalar types grouped by kind; the numeric lists are filled in
# by _set_array_types() in increasing-precision order.
# The original 'others' list contained ``str`` twice -- a 2to3 artifact
# (Python 2's ``str`` and ``unicode`` both converted to ``str``); the
# correct Python 3 pair is ``bytes``/``str``.
sctypes = {'int': [],
           'uint':[],
           'float':[],
           'complex':[],
           'others':[bool, object, bytes, str, void]}
def _add_array_type(typename, bits):
    """Append ``allTypes['<typename><bits>']`` to ``sctypes[typename]``,
    silently skipping bit widths that do not exist on this platform."""
    key = '%s%d' % (typename, bits)
    if key in allTypes:
        sctypes[typename].append(allTypes[key])
def _set_array_types():
    """Fill the ``sctypes`` kind-lists with every bit width available on
    this platform, then splice the pointer-sized intp/uintp types into the
    integer lists at the position matching their item size.
    """
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8*bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8*bytes
        _add_array_type('float', bits)
        # complex types carry twice the bits of their component floats
        _add_array_type('complex', 2*bits)
    # 'p' is the pointer-sized signed integer (intp)
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        # find the first slot whose itemsize exceeds intp's
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
# Scalar type names ordered by increasing "rank" (precision/kind); used to
# pick the higher-precision type when two kinds must be reconciled.  Some
# widths (e.g. int128, float256) exist on no current platform but keep the
# ordering stable.
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
                   'int32', 'uint32', 'int64', 'uint64', 'int128',
                   'uint128', 'float16',
                   'float32', 'float64', 'float80', 'float96', 'float128',
                   'float256',
                   'complex32', 'complex64', 'complex128', 'complex160',
                   'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
    """
    Return the scalar type of highest precision of the same kind as the input.

    Parameters
    ----------
    t : dtype or dtype specifier
        The input data type. This can be a `dtype` object or an object that
        is convertible to a `dtype`.

    Returns
    -------
    out : dtype
        The highest precision data type of the same kind (`dtype.kind`) as `t`.

    See Also
    --------
    obj2sctype, mintypecode, sctype2char, dtype

    Examples
    --------
    >>> np.maximum_sctype(np.uint8)
    <type 'numpy.uint64'>
    >>> np.maximum_sctype('i2')
    <type 'numpy.int64'>
    >>> np.maximum_sctype('f4')
    <type 'numpy.float96'>
    """
    sctype = obj2sctype(t)
    if sctype is None:
        # Not recognizable as a scalar type: return the input unchanged.
        return t
    base, bits = _evalname(sctype.__name__)
    if bits == 0:
        # Name has no bit-width suffix (e.g. object_): nothing larger exists.
        return sctype
    # sctypes kind-lists are ordered by increasing precision; take the last.
    return sctypes[base][-1]
# Python 2 exposes the buffer type via the types module; Python 3 dropped
# it, so fall back to memoryview there.
try:
    buffer_type = _types.BufferType
except AttributeError:
    # Py3K
    buffer_type = memoryview
# Maps each builtin Python type to the name of its NumPy scalar equivalent
# (looked up in allTypes by _python_type below).
_python_types = {int : 'int_',
                 float: 'float_',
                 complex: 'complex_',
                 bool: 'bool_',
                 bytes: 'bytes_',
                 str: 'unicode_',
                 buffer_type: 'void',
                }
# Two variants of the same helper: only the "is this a class?" check
# differs (Python 2 spells ``type`` as ``_types.TypeType``).
if sys.version_info[0] >= 3:
    def _python_type(t):
        """returns the type corresponding to a certain Python type"""
        if not isinstance(t, type):
            # an instance was passed; classify by its class
            t = type(t)
        return allTypes[_python_types.get(t, 'object_')]
else:
    def _python_type(t):
        """returns the type corresponding to a certain Python type"""
        if not isinstance(t, _types.TypeType):
            t = type(t)
        return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
    """
    Determines whether the given object represents a scalar data-type.

    Parameters
    ----------
    rep : any
        If `rep` is an instance of a scalar dtype, True is returned. If not,
        False is returned.

    Returns
    -------
    out : bool
        Boolean result of check whether `rep` is a scalar dtype.

    See Also
    --------
    issubsctype, issubdtype, obj2sctype, sctype2char

    Examples
    --------
    >>> np.issctype(np.int32)
    True
    >>> np.issctype(list)
    False
    >>> np.issctype(1.1)
    False

    Strings are also a scalar type:

    >>> np.issctype(np.dtype('str'))
    True
    """
    if not isinstance(rep, (type, dtype)):
        return False
    try:
        res = obj2sctype(rep)
        if res and res != object_:
            return True
        return False
    except Exception:
        # obj2sctype can fail on exotic inputs; treat those as "not a scalar
        # type".  A bare ``except:`` here would also have swallowed
        # KeyboardInterrupt/SystemExit, so catch only Exception.
        return False
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object of which the type is returned.
    default : any, optional
        If given, this is returned for objects whose types can not be
        determined. If not given, None is returned for those objects.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype

    Examples
    --------
    >>> np.obj2sctype(np.int32)
    <type 'numpy.int32'>
    >>> np.obj2sctype(np.array([1., 2.]))
    <type 'numpy.float64'>
    >>> np.obj2sctype(dict)
    <type 'numpy.object_'>
    >>> np.obj2sctype(1, default=list)
    <type 'list'>
    """
    try:
        # already a NumPy scalar type: return it unchanged
        if issubclass(rep, generic):
            return rep
    except TypeError:
        # rep is not a class at all; fall through to the instance checks
        pass
    if isinstance(rep, dtype):
        return rep.type
    if isinstance(rep, type):
        # a plain Python class: map through the builtin-type table
        return _python_type(rep)
    if isinstance(rep, ndarray):
        return rep.dtype.type
    try:
        res = dtype(rep)
    except Exception:
        # not convertible to a dtype at all; a bare ``except:`` here would
        # also have swallowed KeyboardInterrupt/SystemExit
        return default
    return res.type
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    `issubclass_` is equivalent to the Python built-in ``issubclass``,
    except that it returns False instead of raising a TypeError when one
    of the arguments is not a class.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype
    """
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        # non-class argument: report "not a subclass" instead of raising
        return False
    return result
def issubsctype(arg1, arg2):
    """
    Determine if the first argument is a subclass of the second argument.

    Parameters
    ----------
    arg1, arg2 : dtype or dtype specifier
        Data-types.

    Returns
    -------
    out : bool
        The result.

    See Also
    --------
    issctype, issubdtype, obj2sctype

    Examples
    --------
    >>> np.issubsctype('S8', str)
    True
    >>> np.issubsctype(np.array([1]), np.int)
    True
    >>> np.issubsctype(np.array([1]), np.float)
    False
    """
    # Coerce both arguments to scalar type objects before comparing.
    return issubclass(obj2sctype(arg1), obj2sctype(arg2))
def issubdtype(arg1, arg2):
    """
    Returns True if first argument is a typecode lower/equal in type hierarchy.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        dtype or string representing a typecode.

    Returns
    -------
    out : bool

    See Also
    --------
    issubsctype, issubclass_
    numpy.core.numerictypes : Overview of numpy type hierarchy.

    Examples
    --------
    >>> np.issubdtype('S1', str)
    True
    >>> np.issubdtype(np.float64, np.float32)
    False
    """
    if issubclass_(arg2, generic):
        # arg2 is already an abstract/concrete scalar type: direct check
        return issubclass(dtype(arg1).type, arg2)
    # Otherwise compare against arg2's parent in the scalar-type hierarchy,
    # so e.g. float64 matches a plain Python ``float`` argument.
    mro = dtype(arg2).type.mro()
    if len(mro) > 1:
        val = mro[1]
    else:
        val = mro[0]
    return issubclass(dtype(arg1).type, val)
class _typedict(dict):
    """
    Base object for a dictionary for look-up with any alias for an array dtype.

    Instances of `_typedict` can not be used as dictionaries directly,
    first they have to be populated.
    """
    def __getitem__(self, obj):
        # Normalize the key through obj2sctype so any dtype alias
        # (char code, name string, Python type, ...) finds the entry.
        return dict.__getitem__(self, obj2sctype(obj))
# Per-scalar-type lookup tables, all keyed through obj2sctype (see
# _typedict): item size in bytes, required alignment, and min/max values
# (None for non-integer types, which carry no such limits in typeinfo).
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
    """Populate nbytes/_alignment/_maxvals/_minvals from typeinfo."""
    for name, val in typeinfo.items():
        if not isinstance(val, tuple):
            # abstract types carry no size/alignment data
            continue
        obj = val[-1]
        nbytes[obj] = val[2] // 8
        _alignment[obj] = val[3]
        if (len(val) > 5):
            _maxvals[obj] = val[4]
            _minvals[obj] = val[5]
        else:
            _maxvals[obj] = None
            _minvals[obj] = None
_construct_lookups()
def sctype2char(sctype):
    """
    Return the string representation of a scalar dtype.

    Parameters
    ----------
    sctype : scalar dtype or object
        If a scalar dtype, the corresponding string character is
        returned. If an object, `sctype2char` tries to infer its scalar type
        and then return the corresponding string character.

    Returns
    -------
    typechar : str
        The string character corresponding to the scalar type.

    Raises
    ------
    ValueError
        If `sctype` is an object for which the type can not be inferred.

    See Also
    --------
    obj2sctype, issctype, issubsctype, mintypecode

    Examples
    --------
    >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
    ...     print np.sctype2char(sctype)
    l
    d
    D
    S
    O
    >>> x = np.array([1., 2-1.j])
    >>> np.sctype2char(x)
    'D'
    >>> np.sctype2char(list)
    'O'
    """
    # Normalize arbitrary input (type, dtype, array, instance) first.
    sctype = obj2sctype(sctype)
    if sctype is None:
        raise ValueError("unrecognized type")
    return _sctype2char_dict[sctype]
# ``cast[sctype](x)`` converts x to an array of that scalar type.
cast = _typedict()
# ScalarType lists the Python types accepted as array scalars; the Python 2
# branch uses the old ``types`` module names, Python 3 the builtins.
try:
    ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
                  _types.LongType, _types.BooleanType,
                  _types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
    # Py3K
    ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
ScalarType.extend(list(_sctype2char_dict.keys()))
ScalarType = tuple(ScalarType)
# Build one casting function per scalar type; ``k=key`` binds the loop
# variable at definition time (avoids the late-binding closure pitfall).
for key in list(_sctype2char_dict.keys()):
    cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
# ``_typestr[sctype]`` is the dtype .str without the byte-order character;
# flexible types keep just their char code (they have no fixed size).
_typestr = _typedict()
for key in list(_sctype2char_dict.keys()):
    if issubclass(key, allTypes['flexible']):
        _typestr[key] = _sctype2char_dict[key]
    else:
        _typestr[key] = empty((1,),key).dtype.str[1:]
# Make the typestring (e.g. 'i4') usable as a sctypeDict key as well.
for key, val in list(_typestr.items()):
    if val not in sctypeDict:
        sctypeDict[val] = key
# Allow plain Python type *names* ('int', 'float', ...) as dtype aliases;
# tuples map a name directly to a specific scalar type.
if sys.version_info[0] >= 3:
    _toadd = ['int', 'float', 'complex', 'bool', 'object',
              'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
else:
    _toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
              ('str', allTypes['string_']),
              'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
    if isinstance(name, tuple):
        sctypeDict[name[0]] = name[1]
    else:
        sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Export every scalar type as a module-level name and list it in __all__.
for key in allTypes:
    globals()[key] = allTypes[key]
    __all__.append(key)
del key
# Groups of dtype character codes by category (used e.g. by mintypecode).
typecodes = {'Character':'c',
             'Integer':'bhilqp',
             'UnsignedInteger':'BHILQP',
             'Float':'efdg',
             'Complex':'FDG',
             'AllInteger':'bBhHiIlLqQpP',
             'AllFloat':'efdgFDG',
             'Datetime': 'Mm',
             'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# Backwards-compatible public aliases.
typeDict = sctypeDict
typeNA = sctypeNA
# dtype kinds in coercion-precedence order (bool < unsigned < signed < ...).
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
# Candidate result types for coercion searches: bool, all integers except
# the pointer-sized 'p'/'P' duplicates, all floats/complex, then object.
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
def _find_common_coerce(a, b):
    """Return the smallest dtype (from __test_types, starting at `a`) to
    which both `a` and `b` can be safely cast, or None if there is none."""
    if a > b:
        # a already subsumes b
        return a
    try:
        thisind = __test_types.index(a.char)
    except ValueError:
        # a's kind is not in the candidate list (e.g. datetime)
        return None
    return _can_coerce_all([a,b], start=thisind)
def _can_coerce_all(dtypelist, start=0):
    """Return the first candidate dtype from ``__test_types`` (scanning
    from index ``start``) to which every dtype in *dtypelist* can be
    safely cast, or None when no candidate (or an empty list) qualifies.
    """
    count = len(dtypelist)
    if count == 0:
        return None
    if count == 1:
        # a single dtype trivially coerces to itself
        return dtypelist[0]
    for ind in range(start, __len_test_types):
        candidate = dtype(__test_types[ind])
        if all(candidate >= x for x in dtypelist):
            return candidate
    return None
def find_common_type(array_types, scalar_types):
    """
    Determine common type following standard coercion rules.

    Parameters
    ----------
    array_types : sequence
        A list of dtypes or dtype convertible objects representing arrays.
    scalar_types : sequence
        A list of dtypes or dtype convertible objects representing scalars.

    Returns
    -------
    datatype : dtype
        The common data type, which is the maximum of `array_types` ignoring
        `scalar_types`, unless the maximum of `scalar_types` is of a
        different kind (`dtype.kind`). If the kind is not understood, then
        None is returned.

    See Also
    --------
    dtype, common_type, can_cast, mintypecode

    Examples
    --------
    >>> np.find_common_type([], [np.int64, np.float32, np.complex])
    dtype('complex128')
    >>> np.find_common_type([np.int64, np.float32], [])
    dtype('float64')

    The standard casting rules ensure that a scalar cannot up-cast an
    array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) then
    the array:

    >>> np.find_common_type([np.float32], [np.int64, np.float64])
    dtype('float32')

    Complex is of a different type, so it up-casts the float in the
    `array_types` argument:

    >>> np.find_common_type([np.float32], [np.complex])
    dtype('complex128')

    Type specifier strings are convertible to dtypes and can therefore
    be used instead of dtypes:

    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
    dtype('complex128')
    """
    array_types = [dtype(x) for x in array_types]
    scalar_types = [dtype(x) for x in scalar_types]
    maxa = _can_coerce_all(array_types)
    maxsc = _can_coerce_all(scalar_types)
    # If one side is empty (or uncoercible), the other wins outright.
    if maxa is None:
        return maxsc
    if maxsc is None:
        return maxa
    try:
        index_a = _kind_list.index(maxa.kind)
        index_sc = _kind_list.index(maxsc.kind)
    except ValueError:
        # a kind outside the known precedence list cannot be reconciled
        return None
    if index_sc > index_a:
        # scalar kind outranks array kind: scalars may up-cast the result
        return _find_common_coerce(maxsc,maxa)
    else:
        return maxa
|
from django.contrib import admin
from .models import Repos
# Expose the Repos model in the Django admin with the default ModelAdmin.
admin.site.register(Repos)
|
import sys
sys.path += ["../"]
from mingus.containers.NoteContainer import NoteContainer
from mingus.containers.Note import Note
import unittest
class test_NoteContainers(unittest.TestCase):
    """Unit tests for mingus NoteContainer: construction, arithmetic
    operators, chord/interval builders and consonance predicates.

    The deprecated ``TestCase.assert_`` alias (removed in Python 3.12)
    is replaced throughout by its documented successor ``assertTrue``.
    """
    def setUp(self):
        # Containers of increasing size reused by most tests.
        self.n1 = NoteContainer()
        self.n2 = NoteContainer("A")
        self.n3 = NoteContainer(["A", "C", "E"])
        self.n4 = NoteContainer(["A", "C", "E", "F", "G"])
        self.n5 = NoteContainer(["A", "C", "E", "F", "G", "A"])
    def test_add_note(self):
        self.assertEqual(self.n2, self.n2.add_note("A"))
        self.assertEqual(NoteContainer("A"), self.n1.add_note("A"))
        self.n1 - "A"
        self.assertEqual(self.n3 + ["F", "G"], self.n4)
        self.assertEqual(self.n2 + ["C", "E"], self.n3 - ["F", "G"])
        self.n2 - ["C", "E"]
    def test_add_notes(self):
        self.assertEqual(self.n3, self.n1.add_notes(["A", "C", "E"]))
        self.n1.empty()
        self.assertEqual(self.n3, self.n1.add_notes([["A", 4], ["C", 5], ["E", 5]]))
        self.n1.empty()
        self.assertEqual(self.n2, self.n1.add_notes(Note("A")))
        self.n1.empty()
        self.assertEqual(self.n2, self.n1.add_notes([Note("A")]))
        self.n1.empty()
        self.assertEqual(self.n2, self.n1.add_notes("A"))
        self.n1.empty()
        self.assertEqual(self.n3, self.n2 + NoteContainer([["C", 5], ["E", 5]]))
        self.n2 = NoteContainer("A")
    def test_remove_note(self):
        n = NoteContainer(["C", "E", "G"])
        n.remove_note("C")
        self.assertEqual(NoteContainer(["E", "G"]), n)
        n.remove_note("E")
        self.assertEqual(NoteContainer(["G"]), n)
        n.remove_note("G")
        self.assertEqual(NoteContainer([]), n)
    def test_determine(self):
        n = NoteContainer(["C", "E", "G"])
        self.assertEqual(["C major triad"], n.determine())
        n.transpose("3")
        self.assertEqual(["E major triad"], n.determine())
    def test_remove_notes(self):
        pass
    def test_sort(self):
        n1 = NoteContainer(["Eb", "Gb", "C"])
        n2 = NoteContainer(["Eb", "Gb", "Cb"])
        n1.sort()
        n2.sort()
        self.assertEqual(Note("Eb"), n1[0])
        self.assertEqual(Note("Gb"), n2[1])
    def test_getitem(self):
        self.assertEqual(self.n2[0], Note("A"))
        self.assertEqual(self.n3[0], Note("A"))
        self.assertEqual(self.n4[0], Note("A"))
        self.assertEqual(self.n4[1], Note("C", 5))
        self.assertEqual(self.n4[2], Note("E", 5))
    def test_transpose(self):
        n = NoteContainer(["C", "E", "G"])
        self.assertEqual(NoteContainer(["E", "G#", "B"]), n.transpose("3"))
        n = NoteContainer(["C-6", "E-4", "G-2"])
        self.assertEqual(NoteContainer(["E-6", "G#-4", "B-2"]), n.transpose("3"))
    def test_get_note_names(self):
        # duplicates (the second "A" in n5) must be collapsed
        self.assertEqual(['A', 'C', 'E'], self.n3.get_note_names())
        self.assertEqual(['A', 'C', 'E', 'F', 'G'], self.n4.get_note_names())
        self.assertEqual(['A', 'C', 'E', 'F', 'G'], self.n5.get_note_names())
    def test_from_chord_shorthand(self):
        self.assertEqual(self.n3, NoteContainer().from_chord_shorthand("Am"))
    def test_from_progression_shorthand(self):
        self.assertEqual(self.n3, NoteContainer().from_progression_shorthand("VI"))
    def test_from_interval_shorthand(self):
        # third argument False builds the interval downwards
        self.assertEqual(NoteContainer(['C-4', 'G-4']), NoteContainer().from_interval_shorthand("C", "5"))
        self.assertEqual(NoteContainer(['F-3', 'C-4']), NoteContainer().from_interval_shorthand("C", "5", False))
    def test_is_consonant(self):
        self.assertTrue(NoteContainer().from_chord("Am").is_consonant())
        self.assertTrue(NoteContainer().from_chord("C").is_consonant())
        self.assertTrue(NoteContainer().from_chord("G").is_consonant())
        self.assertTrue(NoteContainer().from_chord("Dm").is_consonant())
        self.assertTrue(NoteContainer().from_chord("E").is_consonant())
        self.assertTrue(not NoteContainer().from_chord("E7").is_consonant())
        self.assertTrue(not NoteContainer().from_chord("Am7").is_consonant())
        self.assertTrue(not NoteContainer().from_chord("Gdim").is_consonant())
    def test_is_perfect_consonant(self):
        self.assertTrue(NoteContainer(['A', 'E']).is_perfect_consonant())
        self.assertTrue(NoteContainer(['A-4', 'A-6']).is_perfect_consonant())
        self.assertTrue(NoteContainer(['A', 'D']).is_perfect_consonant())
        # the perfect fourth only counts when include_fourths is allowed
        self.assertTrue(not NoteContainer(['A', 'D']).is_perfect_consonant(False))
        self.assertTrue(not NoteContainer().from_chord("Am").is_perfect_consonant())
        self.assertTrue(not NoteContainer().from_chord("C").is_perfect_consonant())
        self.assertTrue(not NoteContainer().from_chord("G").is_perfect_consonant())
        self.assertTrue(not NoteContainer().from_chord("Dm").is_perfect_consonant())
        self.assertTrue(not NoteContainer().from_chord("E").is_perfect_consonant())
    def test_is_imperfect_consonant(self):
        self.assertTrue(NoteContainer(['A', 'C']).is_imperfect_consonant())
        self.assertTrue(NoteContainer(['A', 'C#']).is_imperfect_consonant())
        self.assertTrue(NoteContainer(['A', 'F']).is_imperfect_consonant())
        self.assertTrue(NoteContainer(['A', 'F#']).is_imperfect_consonant())
        self.assertTrue(not NoteContainer(['A', 'B']).is_imperfect_consonant())
        self.assertTrue(not NoteContainer(['A', 'E']).is_imperfect_consonant())
        self.assertTrue(not NoteContainer(['A-4', 'A-5']).is_imperfect_consonant())
    def test_is_dissonant(self):
        self.assertTrue(NoteContainer().from_chord("E7").is_dissonant())
        self.assertTrue(NoteContainer().from_chord("Am7").is_dissonant())
        self.assertTrue(NoteContainer().from_chord("Gdim").is_dissonant())
        self.assertTrue(not NoteContainer().from_chord("Am").is_dissonant())
        self.assertTrue(not NoteContainer().from_chord("C").is_dissonant())
        self.assertTrue(not NoteContainer().from_chord("G").is_dissonant())
        self.assertTrue(not NoteContainer().from_chord("Dm").is_dissonant())
def suite():
    """Collect every test in test_NoteContainers into a runnable TestSuite."""
    return unittest.TestLoader().loadTestsFromTestCase(test_NoteContainers)
|
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils import simplejson as json
from django.db.models import Q
from django.template import RequestContext
from geonode.utils import resolve_object
if "geonode.contrib.groups" in settings.INSTALLED_APPS:
from geonode.contrib.groups.models import Group
class AjaxLoginForm(forms.Form):
    """Minimal username/password form validated by the ajax_login view."""
    password = forms.CharField(widget=forms.PasswordInput)
    username = forms.CharField()
def ajax_login(request):
    """Authenticate a user from POSTed credentials and log them in.

    Returns plain-text responses: 405 for non-POST requests, 400 for an
    invalid form or bad/disabled credentials, 200 on successful login.
    """
    if request.method != 'POST':
        return HttpResponse(content="ajax login requires HTTP POST",
                            status=405,
                            mimetype="text/plain")
    form = AjaxLoginForm(data=request.POST)
    if not form.is_valid():
        return HttpResponse(
            "The form you submitted doesn't look like a username/password combo.",
            mimetype="text/plain",
            status=400)
    user = authenticate(username=form.cleaned_data['username'],
                        password=form.cleaned_data['password'])
    if user is None or not user.is_active:
        return HttpResponse(content="bad credentials or disabled user",
                            status=400,
                            mimetype="text/plain")
    login(request, user)
    # Clean up the session test cookie set by the login page, if present.
    if request.session.test_cookie_worked():
        request.session.delete_test_cookie()
    return HttpResponse(content="successful login",
                        status=200,
                        mimetype="text/plain")
def ajax_lookup(request):
    """Autocomplete endpoint: return users (and, when the groups contrib
    app is installed, groups) whose name/profile matches the POSTed
    ``query`` prefix, as a JSON payload.
    """
    if request.method != 'POST':
        return HttpResponse(
            content='ajax user lookup requires HTTP POST',
            status=405,
            mimetype='text/plain'
        )
    elif 'query' not in request.POST:
        # NOTE(review): this error response carries no explicit status and
        # therefore returns 200 -- probably intended to be 400; confirm.
        return HttpResponse(
            content='use a field named "query" to specify a prefix to filter usernames',
            mimetype='text/plain'
        )
    keyword = request.POST['query']
    users = User.objects.filter(Q(username__startswith=keyword) |
                                Q(profile__name__contains=keyword) |
                                Q(profile__organization__contains=keyword))
    # groups is only defined when the contrib app is installed; the same
    # guard below keeps the reference safe.
    if "geonode.contrib.groups" in settings.INSTALLED_APPS:
        groups = Group.objects.filter(Q(title__startswith=keyword) |
                                      Q(description__contains=keyword))
    json_dict = {
        'users': [({'username': u.username}) for u in users],
        'count': users.count(),
    }
    if "geonode.contrib.groups" in settings.INSTALLED_APPS:
        json_dict['groups'] = [({'name': g.slug}) for g in groups]
    return HttpResponse(
        content=json.dumps(json_dict),
        mimetype='text/plain'
    )
def err403(request):
    """Redirect a forbidden request to the login page, preserving the
    originally requested URL in the ``next`` query parameter."""
    next_url = request.get_full_path()
    return HttpResponseRedirect(reverse('account_login') + '?next=' + next_url)
|
'''
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
from topicutils import stringize
class ListenerNotValidatable(RuntimeError):
    '''
    Raised when a listener is validated against a topic whose Listener
    Protocol Specification has not been defined yet.
    '''
    def __init__(self):
        RuntimeError.__init__(
            self, 'Topics args not set yet, cannot validate listener')
class UndefinedTopic(RuntimeError):
    '''
    Raised when a Topic object is requested for a topic name that has
    not been created yet.  Subclasses may supply a custom message
    format containing one ``%s`` placeholder for the topic name.
    '''
    def __init__(self, topicName, msgFormat=None):
        fmt = 'Topic "%s" doesn\'t exist' if msgFormat is None else msgFormat
        RuntimeError.__init__(self, fmt % topicName)
class UndefinedSubtopic(UndefinedTopic):
    '''
    Raised when a Topic object is requested for a subtopic name that has
    not been created within its parent topic.
    '''
    def __init__(self, parentName, subName):
        # embed the parent name now; the base class fills in the subtopic
        fmt = 'Topic "%s" doesn\'t have "%%s" as subtopic' % parentName
        UndefinedTopic.__init__(self, subName, fmt)
class ListenerSpecIncomplete(RuntimeError):
    '''
    Raised when a topic is created without an available specification
    while pub.setTopicUnspecifiedFatal() is in effect.
    '''
    def __init__(self, topicNameTuple):
        message = ("No topic specification for topic '%s'."
                   % stringize(topicNameTuple))
        message += (" See pub.getOrCreateTopic(), pub.addTopicDefnProvider(),"
                    " and/or pub.setTopicUnspecifiedFatal()")
        RuntimeError.__init__(self, message)
class ListenerSpecInvalid(RuntimeError):
    '''
    Raised when a topic's Listener Protocol Specification is defined to
    something invalid.  The offending argument names go in the 'args'
    sequence and 'msg' should contain a "%s" for them, e.g.
    ListenerSpecInvalid('duplicate args %s', ('arg1', 'arg2')).
    '''
    def __init__(self, msg, args):
        detail = msg % ','.join(args)
        RuntimeError.__init__(self, 'Invalid listener spec: ' + detail)
class ExcHandlerError(RuntimeError):
    '''
    When an exception gets raised within some listener during a
    sendMessage(), the registered handler (see pub.setListenerExcHandler())
    gets called (via its __call__ method) and the send operation can
    resume on remaining listeners. However, if the handler itself
    raises an exception while it is being called, the send operation
    must be aborted: an ExcHandlerError exception gets raised.
    '''
    def __init__(self, badExcListenerID, topicObj, origExc=None):
        '''The badExcListenerID is the name of the listener that raised
        the original exception that handler was attempting to handle.
        The topicObj is the pub.Topic object for the topic of the
        sendMessage that had an exception raised.
        The origExc is currently not used. '''
        self.badExcListenerID = badExcListenerID
        import traceback
        # Capture the traceback of the exception currently being handled;
        # this relies on being constructed inside the handler's except block.
        self.exc = traceback.format_exc()
        # Note: the '%' formats only the final string fragment, which holds
        # all three placeholders, so the concatenation below is correct.
        msg = 'The exception handler registered with pubsub raised an ' \
            + 'exception, *while* handling an exception raised by listener ' \
            + ' "%s" of topic "%s"):\n%s' \
            % (self.badExcListenerID, topicObj.getName(), self.exc)
        RuntimeError.__init__(self, msg)
|
"""
Module to contain internal system macros for operating on a configuration.
"""
import inspect
import rose.macro
import compulsory
import duplicate
import format
import rule
import trigger
import value
# Macro modules whose validator/transformer classes are auto-collected below.
MODULES = [compulsory, duplicate, format, rule, trigger, value]
class DefaultTransforms(rose.macro.MacroTransformerCollection):
    """Runs all the default fixers, such as trigger fixing."""
    def __init__(self):
        """Instantiate every transformer macro found in MODULES."""
        macros = []
        macro_info_tuples = rose.macro.get_macro_class_methods(MODULES)
        # The help text is unused here; '_help_str' avoids shadowing the
        # builtin ``help`` (the unused 'macro_name' local was dropped).
        for module_name, class_name, method, _help_str in macro_info_tuples:
            if method == rose.macro.TRANSFORM_METHOD:
                for module in MODULES:
                    if module.__name__ == module_name:
                        macros.append(getattr(module, class_name)())
        super(DefaultTransforms, self).__init__(*macros)
class DefaultValidators(rose.macro.MacroValidatorCollection):
    """Runs all the default checks, such as compulsory checking."""
    def __init__(self):
        """Instantiate every validator macro found in MODULES."""
        macros = []
        macro_info_tuples = rose.macro.get_macro_class_methods(MODULES)
        # The help text is unused here; '_help_str' avoids shadowing the
        # builtin ``help`` (the unused 'macro_name' local was dropped).
        for module_name, class_name, method, _help_str in macro_info_tuples:
            if method == rose.macro.VALIDATE_METHOD:
                for module in MODULES:
                    if module.__name__ == module_name:
                        macros.append(getattr(module, class_name)())
        super(DefaultValidators, self).__init__(*macros)
|
""" move_base_square.py - Version 1.1 2013-12-20
Command a robot to move in a square using move_base actions.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2012 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, Point, Quaternion, Twist
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from math import radians, pi
class MoveBaseSquare():
    """Drive a robot around a square via four move_base goals, publishing
    an RViz marker at each corner along the way."""
    def __init__(self):
        """Build the four corner poses, connect to move_base, and visit
        each corner in turn (blocking until done or shutdown)."""
        rospy.init_node('nav_test', anonymous=False)
        rospy.on_shutdown(self.shutdown)
        # How big is the square we want the robot to navigate?
        square_size = rospy.get_param("~square_size", 1.0) # meters
        # Create a list to hold the target quaternions (orientations)
        quaternions = list()
        # First define the corner orientations as Euler angles
        euler_angles = (pi/2, pi, 3*pi/2, 0)
        # Then convert the angles to quaternions
        for angle in euler_angles:
            q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
            q = Quaternion(*q_angle)
            quaternions.append(q)
        # Create a list to hold the waypoint poses
        waypoints = list()
        # Append each of the four waypoints to the list.  Each waypoint
        # is a pose consisting of a position and orientation in the map frame.
        waypoints.append(Pose(Point(square_size, 0.0, 0.0), quaternions[0]))
        waypoints.append(Pose(Point(square_size, square_size, 0.0), quaternions[1]))
        waypoints.append(Pose(Point(0.0, square_size, 0.0), quaternions[2]))
        waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
        # Initialize the visualization markers for RViz
        self.init_markers()
        # Set a visualization marker at each waypoint
        for waypoint in waypoints:
            p = Point()
            p = waypoint.position
            self.markers.points.append(p)
        # Publisher to manually control the robot (e.g. to stop it, queue_size=5)
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
        # Subscribe to the move_base action server
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        rospy.loginfo("Waiting for move_base action server...")
        # Wait 60 seconds for the action server to become available
        self.move_base.wait_for_server(rospy.Duration(60))
        rospy.loginfo("Connected to move base server")
        rospy.loginfo("Starting navigation test")
        # Initialize a counter to track waypoints
        i = 0
        # Cycle through the four waypoints
        while i < 4 and not rospy.is_shutdown():
            # Update the marker display
            self.marker_pub.publish(self.markers)
            # Intialize the waypoint goal
            goal = MoveBaseGoal()
            # Use the map frame to define goal poses
            goal.target_pose.header.frame_id = 'map'
            # Set the time stamp to "now"
            goal.target_pose.header.stamp = rospy.Time.now()
            # Set the goal pose to the i-th waypoint
            goal.target_pose.pose = waypoints[i]
            # Start the robot moving toward the goal
            self.move(goal)
            i += 1
    def move(self, goal):
        """Send one goal to move_base and block up to 60 s for the result,
        cancelling the goal on timeout."""
        # Send the goal pose to the MoveBaseAction server
        self.move_base.send_goal(goal)
        # Allow 1 minute to get there
        finished_within_time = self.move_base.wait_for_result(rospy.Duration(60))
        # If we don't get there in time, abort the goal
        if not finished_within_time:
            self.move_base.cancel_goal()
            rospy.loginfo("Timed out achieving goal")
        else:
            # We made it!
            state = self.move_base.get_state()
            if state == GoalStatus.SUCCEEDED:
                rospy.loginfo("Goal succeeded!")
    def init_markers(self):
        """Create the cube-list marker publisher and an empty marker
        message that __init__ fills with waypoint positions."""
        # Set up our waypoint markers
        marker_scale = 0.2
        marker_lifetime = 0 # 0 is forever
        marker_ns = 'waypoints'
        marker_id = 0
        marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
        # Define a marker publisher.
        self.marker_pub = rospy.Publisher('waypoint_markers', Marker, queue_size=5)
        # Initialize the marker points list.
        self.markers = Marker()
        self.markers.ns = marker_ns
        self.markers.id = marker_id
        self.markers.type = Marker.CUBE_LIST
        self.markers.action = Marker.ADD
        self.markers.lifetime = rospy.Duration(marker_lifetime)
        self.markers.scale.x = marker_scale
        self.markers.scale.y = marker_scale
        self.markers.color.r = marker_color['r']
        self.markers.color.g = marker_color['g']
        self.markers.color.b = marker_color['b']
        self.markers.color.a = marker_color['a']
        # NOTE(review): markers are published in the 'odom' frame while the
        # goals use 'map' -- the two coincide only if odom drift is zero;
        # confirm this is intentional.
        self.markers.header.frame_id = 'odom'
        self.markers.header.stamp = rospy.Time.now()
        self.markers.points = list()
    def shutdown(self):
        """On node shutdown: cancel any active goal, then command zero
        velocity so the robot actually stops."""
        rospy.loginfo("Stopping the robot...")
        # Cancel any active goals
        self.move_base.cancel_goal()
        rospy.sleep(2)
        # Stop the robot
        self.cmd_vel_pub.publish(Twist())
        rospy.sleep(1)
if __name__ == '__main__':
    try:
        # Constructing the class runs the whole navigation sequence.
        MoveBaseSquare()
    except rospy.ROSInterruptException:
        rospy.loginfo("Navigation test finished.")
|
'''
The index is a mapping of word stems to lists of objects whose attributes
contain the strings. The results are type, object pairs.
Basic workflow to build the search index:
For each term--
- reduce it to a set of words
- subtract stopwords
- stem each word and add the term's id to the search index
- add the removed tail to another set corresponding to each stem.
If the result is not huge, expand the search index to include fragments
of stemmed words as well.
'''
import sys
import json
import collections
import itertools
import time
import datetime
import re
import nltk
class IndexBuilder(object):
    def __init__(self):
        """Set up empty index state and the NLTK stemmer/stopword helpers.

        Note: this is Python 2 code (``new_id.next`` is used elsewhere).
        """
        # stem string -> integer id
        self.stem2id = {}
        # stem id -> set of object ids containing that stem
        self.index = collections.defaultdict(lambda: set())
        # stem id -> set of suffixes stripped off by stemming
        self.tails = collections.defaultdict(lambda: set())
        self.stemmer = nltk.stem.porter.PorterStemmer()
        self.stopwords = set(nltk.corpus.stopwords.words('english'))
        self.stem_word = self.stemmer.stem_word
        # word filter: keep only tokens longer than one character
        self.f = lambda _s: len(_s) > 1
        # monotonically increasing id source for new stems
        self.new_id = itertools.count()
        # object id -> stored (possibly trimmed) object dict
        self.objects = {}
def get_stem_id(self, stem):
# Get the stem's id (normalize stems)
try:
stem_id = self.stem2id[stem]
except KeyError:
stem_id = self.stem2id[stem] = self.new_id.next()
return stem_id
def _add_word(self, word, object_id, do_stem=True):
# Get the stem and tail.
if do_stem:
stem = self.stem_word(word)
tail = word.replace(stem, '', 1)
else:
stem = word
stem_id = self.get_stem_id(stem)
# Augment the index collection.
self.index[stem_id].add(object_id)
# Augment the tail collection.
if do_stem and tail:
self.tails[stem_id].add(tail)
# If stem diffs from word, add word as well.
if stem != word:
stem_id = self.get_stem_id(word)
self.index[stem_id].add(object_id)
def add(self, object_type, objects, text_func=None, substrs=False,
all_substrs=False, storekeys=None):
object_store = self.objects
#more_text = self.more_text
for obj in objects:
id_ = obj['_id']
if storekeys:
obj_ = dict(zip(storekeys, map(obj.get, storekeys)))
else:
obj_ = obj
object_store[id_] = obj_
text = text_func(obj)
#text += more_text(obj)
words = set(filter(self.f, re.findall(r'\w+', text.lower())))
words = words - self.stopwords
add_word = self._add_word
for w in words:
add_word(w, id_)
if all_substrs:
for substring in substrings(w):
add_word(substring, id_, do_stem=False)
elif substrs:
for substring in substrings(w, from_beginning_only=True):
add_word(substring, id_, do_stem=False)
@property
def id2stem(self):
return dict(t[::-1] for t in self.stem2id.items())
def jsondata(self):
return {
'stem2id': self.stem2id,
'id2stem': self.id2stem,
'index': dict((k, list(v)) for (k, v) in self.index.items()),
'tails': dict((k, list(v)) for (k, v) in self.tails.items()),
'objects': self.objects
}
def as_json(self, showsizes=False):
data = self.jsondata()
if showsizes:
from utils import humanize_bytes
for k, v in data.items():
js = json.dumps(v, cls=JSONDateEncoder)
print 'size of', k, humanize_bytes(sys.getsizeof(js))
return data
def dump(self, fp):
json.dump(self.jsondata(), fp, cls=JSONDateEncoder)
def query(self, word):
stem = self.stem_word(word)
stem_id = self.stem2id[stem]
return self.index[stem_id]
def qq(self, word):
stem = self.stem_word(word)
stem_id = self.stem2id[stem]
pairs = self.index[stem_id]
objects = self.objects
for type_, id_ in pairs:
yield objects[type_][id_]
def more_text(self, obj, clean_html=nltk.clean_html):
try:
with open('billtext/{state}/{_id}'.format(**obj)) as f:
html = f.read().decode('utf-8')
return clean_html(html)
except IOError:
return ''
class JSONDateEncoder(json.JSONEncoder):
    """
    JSONEncoder that encodes datetime objects as Unix timestamps.
    """
    def default(self, obj):
        """Return a numeric timestamp for datetime/date objects.

        Anything else is delegated to the base class (which raises
        TypeError).
        """
        if isinstance(obj, datetime.datetime):
            # BUG FIX: time.mktime interprets its argument in *local* time,
            # so feeding it utctimetuple() skewed the result by the local
            # UTC offset. calendar.timegm is the UTC counterpart; wrap in
            # float() to preserve the original float return type.
            return float(calendar.timegm(obj.utctimetuple()))
        elif isinstance(obj, datetime.date):
            # Date-only values keep the original local-midnight behavior.
            return time.mktime(obj.timetuple())
        return json.JSONEncoder.default(self, obj)
def substrings(word, from_beginning_only=False):
    '''Yield every substring of `word` that is at least two characters
    long, in (start, end) order.

    When `from_beginning_only` is true, only substrings anchored at the
    start of the word are produced.
    '''
    length = len(word)
    for start in range(length):
        # End index runs from two characters past `start` up to the end.
        for end in range(start + 2, length + 1):
            yield word[start:end]
        if from_beginning_only:
            # Only prefixes were requested; stop after the first pass.
            return
def trie_add(values, trie=None, terminus=0):
    '''Insert each string in `values` into `trie` (a nested dict),
    creating a fresh trie when none is supplied.

    A complete value is marked by storing it under the `terminus` key of
    its final node. The (possibly new) trie is returned.
    '''
    if trie is None:
        trie = {}
    for value in values:
        node = trie
        last = len(value) - 1
        for pos, ch in enumerate(value):
            # Descend, creating intermediate nodes on demand.
            node = node.setdefault(ch, {})
            if pos == last:
                node[terminus] = value
    return trie
|
import os
import tempfile
def mkstemppath(suffix='', prefix='tmp', dir=None, text=False):
    """Return the name of a temporary file that we can use.

    BUG FIX: the original implementation ignored every parameter; they are
    now forwarded to tempfile.mkstemp. The file is created securely, then
    closed and removed so only the (currently free) name is returned.
    Note the result is inherently racy: another process could claim the
    name before the caller re-creates the file.
    """
    fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
    os.close(fd)
    os.remove(name)
    return name
|
from util import hook, http, timesince
from datetime import datetime
from BeautifulSoup import BeautifulSoup
import re
baseurl = "http://www.metal-archives.com/"
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.command('maband', autohelp=False)
@hook.command(autohelp=False)
def maband(inp, conn=None, bot=None,nick=None, chan=None):
    """maband [band] -- Displays band info
    from metal archives."""
    # Nothing to search for.
    if not inp:
        return "You must specify a band"
    # Query the metal-archives advanced search endpoint with the same
    # parameters the site's own AJAX search sends.
    result = http.get_json(baseurl + "search/ajax-advanced/searching/bands",
                           bandName=inp, exactBandMatch=0, sEcho=1, iColumns=3,
                           sColumns='', iDisplayStart=0, iDisplayLength=200, sNames=',,')
    if result["error"] != "":
        return "Error: {}.".format(result["error"])
    if result["iTotalRecords"] == 0:
        return "No bands were found"
    rows = result["aaData"]
    # Announce at most five matches directly in the channel.
    limit = min(5, result["iTotalRecords"])
    for row in rows[:limit]:
        # Column 0 is an HTML anchor to the band page; 1/2 are genre/country.
        anchor = BeautifulSoup(row[0]).findAll('a')[0]
        conn.send(u"PRIVMSG {} :\x02{}\x0f - {} from {} (More info: {})".format(
            chan, anchor.contents[0], row[1], row[2], anchor["href"]))
    return u"{} bands containing the name {}".format(len(rows), inp)
@hook.command('mareviews', autohelp=False)
@hook.command(autohelp=False)
def mareviews(inp, conn=None, bot=None,nick=None, chan=None):
    """marating [band] -- Displays band rating
    from metal archives."""
    if not inp:
        return "You must specify a band"
    # Input is "band" or "band, album".
    comms = inp.split(",")
    album = None
    inp = comms[0].strip()
    if len(comms) > 1:
        album = str(comms[1]).strip()
    # Look the band up through the site's AJAX search.
    response = http.get_json(baseurl + "search/ajax-advanced/searching/bands",
                             bandName=inp, exactBandMatch=0, sEcho=1, iColumns=3,
                             sColumns='', iDisplayStart=0, iDisplayLength=200, sNames=',,')
    if response["error"] != "":
        return "Error: {}.".format(response["error"])
    if response["iTotalRecords"] == 0:
        return u"No bands were found named {}".format(inp)
    bands = response["aaData"]
    # The first result column is an HTML anchor to the band's page;
    # its href ends in .../bands/<name>/<numeric id>.
    anchor = BeautifulSoup(bands[0][0]).findAll("a")[0]
    band = anchor.contents[0]
    href = anchor["href"]
    regex1 = re.compile("(?<=bands/).*\/")
    rawBand = regex1.findall(href)[0]
    regex2 = re.compile("(?<={}/).*".format(rawBand.replace("/", "")))
    bandId = regex2.findall(href)[0]
    # Fetch all reviews for the band, newest first.
    reviews = http.get_json(baseurl + "review/ajax-list-band/id/{}/json/1".format(bandId),
                            sEcho=1, iColumns=4, sColums='', iDisplayStart=0, iDisplayLength=200,
                            mDataProp_0=0, mDataProp_1=1, mDataProp_2=2, mDataProp_3=3, iSortingCols=1, iSortCol_0=3,
                            sSortDir_0="desc", bSortable_0="true", bSortable_1="true", bSortable_2="true",
                            bSortable_3="true")
    percentages = []
    if not album:
        # Average over every review the band has.
        if type(reviews["aaData"]) == list and len(reviews["aaData"]) > 0:
            for review in reviews["aaData"]:
                percentages.append(int(review[1].replace("%", "")))
            average = sum(percentages) / len(percentages)
            return u'\x02{}\x0f has an average review of \x02{}\x0f% based on their album reviews. Use "," to separate artist, album.'.format(band, average)
        else:
            # Typo fix: "seperate" -> "separate" in the user-facing hint.
            return u'Could not calculate average review for {} or too many bands with the same name. Use "," to separate artist, album.'.format(band)
    else:
        # Average only over reviews whose album title matches.
        if type(reviews["aaData"]) == list and len(reviews["aaData"]) > 0:
            fullAlbum = ""
            # BUG FIX: the original guarded this loop with
            # `if reviews["aaData"] == list:` -- comparing the data to the
            # `list` *type*, which is always False -- so album lookups
            # never ran. The list-ness is already checked above.
            for review in reviews["aaData"]:
                ulink = review[0]
                alink = BeautifulSoup(ulink).findAll("a")
                text = alink[0].contents[0].lower()
                if text == album.lower() or text.find(album) != -1:
                    percentages.append(int(review[1].replace("%", "")))
                    fullAlbum = alink[0].contents[0]
            if len(percentages) > 0:
                average = sum(percentages) / len(percentages)
                return u'The album \x02{}\x0f by \x02{}\x0f has an average review of \x02{}\x0f%'.format(fullAlbum, band, average)
            else:
                return u'Could not find the album {} for the band {}'.format(album, band)
        else:
            return u'Could not calculate average review for {} or too many bands with the same name'.format(band)
|
# Fit a NiftiMasker on the functional image using the ventral-temporal
# mask from the Haxby dataset, then plot the fitted mask.
# NOTE(review): `NiftiMasker`, `haxby_data`, `func_file` and `plot_roi`
# are not defined in this fragment -- presumably imported from nilearn
# elsewhere; confirm. X is presumably a 2-D (samples, voxels) array per
# the nilearn masker contract -- verify against the nilearn docs.
masker = NiftiMasker(haxby_data.mask_vt[0])
X = masker.fit_transform(func_file)
# mask_img_ is only available after fit/fit_transform.
plot_roi(masker.mask_img_)
|
class Solution:
    def intToRoman(self, num: int) -> str:
        """Convert an integer in [0, 3999] to its Roman numeral.

        Replaces the original per-digit branch ladder with a single
        table-driven greedy pass: subtractive forms (CM, CD, XC, XL, IX,
        IV) are entries in the table, so every digit position is handled
        uniformly. For num == 0 the result is the empty string, matching
        the original behavior.
        """
        # Value/symbol pairs in strictly descending order of value.
        table = (
            (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
            (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
            (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
        )
        parts = []
        for value, symbol in table:
            # How many times this symbol fits; keep the remainder.
            count, num = divmod(num, value)
            parts.append(symbol * count)
        return ''.join(parts)
|
from __future__ import unicode_literals
from django.apps import AppConfig
class PadronConfig(AppConfig):
    """Django application configuration for the 'padron' app."""
    name = 'padron'
    verbose_name = 'Padron Colorado'
|
"""
maya2katana
Copyright (C) 2016-2019 Andriy Babak, Animagrad
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Author: Andriy Babak
e-mail: ababak@gmail.com
------------------------------
Copy shader nodes to Katana
------------------------------
"""
__version__ = "3.3.4"
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import utils
reload(utils)
import clip
reload(clip)
copy = clip.copy
generate_xml = clip.generate_xml
del clip
|
import os
import shutil
import itertools
from slpkg.messages import Msg
from slpkg.utils import Utils
from slpkg.__metadata__ import MetaData as _meta_
class NewConfig(Utils):
    """Manage .new configuration files

    Finds every '*.new' file under /etc and lets the user keep,
    overwrite, remove, diff or merge each one against the existing
    configuration file.
    """
    def __init__(self):
        self.meta = _meta_
        self.msg = Msg()
        self.red = self.meta.color["RED"]
        self.green = self.meta.color["GREEN"]
        self.endc = self.meta.color["ENDC"]
        # When colors are disabled a ")" is printed after each hotkey,
        # e.g. "K)eep" instead of a highlighted "K".
        self.br = ""
        if self.meta.use_colors in ["off", "OFF"]:
            self.br = ")"
        self.etc = "/etc/"
        # Absolute paths of all discovered '.new' files.
        self.news = []
    def run(self):
        """print .new configuration files
        """
        self.find_new()
        for n in self.news:
            print(n)
        print()
        self.msg.template(78)
        print(f"| Installed {len(self.news)} new configuration files:")
        self.msg.template(78)
        self.choices()
    def find_new(self):
        """Find all '.new' files from /etc/ folder
        and subfolders
        """
        print("Search for .new configuration files:\n")
        for path, dirs, files in os.walk(self.etc):
            del dirs  # unused
            for f in files:
                if f.endswith(".new"):
                    self.news.append(os.path.join(path, f))
        if not self.news:
            print(" No new configuration files\n")
            raise SystemExit()
    def choices(self):
        """Menu options for new configuration files
        """
        print(f"| {self.red}K{self.endc}{self.br}eep the old and .new files, no changes")
        print(f"| {self.red}O{self.endc}{self.br}verwrite all old configuration files with new ones")
        print("| The old files will be saved with suffix .old")
        print(f"| {self.red}R{self.endc}{self.br}emove all .new files")
        print(f"| {self.red}P{self.endc}{self.br}rompt K, O, R, D, M option for each single file")
        print(f"| {self.red}Q{self.endc}{self.br}uit from menu")
        self.msg.template(78)
        try:
            choose = input("\nWhat would you like to do [K/O/R/P/Q]? ")
        except EOFError:
            # Ctrl-D at the prompt: leave cleanly.
            print()
            raise SystemExit()
        print()
        if choose in ("K", "k"):
            self.keep()
        elif choose in ("O", "o"):
            self.overwrite_all()
        elif choose in ("R", "r"):
            self.remove_all()
        elif choose in ("P", "p"):
            self.prompt()
    def overwrite_all(self):
        """Overwrite all .new files and keep
        old with suffix .old
        """
        for n in self.news:
            self._overwrite(n)
    def remove_all(self):
        """Remove all .new files
        """
        for n in self.news:
            self._remove(n)
        print()
    def prompt(self):
        """Select file
        """
        self.msg.template(78)
        print("| Choose what to do file by file:")
        print("| {0}K{1}{2}eep, {3}O{4}{5}verwrite, {6}R{7}{8}emove, "
              "{9}D{10}{11}iff, {12}M{13}{14}erge, {15}Q{16}{17}uit".format(
                  self.red, self.endc, self.br, self.red, self.endc, self.br,
                  self.red, self.endc, self.br, self.red, self.endc, self.br,
                  self.red, self.endc, self.br, self.red, self.endc, self.br))
        self.msg.template(78)
        print()
        self.i = 0
        try:
            while self.i < len(self.news):
                self.question(self.news[self.i])
                self.i += 1
        except EOFError:
            print()
            raise SystemExit()
    def question(self, n):
        """Choose what to do, file by file
        """
        print()
        prompt_ask = input(f"{n} [K/O/R/D/M/Q]? ")
        print()
        if prompt_ask in ("K", "k"):
            self.keep()
        elif prompt_ask in ("O", "o"):
            self._overwrite(n)
        elif prompt_ask in ("R", "r"):
            self._remove(n)
        elif prompt_ask in ("D", "d"):
            self.diff(n)
            # Re-prompt for the same file after showing the diff.
            self.i -= 1
        elif prompt_ask in ("M", "m"):
            self.merge(n)
        elif prompt_ask in ("Q", "q", "quit"):
            self.quit()
    def _remove(self, n):
        """Remove one single file
        """
        if os.path.isfile(n):
            os.remove(n)
        if not os.path.isfile(n):
            print(f"File '{n}' removed")
    def _overwrite(self, n):
        """Overwrite old file with new and keep file with suffix .old
        """
        if os.path.isfile(n[:-4]):
            shutil.copy2(n[:-4], n[:-4] + ".old")
            print("Old file {0} saved as {1}.old".format(
                n[:-4].split("/")[-1], n[:-4].split("/")[-1]))
        if os.path.isfile(n):
            shutil.move(n, n[:-4])
            print("New file {0} overwrite as {1}".format(
                n.split("/")[-1], n[:-4].split("/")[-1]))
    def keep(self):
        """Keep both files: deliberately do nothing."""
        pass
    def diff(self, n):
        """Print the differences between the two files
        """
        # BUG FIX: initialize both sides so a missing counterpart no
        # longer raises NameError below.
        diff1, diff2 = [], []
        if os.path.isfile(n[:-4]):
            diff1 = self.read_file(n[:-4]).splitlines()
        if os.path.isfile(n):
            diff2 = self.read_file(n).splitlines()
        lines, ln, c = [], 0, 0
        # BUG FIX: itertools.izip_longest is Python 2 only; this module
        # targets Python 3 (f-strings), so use zip_longest. It pads the
        # shorter file with None so trailing lines still show up.
        for a, b in itertools.zip_longest(diff1, diff2):
            ln += 1
            if a != b:
                # Find the first differing column for the @@ header.
                for s1, s2 in itertools.zip_longest(str(a), str(b)):
                    c += 1
                    if s1 != s2:
                        break
                print(f"@@ -{ln},{c} +{ln},{c} @@\n")
                # Up to three lines of context before the change.
                for line in lines[-3:]:
                    print(f"{line}")
                if a is None:
                    a = ""
                print(f"{self.red}-{self.endc}{a}")
                if b is None:
                    b = ""
                print(f"{self.green}+{self.endc}{b}")
                lines = []
                c = 0
            else:
                lines.append(a)
    def merge(self, n):
        """Merge new file into old
        """
        # BUG FIX: default to empty lists so a missing counterpart no
        # longer raises NameError.
        old, new = [], []
        if os.path.isfile(n[:-4]):
            old = self.read_file(n[:-4]).splitlines()
        if os.path.isfile(n):
            new = self.read_file(n).splitlines()
        with open(n[:-4], "w") as out:
            # BUG FIX: zip_longest (izip_longest is Python 2 only).
            # Lines that differ are taken from the new file.
            for l1, l2 in itertools.zip_longest(old, new):
                if l1 is None:
                    l1 = ""
                if l2 is None:
                    l2 = ""
                if l1 != l2:
                    out.write(l2 + "\n")
                else:
                    out.write(l1 + "\n")
        print("The file {0} merged in file {1}".format(
            n.split("/")[-1], n[:-4].split("/")[-1]))
    def quit(self):
        """Exit the menu."""
        raise SystemExit()
|
"""
Inproc transport classes.
"""
import asyncio
from ..common import (
AsyncBox,
ClosableAsyncObject,
CompositeClosableAsyncObject,
cancel_on_closing,
)
class Channel(ClosableAsyncObject):
    """One endpoint of an in-process bidirectional pipe.

    Two channels are connected with `link()`; writing to one delivers
    the item into the linked channel's inbox.
    """
    def on_open(self, path):
        super().on_open()
        self._path = path
        # The peer Channel once link() has been called, else None.
        self._linked_channel = None
        self._inbox = AsyncBox(loop=self.loop)
    async def on_close(self):
        self._inbox.close()
        if self._linked_channel:
            # Closing one end closes the peer and unlinks both sides.
            self._linked_channel.close()
            self._linked_channel._linked_channel = None
            self._linked_channel = None
        await self._inbox.wait_closed()
    @property
    def path(self):
        # The path this channel was opened for (read-only).
        return self._path
    def __repr__(self):
        return 'Channel(path=%r)' % self._path
    def link(self, channel):
        # Symmetric link: each side stores the other as its peer.
        self._linked_channel = channel
        channel._linked_channel = self
    @cancel_on_closing
    async def read(self):
        # Receive the next item delivered by the peer's write().
        return await self._inbox.read()
    @cancel_on_closing
    async def write(self, item):
        # Deliver into the peer's inbox; silently dropped when unlinked.
        if self._linked_channel:
            await self._linked_channel._inbox.write(item)
class InprocServer(CompositeClosableAsyncObject):
    """Accepts in-process connections and runs a handler per channel."""
    def on_open(self, handler):
        super().on_open()
        # Coroutine invoked with the server-side Channel of each connection.
        self._handler = handler
        # Pending handler tasks; awaited on close.
        self._tasks = []
    async def on_close(self):
        if self._tasks:
            # Wait on a copy: done-callbacks mutate self._tasks as tasks
            # finish.
            await asyncio.wait(self._tasks[:])
        await super().on_close()
    @cancel_on_closing
    async def create_channel(self, path):
        """Create a linked channel pair; return the client-side end."""
        left = Channel(path=path, loop=self.loop)
        self.register_child(left)
        right = Channel(path=path, loop=self.loop)
        self.register_child(right)
        left.link(right)
        # Run the handler for the server side of the pair.
        task = asyncio.ensure_future(self._handler(right))
        self._tasks.append(task)
        # Decorator usage registers remove_task as the done-callback.
        @task.add_done_callback
        def remove_task(future):
            self._tasks.remove(task)
        return left
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required `user` foreign key to the Note model.

    Existing rows are backfilled with user id 1; preserve_default=False
    removes that default once the migration has run.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('notes', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='note',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
|
"""
Created on Sat Feb 21 16:05:41 2015
@author: Vidar Tonaas Fauske
"""
from .mainwindowbase import MainWindowBase, tr
import os
import inspect
from functools import partial
from qtpy import QtGui, QtCore, QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QDialogButtonBox
from hyperspyui.smartcolorsvgiconengine import SmartColorSVGIconEngine
from hyperspyui.advancedaction import AdvancedAction
from hyperspyui.log import logger as _logger
from hyperspyui import hooktraitsui
hooktraitsui.hook_traitsui()
class MainWindowUtils(MainWindowBase):
    """
    Adds UI utility functions to the main window, including traitsui dialog
    capture.
    """
    def __init__(self, parent=None):
        # traitsui backend bindings: intercept dialogs created/destroyed
        # by traitsui so they can be captured or re-parented below.
        hooktraitsui.connect_created(self.on_traits_dialog)
        hooktraitsui.connect_destroyed(self.on_traits_destroyed)
        super(MainWindowUtils, self).__init__(parent)
    # --------- traitsui Events ---------
    def capture_traits_dialog(self, callback):
        # The next parentless traits dialog is handed to `callback`
        # instead of being shown.
        self.should_capture_traits = callback
    def on_traits_dialog(self, dialog, ui, parent):
        self.traits_dialogs.append(dialog)
        if parent is None:
            if self.should_capture_traits:
                # One-shot capture requested via capture_traits_dialog().
                self.should_capture_traits(dialog)
                self.should_capture_traits = None
            else:
                # Adopt the orphan dialog as a tool window of this window.
                dialog.setParent(self, QtCore.Qt.Tool)
                dialog.show()
                dialog.activateWindow()
    def on_traits_destroyed(self, dialog):
        if dialog in self.traits_dialogs:
            self.traits_dialogs.remove(dialog)
    # --------- End traitsui Events ---------
    def set_status(self, msg):
        """
        Display 'msg' in window's statusbar.
        """
        # TODO: What info is needed? Add simple label first, create utility to
        # add more?
        self.statusBar().showMessage(msg)
    def _make_action(self, label, icon, shortcut, tip):
        # Build an AdvancedAction with optional icon, shortcut and tip.
        if icon is None:
            ac = AdvancedAction(tr(label), self)
        else:
            icon = self.make_icon(icon)
            ac = AdvancedAction(icon, tr(label), self)
        if shortcut is not None:
            ac.setShortcut(shortcut)
        if tip is not None:
            ac.setStatusTip(tr(tip))
        return ac
    def _wire_action(self, ac, key, callback, selection_callback):
        # BUG FIX: inspect.getargspec was deprecated and removed in
        # Python 3.11, and raised ValueError for callables with
        # annotations or keyword-only arguments. getfullargspec handles
        # all of those and returns the same .args list.
        try:
            keywords = inspect.getfullargspec(callback).args
        except TypeError:
            # Callback is not introspectable (e.g. a builtin).
            keywords = None
        if keywords and 'advanced' in keywords:
            # The callback supports an "advanced" mode; connect the
            # two-argument (checked, advanced) overload of triggered.
            orig_callback = callback
            def wrap(checked, advanced):
                orig_callback(advanced=advanced)
            callback = wrap
            ac.triggered[bool, bool].connect(callback)
        else:
            ac.triggered.connect(callback)
        # Use docstring for action
        if callback.__doc__:
            d = callback.__doc__
            if d.startswith('partial('):
                # Fix docstring of partial functions:
                d = callback.func.__doc__
            ac.__doc__ = d
        if selection_callback is not None:
            # Start disabled; the selection callback enables the action
            # when the current figure/signal supports it.
            self._action_selection_cbs[key] = selection_callback
            ac.setEnabled(False)
    def add_action(self, key, label, callback, tip=None, icon=None,
                   shortcut=None, selection_callback=None):
        """
        Create and add a QAction to self.actions[key]. 'label' is used as the
        short description of the action, and 'tip' as the long description.
        The tip is typically shown in the statusbar. The callback is called
        when the action is triggered(). The optional 'icon' should either be a
        QIcon, or a path to an icon file, and is used to depict the action on
        toolbar buttons and in menus.
        If `selection_callback` is supplied, it is called whenever the
        currently selected signal/figure changes. This allows the callback to
        enable/disable the action to reflect whether the selected figure/signal
        is supported for the action.
        """
        ac = self._make_action(label, icon, shortcut, tip)
        self._wire_action(ac, key, callback, selection_callback)
        self.actions[key] = ac
        return ac
    def add_toolbar_button(self, category, action):
        """
        Add the supplied 'action' as a toolbar button. If the toolbar defined
        by 'category' does not exist, it will be created in
        self.toolbars[category].
        """
        if category in self.toolbars:
            tb = self.toolbars[category]
        else:
            tb = QtWidgets.QToolBar(tr(category) + tr(" toolbar"), self)
            tb.setObjectName(category + "_toolbar")
            self.addToolBar(Qt.LeftToolBarArea, tb)
            self.toolbars[category] = tb
        if not isinstance(action, QtWidgets.QAction):
            # An action key was passed instead of the QAction itself.
            action = self.actions[action]
        tb.addAction(action)
    def remove_toolbar_button(self, category, action):
        """Remove 'action' from the 'category' toolbar; drop the toolbar
        when it becomes empty."""
        tb = self.toolbars[category]
        tb.removeAction(action)
        if len(tb.actions()) < 1:
            self.removeToolBar(tb)
    def add_menuitem(self, category, action, label=None):
        """
        Add the supplied 'action' as a menu entry. If the menu defined
        by 'category' does not exist, it will be created in
        self.menus[category].
        If the label argument is not supplied, category will be used.
        """
        if category in self.menus:
            m = self.menus[category]
        else:
            if label is None:
                label = category
            # Make sure we add menu before window menu
            if self.windowmenu is None:
                m = self.menuBar().addMenu(label)
            else:
                m = QtWidgets.QMenu(label)
                self.menuBar().insertMenu(self.windowmenu.menuAction(), m)
            self.menus[category] = m
        if not isinstance(action, QtWidgets.QAction):
            action = self.actions[action]
        m.addAction(action)
    def add_tool(self, tool, selection_callback=None):
        """Register a tool (class or instance) and create its action(s).

        Returns the key under which the tool's action was stored.
        """
        if isinstance(tool, type):
            t = tool(self.figures)
            key = tool.__name__
        else:
            t = tool
            try:
                key = t.get_name()
            except NotImplementedError:
                key = tool.__class__.__name__
        self.tools.append(t)
        if t.single_action() is not None:
            # One-shot tool: a plain toolbar button.
            self.add_action(key, t.get_name(), t.single_action(),
                            selection_callback=selection_callback,
                            icon=t.get_icon(), tip=t.get_description())
            self.add_toolbar_button(t.get_category(), self.actions[key])
        elif t.is_selectable():
            # Mode tool: checkable button in the exclusive tool group.
            f = partial(self.select_tool, t)
            self.add_action(key, t.get_name(), f, icon=t.get_icon(),
                            selection_callback=selection_callback,
                            tip=t.get_description())
            self.selectable_tools.addAction(self.actions[key])
            self.actions[key].setCheckable(True)
            self.add_toolbar_button(t.get_category(), self.actions[key])
        return key
    def remove_tool(self, tool):
        """Unregister a tool (class or instance) and remove its action."""
        if isinstance(tool, type):
            # NOTE(review): if no registered tool is an instance of
            # `tool`, `t` is left bound to the last tool iterated --
            # callers are assumed to only pass registered tool types.
            for t in self.tools:
                if isinstance(t, tool):
                    break
            key = tool.__name__
        else:
            t = tool
            try:
                key = t.get_name()
            except NotImplementedError:
                key = tool.__class__.__name__
        self.tools.remove(t)
        ac = self.actions.pop(key, None)
        if ac is not None:
            self.remove_toolbar_button(t.get_category(), ac)
            if t.is_selectable():
                self.selectable_tools.removeAction(ac)
    def add_widget(self, widget, floating=None):
        """
        Add the passed 'widget' to the main window. If the widget is not a
        QDockWidget, it will be wrapped into one. The QDockWidget is returned.
        The widget is also added to the window menu self.windowmenu, so that
        its visibility can be toggled.
        The parameter 'floating' specifies whether the widget should be made
        floating. If None, the value of the setting 'default_widget_floating'
        is used.
        """
        if floating is None:
            floating = self.settings['default_widget_floating', bool]
        if isinstance(widget, QtWidgets.QDockWidget):
            d = widget
        else:
            # Wrap the plain widget in a dock widget.
            d = QtWidgets.QDockWidget(self)
            d.setWidget(widget)
            d.setWindowTitle(widget.windowTitle())
        if not d.objectName():
            d.setObjectName(d.windowTitle())
        d.setAllowedAreas(Qt.RightDockWidgetArea | Qt.LeftDockWidgetArea)
        self.addDockWidget(Qt.RightDockWidgetArea, d)
        d.setFloating(floating)
        self.widgets.append(widget)
        # Insert widgets in Windows menu before separator (figures are after)
        self.windowmenu.insertAction(self.windowmenu_sep, d.toggleViewAction())
        return d
    def make_icon(self, icon):
        """
        Create an icon that coheres to the internal standard for icons.
        Parameters
        ----------
        icon: {string | QIcon}
            If icon is a path, it loads the file. If the path does not
            correspond to a valid file, it is checked if it is a valid
            path relative to the 'images' folder of the package.
            After loading, SVG files will be run through
            `SmartColorSVGIconEngine` to adapt suitable icons to the
            current palette. If a QIcon is passed directly, it is also
            sent through `SmartColorSVGIconEngine`.
        """
        if not isinstance(icon, QtGui.QIcon):
            if isinstance(icon, str) and not os.path.isfile(icon):
                # Fall back to the package's bundled images folder.
                sugg = os.path.dirname(__file__) + '/images/' + icon
                if os.path.isfile(sugg):
                    icon = sugg
            if isinstance(icon, str) and (
                    icon.endswith('svg') or
                    icon.endswith('svgz') or
                    icon.endswith('svg.gz')):
                # Route SVGs through the palette-aware engine.
                ie = SmartColorSVGIconEngine()
                path = icon
                icon = QtGui.QIcon(ie)
                icon.addFile(path)
            else:
                icon = QtGui.QIcon(icon)
        else:
            icon = QtGui.QIcon(SmartColorSVGIconEngine(icon))
        return icon
    def prompt_files(self, extension_filter=None, path=None, exists=True,
                     title=None, def_filter=None):
        """Show a file dialog; open-mode when `exists`, else save-mode.

        Returns the selected filename(s), normalized across Qt bindings.
        """
        if title is None:
            title = tr('Load file') if exists else tr('Save file')
        path = path or self.cur_dir or ''
        if def_filter is None and extension_filter:
            # Default to the first entry of the ";;"-separated filter.
            def_filter = extension_filter.split(';;', maxsplit=1)[0]
        if exists:
            filenames = QtWidgets.QFileDialog.getOpenFileNames(
                self, title, path, extension_filter)
        else:
            filenames = QtWidgets.QFileDialog.getSaveFileName(
                self, title, path, extension_filter, def_filter)
        # Pyside returns tuple, PyQt not
        if isinstance(filenames, tuple):
            filenames = filenames[0]
        return filenames
    def get_figure_filepath_suggestion(self, figure, deault_ext=None):
        """
        Get a suggestion for a file path for saving `figure`.

        Note: the misspelled parameter name 'deault_ext' is kept for
        backward compatibility with existing keyword callers.
        """
        canvas = figure.widget()
        if deault_ext is None:
            deault_ext = canvas.get_default_filetype()
        f = canvas.get_default_filename()
        if not f:
            f = self.cur_dir
        # Analyze suggested filename
        base, tail = os.path.split(f)
        fn, ext = os.path.splitext(tail)
        _logger.debug('fn before cleaning is: {}'.format(fn))
        # Remove illegal characters and newlines from filename:
        reserved_characters = r'<>:"/\|?*'
        for c in reserved_characters:
            fn = fn.replace(c, '')
        fn = fn.replace('\n', ' ')
        _logger.debug('fn after cleaning is: {}'.format(fn))
        # If no directory in filename, use self.cur_dir's dirname
        if base is None or base == "":
            base = os.path.dirname(self.cur_dir)
        # If extension is not valid, use the default
        if ext not in canvas.get_supported_filetypes():
            ext = deault_ext
        # Build suggestion and return
        path_suggestion = os.path.sep.join((base, fn))
        path_suggestion = os.path.extsep.join((path_suggestion, ext))
        return path_suggestion
    def save_figure(self, figure=None):
        """
        Save the matplotlib figure. If a figure is not passed, it tries to
        save whichever is active (using `activeSubWindow()` of the MDI area).
        """
        if figure is None:
            figure = self.main_frame.activeSubWindow()
            if figure is None:
                return
        path_suggestion = self.get_figure_filepath_suggestion(figure)
        canvas = figure.widget()
        # Build type selection string
        def_type = os.path.extsep + canvas.get_default_filetype()
        extensions = canvas.get_supported_filetypes_grouped()
        type_choices = "All types (*.*)"
        for group, exts in extensions.items():
            fmt = group + \
                ' (' + \
                '; '.join(["*" + os.path.extsep + sube for sube in exts]) + ')'
            type_choices = ';;'.join((type_choices, fmt))
            if def_type[1:] in exts:
                # Preselect the filter group of the default file type.
                def_type = fmt
        # Present filename prompt
        filename = self.prompt_files(type_choices, path_suggestion,
                                     exists=False, def_filter=def_type)
        if filename:
            canvas.figure.savefig(filename)
    def show_okcancel_dialog(self, title, widget, modal=True):
        """
        Show a dialog with the passed widget and OK and cancel buttons.
        """
        diag = QtWidgets.QDialog(self)
        diag.setWindowTitle(title)
        diag.setWindowFlags(Qt.Tool)
        btns = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
                                Qt.Horizontal, diag)
        btns.accepted.connect(diag.accept)
        btns.rejected.connect(diag.reject)
        box = QtWidgets.QVBoxLayout(diag)
        box.addWidget(widget)
        box.addWidget(btns)
        diag.setLayout(box)
        if modal:
            diag.exec_()
        else:
            diag.show()
        # Return the dialog for result checking, and to keep widget in scope
        # for caller
        return diag
class MainWindowActionRecorder(MainWindowUtils):
    """
    Adds recorder functionality.

    Every triggered action key and every executed console statement is
    forwarded to all recorders registered in `self.recorders`.
    """
    def __init__(self, parent=None):
        # Must exist before the base class wires any actions.
        self.recorders = []
        super(MainWindowActionRecorder, self).__init__(parent)
    def _wire_action(self, ac, key, callback, selection_callback):
        # Connect monitor so record_action fires on every trigger.
        ac.triggered.connect(partial(self.record_action, key))
        # Wire as normal
        super(MainWindowActionRecorder, self)._wire_action(
            ac, key, callback, selection_callback)
    def record_action(self, key):
        # Forward the triggered action's key to every recorder.
        for r in self.recorders:
            r.add_action(key)
    def record_code(self, code):
        # Forward raw code text to every recorder.
        for r in self.recorders:
            r.add_code(code)
    def on_console_executing(self, source):
        # NOTE(review): the base implementation is outside this view --
        # presumably defined on MainWindowBase; confirm.
        super(MainWindowActionRecorder, self).on_console_executing(source)
        self.record_code(source)
|
from __future__ import absolute_import, division, print_function
from builtins import * # 'future' module
import sys
import logging
import states
if __name__ == '__main__':
    # '-v' anywhere on the command line enables debug logging.
    if '-v' in sys.argv:
        logging.basicConfig(level=logging.DEBUG)
    # Hand control to the state machine in the local `states` module.
    states.start()
|
import re
import fs, fs.errors
from fs.base import FS as fsFS
class AppSourceConnectionError(Exception):
    """Raised when an AppSource's backing filesystem cannot be opened."""
    pass
class AppSourceConnection(object):
    """An AppSourceConnection provides methods to access apps in an AppSource."""
    @staticmethod
    def create(source, options):
        """Returns a new AppSourceConnection instance given an AppSource.

        `options["type"]` selects the concrete connection class from the
        AppSourceConnectionTypes registry (defined elsewhere in this
        module); anything else raises ValueError.
        """
        if options.get("type") in AppSourceConnectionTypes:
            return AppSourceConnectionTypes[options["type"]](source, options)
        raise ValueError("Invalid AppSourceConnection type: %s" % repr(options.get("type")))
    def list_apps(self):
        # Subclasses must yield App instances.
        # Fix: NotImplementedError (an Exception subclass, so existing
        # broad handlers still catch it) instead of a generic Exception.
        raise NotImplementedError("list_apps must be implemented by a subclass")
    def get_app(self, name):
        # Subclasses must return the App with the given name.
        raise NotImplementedError("get_app must be implemented by a subclass")
class App(object):
    """An App is a remote definition of Modules and
    associated resources."""
    def __init__(self, store, name):
        self.store = store
        self.name = name
    def __repr__(self):
        return "<App {name} in {store}>".format(name=self.name, store=self.store)
    def get_inputs(self):
        # Subclasses must implement (NotImplementedError is an Exception
        # subclass, so existing broad handlers still catch it).
        raise NotImplementedError("get_inputs must be implemented by a subclass")
    def get_modules(self):
        raise NotImplementedError("get_modules must be implemented by a subclass")
    def get_assets(self):
        raise NotImplementedError("get_assets must be implemented by a subclass")
    def get_new_version_number(self):
        """Return this app's version string if it is not yet stored in
        the database, else None."""
        # Check if the version of this app is different from
        # the versions already in the database.
        try:
            app = read_yaml_file(self.read_file("app.yaml"))
            version_number = app["catalog"]["version"]
        except Exception:
            # Missing/unreadable app.yaml or absent catalog.version is
            # treated as "nothing new". (Fix: was a bare `except:` that
            # also swallowed KeyboardInterrupt/SystemExit.)
            return None
        # If the version number is in the database, there's
        # nothing new to load.
        from guidedmodules.models import AppVersion
        if AppVersion.objects\
            .filter(source=self.store.source,
                    appname=self.name,
                    version_number=version_number)\
            .exists():
            return None
        # If it's new, return the version number.
        return version_number
    def get_appversions(self, show_in_catalog=True):
        # Return the AppVersions in the database for this app
        # that are included in the compliance apps catalog.
        from guidedmodules.models import AppVersion
        return AppVersion.objects\
            .filter(source=self.store.source, appname=self.name, show_in_catalog=show_in_catalog)\
            .order_by('created')
    def get_hidden_appversions(self):
        # Return the AppVersions that are in the database but
        # are not shown in the compliance apps catalog.
        return self.get_appversions(show_in_catalog=False)
class NullAppSourceConnection(AppSourceConnection):
    """An AppSourceConnection that holds no apps at all."""
    def __init__(self, source, options):
        # A null source has no state to keep.
        pass
    def __enter__(self):
        return self
    def __exit__(self, *args):
        return
    def __repr__(self):
        return "<NullAppSourceConnection>"
    def list_apps(self):
        # Empty generator: returning before the (unreachable) yield makes
        # this a generator function that produces nothing.
        return
        yield  # make a generator
class MultiplexedAppSourceConnection(AppSourceConnection):
    """A subclass of AppSourceConnection that wraps other AppSourceConnection instances."""
    def __init__(self, sources):
        self.loaders = []
        for ms in sources:
            try:
                self.loaders.append(AppSourceConnection.create(ms, ms.spec))
            except ValueError as e:
                # Re-raise with the offending source's slug for context.
                raise ValueError('There was an error creating the AppSource "{}": {}'.format(ms.slug, e))
    # Override "with ...:" semantics to enter and exit all of the
    # connections this connection wraps.
    def __enter__(self):
        # NOTE(review): if one loader's __enter__ fails, previously
        # entered loaders are not exited -- confirm acceptable.
        for loader in self.loaders:
            loader.__enter__()
        return self
    def __exit__(self, *args):
        # Exit every wrapped connection even if some fail; collect the
        # failures and re-raise them together at the end.
        exceptions = []
        for loader in self.loaders:
            try:
                loader.__exit__(None, None, None)
            except Exception as e:
                exceptions.append((loader, e))
        if exceptions:
            raise Exception(exceptions)
    def __repr__(self):
        return "<MultiplexedAppSourceConnection %s>" % repr(self.loaders)
    def list_apps(self):
        # List all of the apps in all of the stores.
        for ms in self.loaders:
            for app in ms.list_apps():
                yield app
class PyFsAppSourceConnection(AppSourceConnection):
    """Creates a connection from a Pyfilesystem2 filesystem, like
    a local directory containing directories for apps."""

    def __init__(self, source, fsfunc):
        # Don't initialize the filesystem yet - just store the
        # class and __init__ arguments.
        self.source = source
        self.fsfunc = fsfunc

    def __enter__(self):
        # Initialize at the start of the "with" block that this
        # object is used in.
        try:
            # Open the filesystem.
            self.root = self.fsfunc()
        except (fs.errors.CreateFailed, fs.errors.ResourceNotFound) as e:
            raise AppSourceConnectionError(
                'There was an error accessing the AppSource "{}" which connects to {}. The error was: {}'.format(
                    self.source.slug,
                    self.source.get_description(),
                    str(e)
                ))
        # Open catalog.yaml, which contains metadata attached to the apps.
        self.catalog = self.load_catalog_file()
        return self

    def __exit__(self, *args):
        # Clean up the filesystem object at the end of the "with"
        # block.
        # NOTE(review): self.root only exists if __enter__ succeeded;
        # confirm __exit__ is never invoked without a prior __enter__.
        if self.root:
            self.root.close()

    def __repr__(self):
        return "<AppSourceConnection {src}>".format(src=self.source.get_description() if self.source else "<no AppSource>")

    def load_catalog_file(self):
        # Load catalog.yaml, which holds extra metadata about the apps.
        # A missing file is not an error - metadata is optional.
        try:
            with self.root.open("catalog.yaml") as f:
                catalog = read_yaml_file(f)
        except fs.errors.ResourceNotFound:
            catalog = { }
        # Ensure 'apps' key exists and is a mapping.
        if not isinstance(catalog.get("apps"), dict):
            catalog["apps"] = {}
        return catalog

    def list_apps(self):
        # Every directory is an app containing app.yaml file
        # which is the app's root module YAML.
        for entry in self.root.scandir(""):
            # Is this a valid app directory?
            if not entry.is_dir: continue
            if "app.yaml" not in self.root.listdir(entry.name): continue
            # Yield an app instance.
            yield PyFsApp(
                self,
                entry.name,
                self.root.opendir(entry.name))

    def get_app(self, name):
        # Is this a valid app name? Force evaluation of scandir
        # to check that the app directory and an app.yaml exist.
        try:
            if "app.yaml" not in self.root.listdir(name):
                raise fs.errors.ResourceNotFound()
        except fs.errors.ResourceNotFound:
            raise ValueError("App {} not found in {} ({}).".format(
                name,
                self.source.slug,
                self.source.get_description(),
            ))
        return PyFsApp(
            self,
            name,
            self.root.opendir(name))
class PyFsApp(App):
    """An App whose modules and assets are stored in a directory
    layout rooted at a PyFilesystem2 file system."""

    def __init__(self, store, name, fs):
        super().__init__(store, name)
        self.fs = fs

    def get_fs(self):
        """Return the PyFilesystem2 filesystem rooted at this app's directory."""
        return self.fs

    def get_inputs(self):
        """Yield (path, input-spec, content-hash, content-loader) tuples for
        the data inputs declared under the 'input' key of app.yaml."""
        app = read_yaml_file(self.read_file("app.yaml"))
        # Read "inputs" if they exist.
        if "input" in app:
            input_list = app["input"]
            for input in PyFsApp.iter_inputs(self.fs, input_list):
                yield input

    @staticmethod
    def iter_inputs(fs, input_list):
        import hashlib  # for filesystems that don't provide this info
        for input in input_list:
            if input["path"] is not None:
                path = input["path"]
                # Hash file content in chunks so large inputs need not be
                # held in memory at once.
                with fs.open(path, "rb") as input_file:
                    m = hashlib.sha256()
                    while True:
                        data = input_file.read(8192)
                        if not data:
                            break
                        m.update(data)
                    content_hash = m.hexdigest()

                # Bind the path as a closure argument (not the loop variable)
                # so every yielded loader reads its own file.
                def make_content_loader(input_file_path):
                    def content_loader():
                        with fs.open(input_file_path, "rb") as input_file:
                            return input_file.read()
                    return content_loader

                yield (path, input, content_hash, make_content_loader(path))

    def get_modules(self):
        # Return a generator over parsed YAML data for modules.
        return self.iter_modules([])

    def iter_modules(self, path):
        """Recursively yield (module_id, module_spec) for every .yaml module
        file under *path* (a list of directory name components)."""
        from os.path import splitext
        for entry in self.fs.scandir('/'.join(path)):
            if not entry.is_dir:
                # If this is a file that ends in .yaml, it is a module file.
                # Strip the extension and construct a module ID that
                # concatenates the path on disk and the file name.
                fn_name, fn_ext = splitext(entry.name)
                if fn_ext == ".yaml":
                    # The module ID combines its local path and the filename.
                    module_id = "/".join(path + [fn_name])
                    # Bug fix: open the file at its full path relative to the
                    # app root. The original opened entry.name, which is wrong
                    # for modules inside subdirectories.
                    with self.fs.open("/".join(path + [entry.name])) as f:
                        module_spec = read_yaml_file(f)
                    yield (module_id, module_spec)
            elif entry.name in ("assets", "private-assets"):
                # Don't recursively walk into directories named 'assets' or
                # 'private-assets'. These directories provide static assets
                # that go along with the modules in that directory. 'assets'
                # are public assets that are exposed by the web server.
                pass
            else:
                # Recursively walk directories.
                for module in self.iter_modules(path + [entry.name]):
                    yield module

    def read_file(self, path):
        """Return the content of the file at *path* within the app directory."""
        with self.fs.open(path) as f:
            return f.read()

    def get_assets(self):
        """Yield (path, content-hash, content-loader) tuples for the files
        under the app's public 'assets' directory, if it exists."""
        if "assets" in self.fs.listdir(''):
            for asset in PyFsApp.iter_assets(self.fs, []):
                yield asset

    @staticmethod
    def iter_assets(fs, path):
        import hashlib  # for filesystems that don't provide this info
        for entry in fs.scandir("/".join(["assets"] + path)):
            if entry.is_dir:
                # Bug fix: the recursive call must be qualified with the class
                # name - inside a staticmethod a bare iter_assets() raised
                # NameError at runtime for nested asset directories.
                for asset in PyFsApp.iter_assets(fs, path + [entry.name]):
                    yield asset
            else:
                # Virtual unixy path of the asset relative to 'assets/'.
                fn = "/".join(path + [entry.name])
                with fs.open("assets/" + fn, "rb") as f:
                    m = hashlib.sha256()
                    while True:
                        data = f.read(8192)
                        if not data:
                            break
                        m.update(data)
                    content_hash = m.hexdigest()

                # Bind fn as a default-argument-style closure parameter so
                # each loader reads its own asset.
                def make_content_loader(fn):
                    def content_loader():
                        with fs.open("assets/" + fn, "rb") as f:
                            return f.read()
                    return content_loader

                yield (fn, content_hash, make_content_loader(fn))
class LocalDirectoryAppSourceConnection(PyFsAppSourceConnection):
    """An AppSourceConnection backed by a directory on the local filesystem."""

    def __init__(self, source, options):
        from fs.osfs import OSFS
        def make_filesystem():
            # Deferred: the directory is only opened when the connection
            # is entered.
            return OSFS(options["path"])
        super().__init__(source, make_filesystem)
class SimplifiedReadonlyFilesystem(fsFS):
    """A minimal read-only PyFilesystem2 base class.

    Subclasses need only implement scandir() and openbin(); listdir() and
    getinfo() are derived from scandir(), and all write operations raise.
    """

    def listdir(self, path):
        # Derive the name list from scandir().
        return [entry.name for entry in self.scandir(path)]

    def getinfo(self, path, namespaces=None):
        # Bug fix: 'namespaces' was a mutable default argument ([]); it is
        # accepted for API compatibility but unused here.
        parent_path = "/".join(path.split("/")[:-1])
        name = path.split("/")[-1]
        # Look the entry up by scanning its parent directory.
        for entry in self.scandir(parent_path):
            if entry.name == name:
                return entry
        raise ValueError("Path not found.")

    # Write operations are unsupported: this filesystem is read-only.
    def makedir(self, *args): raise Exception("Not implemented.")
    def remove(self, *args): raise Exception("Not implemented.")
    def removedir(self, *args): raise Exception("Not implemented.")
    def setinfo(self, *args): raise Exception("Not implemented.")
class GithubApiFilesystem(SimplifiedReadonlyFilesystem):
    """
    Read-only filesystem over the GitHub contents API.

    Configured from an AppSource spec of the shape:
    { "type": "github", "repo": "orgname/reponame", ["path": "/subpath",] "auth": { "user": "...", "pw": "..." } }
    """

    def __init__(self, repo, path, user, pw):
        # Create client.
        from github import Github
        g = Github(user, pw)
        self.repo = g.get_repo(repo)
        # Normalize the optional subpath to always end with a slash.
        self.path = (path or "") + "/"
        # Run a quick call to check access.
        from github.GithubException import GithubException
        try:
            self.repo.get_dir_contents(self.path)
        except GithubException as e:
            # Surface auth/permission problems as the standard pyfilesystem
            # "could not open" error so PyFsAppSourceConnection reports it.
            raise fs.errors.CreateFailed(e.data.get("message"))

    def scandir(self, path, namespaces=None, page=None):
        # Yield one Info per entry in the directory at *path* (relative to
        # the configured subpath).
        from fs.info import Info
        for cf in self.repo.get_dir_contents(self.path + path):
            yield Info({
                "basic": {
                    "name": cf.name,
                    "is_dir": cf.type == "dir",
                },
                "hash": {
                    "sha1": cf.sha,
                }
            })

    def openbin(self, path, mode="r", **options):
        # Only reading is supported; file content arrives base64-encoded
        # from the GitHub API and is returned as an in-memory stream.
        if mode not in ("r", "rb"): raise ValueError("Invalid open mode. Must be 'r' or 'rb'.")
        import base64, io
        cf = self.repo.get_contents(self.path + path)
        if cf.type != "file": raise ValueError("path is a directory")
        if cf.encoding != "base64": raise ValueError("content encoding is unrecognized")
        content = base64.b64decode(cf.content)
        return io.BytesIO(content)
class GithubApiAppSourceConnection(PyFsAppSourceConnection):
    """An AppSourceConnection reading apps from a GitHub repository via
    the GitHub API."""

    def __init__(self, source, options):
        # Validate the AppSource's connection options before connecting.
        auth = options.get("auth")
        if not isinstance(options.get("repo"), str):
            raise ValueError("The AppSource is misconfigured: missing or invalid 'repo'.")
        if not isinstance(options.get("path"), (str, type(None))):
            raise ValueError("The AppSource is misconfigured: missing or invalid 'path'.")
        if not (isinstance(auth, dict) and isinstance(auth.get("user"), str)):
            raise ValueError("The AppSource is misconfigured: missing or invalid 'auth.user'.")
        if not (isinstance(auth, dict) and isinstance(auth.get("pw"), str)):
            raise ValueError("The AppSource is misconfigured: missing or invalid 'auth.pw'.")
        def make_filesystem():
            return GithubApiFilesystem(
                options["repo"], options.get("path"),
                options["auth"]["user"], options["auth"]["pw"])
        super().__init__(source, make_filesystem)
class GitRepositoryFilesystem(SimplifiedReadonlyFilesystem):
    """Read-only filesystem over a (possibly private) remote git repository.

    Performs a shallow fetch of one branch into a temporary local clone and
    serves files out of the fetched tree.
    """

    def __init__(self, url, branch, path, ssh_key=None):
        self.url = url
        self.branch = branch or None
        # Normalize the optional subpath to always end with a slash.
        self.path = (path or "") + "/"
        self.ssh_key = ssh_key

        # Create a local git working directory.
        import tempfile
        self.tempdir_obj = tempfile.TemporaryDirectory()
        self.tempdir = self.tempdir_obj.__enter__()

        # Human-readable description used by __repr__ and error messages.
        self.description = self.url + "/" + self.path.strip("/")
        if self.branch:
            self.description += "@" + self.branch

        # Validate access (forces the fetch immediately).
        self.getdir("")

    def __repr__(self):
        return "<gitfs '%s'>" % self.description

    def close(self):
        # Release the temporary directory.
        self.tempdir_obj.__exit__(None, None, None)

    def get_repo_root(self):
        # Return cached tree.
        if hasattr(self, "repo_root_tree"):
            return self.repo_root_tree

        import os, os.path
        import git

        # Create an empty git repo in the temporary directory.
        self.repo = git.Repo.init(self.tempdir)

        # Make SSH non-interactive.
        ssh_options = "ssh -o StrictHostKeyChecking=no -o BatchMode=yes"

        # If an SSH key is provided, store it in the temporary directory and
        # then use it.
        if self.ssh_key:
            ssh_key_file = os.path.join(self.tempdir, "ssh.key")
            old_umask = os.umask(0o077) # ssh requires group/world permissions to be zero
            try:
                with open(ssh_key_file, "wb") as f:
                    f.write(self.ssh_key.encode("ascii"))
            finally:
                os.umask(old_umask)
            ssh_options += " -i " + ssh_key_file

        self.repo.git.environment()["GIT_SSH_COMMAND"] = ssh_options

        # For debugging, log a command that we could try on the command line.
        #print("SSH_COMMAND=\"{ssh_options}\" git fetch --depth 1 {url} {branch}".format(
        #    ssh_options=ssh_options, url=self.url, branch=self.branch), file=sys.stderr)

        # Fetch.
        import git.exc
        try:
            self.repo.git.execute(
                [
                    self.repo.git.git_exec_name,
                    "fetch",
                    "--depth", "1", # avoid getting whole repo history
                    self.url, # repo URL
                    self.branch or "", # branch to fetch
                ], kill_after_timeout=20)
        except git.exc.GitCommandError as e:
            # This is where errors occur, which is hopefully about auth.
            raise fs.errors.CreateFailed("The repository URL is either not valid, not public, or ssh_key was not specified or not valid (%s)." % e.stderr)

        # Get the tree for the remote branch's HEAD.
        tree = self.repo.tree("FETCH_HEAD")

        # The Pythonic way would be to add a remote for the remote repository, run
        # fetch, and then access its ref.
        #self.remote = self.repo.create_remote("origin", self.spec["url"])
        #self.remote.fetch(self.spec.get("branch"))
        #tree = self.repo.tree(self.remote.refs[0])

        # If a path was given, move to that subdirectory.
        # TODO: Check that paths with subdirectories that have no other content
        # but an inner subdirectory work, because git does something funny about
        # flattening empty directories.
        for pathitem in self.path.split("/"):
            if pathitem != "":
                tree = tree[pathitem]

        # Cache and return it.
        self.repo_root_tree = tree
        return tree

    def getdir(self, path):
        # Resolve *path* to a git tree object, walking down from the root.
        tree = self.get_repo_root()
        for item in path.split("/"):
            if item != "":
                try:
                    tree = tree[item] # TODO: As above.
                except KeyError:
                    raise fs.errors.ResourceNotFound(path)
        return tree

    def scandir(self, path, namespaces=None, page=None):
        # Get the root tree and then move to the desired subdirectory.
        from fs.info import Info
        tree = self.getdir(path)
        for item in tree:
            # Only directories (trees) and regular files (blobs) are listed;
            # e.g. submodules are skipped.
            if item.type not in ("tree", "blob"): continue
            yield Info({
                "basic": {
                    "name": item.name,
                    "is_dir": item.type == "tree",
                },
                "hash": {
                    "sha1": item.hexsha,
                }
            })

    def openbin(self, path, mode="r", **options):
        # Get the root tree and then move to the desired item.
        if mode not in ("r", "rb"): raise ValueError("Invalid open mode. Must be 'r' or 'rb'.")
        import io
        tree = self.getdir(path)
        return io.BytesIO(tree.data_stream.read())
class GitRepositoryAppSourceConnection(PyFsAppSourceConnection):
    """An AppSourceConnection reading apps from a remote git repository."""

    def __init__(self, source, options):
        # Validate the AppSource's connection options before connecting.
        if not isinstance(options.get("url"), str):
            raise ValueError("The AppSource is misconfigured: missing or invalid 'url'.")
        if not isinstance(options.get("branch"), (str, type(None))):
            # Bug fix: this message previously reported 'url' instead of 'branch'.
            raise ValueError("The AppSource is misconfigured: missing or invalid 'branch'.")
        if not isinstance(options.get("path"), (str, type(None))):
            raise ValueError("The AppSource is misconfigured: missing or invalid 'path'.")
        super().__init__(source, lambda : GitRepositoryFilesystem(
            options["url"], options.get("branch"), options.get("path"),
            options.get("ssh_key")))
def read_yaml_file(f):
    """Parse the YAML stream *f* using the safe loader via rtyaml (which
    preserves mapping order with OrderedDicts), converting YAML parse
    errors into AppSourceConnectionError."""
    import rtyaml
    import yaml.scanner, yaml.parser, yaml.constructor
    yaml_errors = (yaml.scanner.ScannerError, yaml.parser.ParserError,
                   yaml.constructor.ConstructorError)
    try:
        return rtyaml.load(f)
    except yaml_errors as e:
        raise AppSourceConnectionError("There was an error parsing the YAML file: " + str(e))
# Registry mapping an AppSource's "type" value to the AppSourceConnection
# subclass that implements it.
AppSourceConnectionTypes = {
    "null": NullAppSourceConnection,
    "local": LocalDirectoryAppSourceConnection,
    "github": GithubApiAppSourceConnection,
    "git": GitRepositoryAppSourceConnection,
}
|
from typing import Union

import pysonic

# Type alias for any pysonic object that can be played/queued.
Playable = Union[pysonic.Song, pysonic.Album, pysonic.Artist, pysonic.Folder]
|
import socket
import ssl
import os
import Queue
import threading
import time
# Load configuration by executing configs/config.py in this module's global
# namespace. NOTE(review): exec of a config file is opaque to static analysis
# and runs arbitrary code; consider a real config module or configparser.
exec(open(os.path.join(os.path.dirname(__file__), "configs" + os.sep + "config.py"), "r").read())
# The exec'd config is expected to define MPD (and the other ALL-CAPS settings
# used below); only import the mpd client library if that integration is on.
if MPD:
    import mpd
class ConnectionMan:
    """Manages the bot's outbound connections (Python 2 code).

    Holds the IRC socket with a rate-limited outgoing queue drained by a
    daemon thread, and optionally an MPD client. Configuration constants
    (MPD, IRC, HOST, PORT, NICK, ...) come from the exec'd config file.
    """

    def __init__(self, threaddict, httpresp, global_confman):
        # Expose the caller's routing tables module-wide so worker threads
        # can route their replies (see gen_send).
        global thread_types
        thread_types = threaddict
        global http_responses
        http_responses = httpresp
        self.confman = global_confman
        #connect to mpd server
        if MPD:
            self.mpc = mpd.MPDClient()
            self.mpc.connect(MPD_HOST, MPD_PORT)
        # Could impose a limit, but not doing it yet
        if IRC:
            self.queue = Queue.Queue()
            self.connect_irc()

    # Trifecta of evil. Do with these as you please, but try not to break the functionality.
    def queue_raw(self, text):
        # Enqueue one raw IRC line; queue_tick() drains the queue at a
        # throttled rate so the server does not disconnect us for flooding.
        self.queue.put(str(text) + "\r\n", True)

    # You may bypass the queue, if needed.
    def send_raw(self, text):
        # Serialize writes to the socket across threads.
        self.lock.acquire()
        self.s.send(str(text))
        self.lock.release()

    def queue_tick(self):
        # Runs forever on a daemon thread: send one queued line, then wait
        # OUTGOING_DELAY milliseconds.
        while True:
            self.send_raw(self.queue.get(True))
            time.sleep(OUTGOING_DELAY / 1000.0)

    def join_irc(self, chan, nick=None, record=True):
        """Join *chan*, optionally announce the inviter, and optionally
        record the channel in the persistent config so it is rejoined."""
        self.queue_raw("JOIN " + chan)
        # Read until the end of the NAMES reply, which signals the join
        # completed; echo everything else to stdout.
        while 1:
            line = self.s.recv(2048)
            line = line.strip("\r\n")
            if("End of /NAMES list." in line):
                print "\n*** %s joined! ***\n" % chan
                break
            else:
                print line
        time.sleep(1) # allows chan join to complete before messages are sent
        if not nick == None:
            self.privmsg("Invited by %s" % nick, chan)
            self.privmsg("Home channel: %s" % HOME_CHANNEL, chan)
            self.privmsg("Joined %s, invited by %s" % (chan, nick), HOME_CHANNEL)
        if record:
            chanlist = self.confman.get_value("IRC", "CHANS", [])
            chanlist.append(chan)
            self.confman.set_value("IRC", "CHANS", chanlist)
        self.joined_chans.append(chan)

    def connect_irc(self):
        """Open the (optionally SSL) socket, register with the server, start
        the queue-draining thread, and rejoin the configured channels."""
        #If SSL is enabled use ssl
        if(SSL):
            self.s = ssl.wrap_socket(socket.socket( ))
        else:
            self.s = socket.socket( )
        self.s.connect((HOST, PORT))
        self.lock = threading.Lock()
        self.joined_chans = []
        # Daemon thread so the program can exit while the drainer runs.
        thread = threading.Thread(target = self.queue_tick)
        thread.daemon = True
        thread.start()
        # As of RFC 2812, USER message params are: <user> <mode> <unused> <realname>
        self.queue_raw("USER " + NICK + " 0 * :" + NICK)
        self.queue_raw("NICK " + NICK)
        print "*** Connecting... ***"
        # Wait for the end of the MOTD before joining channels.
        while 1:
            line = self.s.recv(2048)
            line = line.strip("\r\n")
            if("End of /MOTD command." in line):
                break
            else:
                print line
        self.join_irc(chan = HOME_CHANNEL, record = False)
        for channel in self.confman.get_value("IRC", "CHANS", []):
            self.join_irc(chan = channel, record = False)

    #Define reconnect function
    def reconnect_mpd(self):
        try:
            self.mpc.disconnect()
        except mpd.ConnectionError:
            pass
        self.mpc.connect(MPD_HOST, MPD_PORT)

    def reconnect_irc(self):
        # Best-effort close of the old socket, then a fresh connect.
        try:
            self.s.close()
        except:
            pass
        self.s = None
        self.connect_irc()

    #generic send function
    def gen_send(self, text):
        # Route *text* back to wherever the current thread's request came
        # from (IRC channel or pending HTTP response), defaulting to the
        # home channel for unregistered threads.
        try:
            ret_type = thread_types[threading.current_thread().ident]
        except KeyError:
            ret_type = {"type": "PRIVMSG", "source": HOME_CHANNEL}
        if ret_type["type"] == "PRIVMSG" or ret_type["type"] == "regex":
            try:
                if not ret_type["prefix"] == "":
                    ret_type["prefix"] = "%s: " % ret_type["prefix"]
                self.privmsg(text, ret_type["source"], ret_type["prefix"])
            except KeyError:
                self.privmsg(text, ret_type["source"])
        elif ret_type == "HTTP":
            # NOTE(review): compares the dict to the string "HTTP" - looks
            # like it was meant to be ret_type["type"] == "HTTP"; confirm.
            http_responses[threading.current_thread().ident] = text

    #Define private message function
    # Splitting is something that should be taken care of beforehand.
    def privmsg(self, text, channel=HOME_CHANNEL, prefix=""):
        # Send each non-empty line as its own PRIVMSG; when IRC is disabled,
        # fall back to printing locally.
        for msg in str(text).split("\n"):
            if not msg.split() == "":
                if IRC:
                    self.queue_raw("PRIVMSG " + channel + " :" + prefix + str(msg))
                else:
                    print prefix + str(msg)
|
"""
Display recursively the content of a given cell of a view.
"""
__docformat__ = 'restructuredtext'
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
import vitables.utils
def getArrayDimensions(shape):
    """
    Get the dimensions of the grid where the cell will be zoomed.

    The zoomed cell contains a `numpy` array and will be displayed
    in a table with the same shape than it.

    :Parameter shape: the cell shape
    :Returns: a tuple (rows, columns)
    """
    if shape == ():
        # Numpy scalars are a special case: a 1x1 grid.
        return (1, 1)
    if len(shape) == 1:
        # 1-D arrays are shown as a single-column vector.
        return (shape[0], 1)
    # N-D arrays are shown as a (nrows x ncols) matrix whose elements
    # are (N-2)-D arrays.
    return (shape[0], shape[1])
class ZoomCell(QtWidgets.QMdiSubWindow):
    """
    Display an array/table cell on its own view (table widget).

    When a leaf is displayed in a view, is quite usual that the content
    of some cells is not fully visible because it doesn't fit into
    the cell. To alleviate this problem this class provides a way to,
    recursively, display the content of any cell on its own view.

    The cell content depends on the kind of leaf and the shape of its
    atom. Cells of `Table` views and `E/C/Array` views can be:

    - a ``numpy`` scalar. `Atom` shape is ()
    - a ``numpy`` array. `Atom` shape is not ()

    In addition, cells of `VLArray` views can be:

    - a serialized `Python` object. `Atom` kind is `object`, shape is ()
    - a `Python` string. `Atom` kind is `vlstring`, shape is ()

    Finally, cells of `Table` views also can be:

    - a `numpy.void` object when the cell corresponds to nested field of the
      record

    :Parameters:

    - `data`: the value stored in the cell being zoomed
    - `title`: the base string for the zoomed view title
    - `workspace`: the parent of the zoomed view
    - `leaf`: a LeafNode instance
    """

    def __init__(self, data, title, workspace, leaf):
        """
        Creates a zoom view for a given cell.

        The passed cell is an element of a given dataset. It is always
        a (potentially nested) `numpy` array. See cell accessor methods
        in the `Buffer` class for details.
        """
        self.data = data
        self.title = title
        self.workspace = workspace
        self.data_shape = self.hasShape()
        self.field_names = []
        # Create and customise the widget that will display the zoomed cell
        # The pindex attribute is required to keep working the code for
        # synchronising workspace and tree of databases view.
        # The leaf attribute is required to keep working the code for
        # cleaning the workspace when a file is closed.
        # The WA_DeleteOnClose flag makes that when the widget is
        # closed either programatically (see VTAPP.windowClose)
        # or by the user (clicking the close button in the titlebar)
        # the widget is hidden AND destroyed --> the workspace
        # updates automatically its list of open windows --> the
        # Windows menu content is automatically updated
        super(ZoomCell, self).__init__(workspace)
        self.pindex = None
        self.dbt_leaf = leaf
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        # The internal widget
        self.grid = QtWidgets.QTableWidget()
        self.setWidget(self.grid)
        # Configure the titlebar
        self.setWindowTitle(self.title)
        icons_dictionary = vitables.utils.getIcons()
        self.setWindowIcon(icons_dictionary['zoom-in'])
        # Decide how the cell content will be formatted. Content can be:
        # - a numpy array
        # - either a string or a unicode string
        # - other Python object
        if self.data_shape:
            self.formatContent = vitables.utils.formatArrayContent
        elif isinstance(self.data, str):
            self.formatContent = vitables.utils.formatStringContent
        else:
            self.formatContent = vitables.utils.formatObjectContent
        # Setup grid dimensions
        (nrows, ncols) = self.getGridDimensions()
        self.grid.setColumnCount(ncols)
        self.grid.setRowCount(nrows)
        # Setup grid editing: the zoomed view is read-only.
        self.grid.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        # Setup grid horizontal header: field names for nested table
        # fields, plain column numbers otherwise.
        if self.field_names:
            for section in range(0, ncols):
                item = QtWidgets.QTableWidgetItem()
                item.setText(self.field_names[section])
                self.grid.setHorizontalHeaderItem(section, item)
        else:
            for section in range(0, ncols):
                item = QtWidgets.QTableWidgetItem()
                item.setText('{0}'.format(section))
                self.grid.setHorizontalHeaderItem(section, item)
        # Setup grid vertical header
        for section in range(0,nrows):
            item = QtWidgets.QTableWidgetItem()
            item.setText('{0}'.format(section))
            self.grid.setVerticalHeaderItem(section, item)
        # Fill the grid
        if self.field_names:
            self.zoomTable()
        else:
            self.zoomArray()
        self.show()
        # Single-row/column grids stretch to fill the available space.
        rmode = QtWidgets.QHeaderView.Stretch
        if self.grid.columnCount() == 1:
            self.grid.horizontalHeader().setSectionResizeMode(rmode)
        if self.grid.rowCount() == 1:
            self.grid.verticalHeader().setSectionResizeMode(rmode)
        # Connect signals to slots: double-clicking a cell zooms it again,
        # recursively.
        self.grid.cellDoubleClicked.connect(self.zoomView)

    def hasShape(self):
        """Find out if the zoomed cell has a shape attribute."""
        return hasattr(self.data, 'shape')

    def getGridDimensions(self):
        """
        Get the dimensions of the grid where the zoomed cell will be displayed.

        :Returns: a tuple (rows, columns)
        """
        if self.data_shape:
            # The cell contains a numpy object
            shape = self.data.shape
            dtype = self.data.dtype
            if dtype.fields is None:
                # Arrays with nested fields come here
                return getArrayDimensions(shape)
            else:
                # Table nested fields come here
                return self.getNestedFieldDimensions()
        else:
            # The cell contains a Python object
            return self.getPyObjectDimensions()

    def getPyObjectDimensions(self):
        """
        Get the dimensions of the grid where the cell will be zoomed.

        The zoomed cell contains a `Python` object.

        :Returns: a tuple (rows, columns)
        """
        if isinstance(self.data, list) or isinstance(self.data, tuple):
            return (len(self.data), 1)
        else:
            return (1, 1)

    def getNestedFieldDimensions(self):
        """
        Get the dimensions of the grid where the cell will be zoomed.

        The zoomed cell contains a nested field (a field made of inner
        fields, than can be nested or not) and will be displayed in a
        table with only one row and one column per (top - 1) level field.

        The dtype.descr attribute describes the inner fields. A field
        description is a tuple with one of the following formats:

        - (field_name, format) field is not nested, shape=()
        - (field_name, format, shape) field is not nested, shape!=()
        - (field_name, descr) field is nested

        :Returns: a tuple (rows, columns)
        """
        self.field_names = [item[0] for item in self.data.dtype.descr]
        ncols = len(self.field_names)
        nrows = 1
        return (nrows, ncols)

    def zoomTable(self):
        """Fill the zoom view with the content of the clicked nested field."""
        for column in range(0, self.grid.columnCount()):
            content = self.data[self.field_names[column]]
            text = self.formatContent(content)
            item = QtWidgets.QTableWidgetItem(text)
            self.grid.setItem(0, column, item)

    def zoomArray(self):
        """Fill the zoom view with the content of the clicked cell."""
        num_rows = self.grid.rowCount()
        num_cols = self.grid.columnCount()
        # Numpy scalars are displayed in a 1x1 grid
        if num_rows == num_cols == 1:
            content = self.data
            text = self.formatContent(content)
            item = QtWidgets.QTableWidgetItem(text)
            self.grid.setItem(0, 0, item)
        # 1-D arrays
        elif num_cols == 1:
            for row in range(0, num_rows):
                content = self.data[row]
                text = self.formatContent(content)
                item = QtWidgets.QTableWidgetItem(text)
                self.grid.setItem(row, 0, item)
        # N-D arrays
        else:
            for row in range(0, num_rows):
                for column in range(0, num_cols):
                    content = self.data[row][column]
                    text = self.formatContent(content)
                    item = QtWidgets.QTableWidgetItem(text)
                    self.grid.setItem(row, column, item)

    def zoomView(self, row, col):
        """Makes the content of the clicked cell fully visible.

        :Parameters:

        - `row`: the row of the clicked cell
        - `col`: the column of the clicked cell
        """
        # Check if the zoom has to be done: scalars and non-sequence
        # Python objects cannot be zoomed further.
        if self.data_shape:
            if not (self.data.shape !=() or self.field_names):
                return
        elif not (isinstance(self.data, list) or isinstance(self.data, tuple)):
            return
        # Get data
        if self.data_shape:
            # Arrays and table nested fields
            if self.field_names:
                cell = self.data[self.field_names[col]]
            elif len(self.data.shape) > 1:
                cell = self.data[row][col]
            elif len(self.data.shape) == 1:
                cell = self.data[row]
        else:
            # Python lists and tuples
            cell = self.data[row]
        # Get caption
        if self.field_names:
            caption = '{0}: {1}[{2}]'.format(self.title,
                self.field_names[col], row)
        else:
            caption = '{0}: ({1}, {2})'.format(self.title, row, col)
        # Recursively open a new zoom view for the inner cell.
        ZoomCell(cell, caption, self.workspace, self.dbt_leaf)
|
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Drops Article.has_copyright_restriction (added in migration 0068) and
    # refreshes help_text metadata on the external URL fields, plus indexing
    # and uniqueness on EruditDocument.localidentifier.

    dependencies = [
        ('erudit', '0068_article_has_copyright_restriction'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='article',
            name='has_copyright_restriction',
        ),
        migrations.AlterField(
            model_name='article',
            name='external_pdf_url',
            field=models.URLField(blank=True, help_text="Renseigner si le PDF de l'article est hébergé à l'extérieur de la plateforme Érudit", null=True, verbose_name='URL PDF'),
        ),
        migrations.AlterField(
            model_name='article',
            name='external_url',
            field=models.URLField(blank=True, help_text="Renseigner si l'article est hébergé à l'extérieur de la plateforme Érudit", null=True, verbose_name='URL'),
        ),
        migrations.AlterField(
            model_name='eruditdocument',
            name='localidentifier',
            field=models.CharField(db_index=True, help_text='Identifiant Fedora du document', max_length=100, unique=True, verbose_name='Identifiant unique'),
        ),
    ]
|
import os, sys, operator

# Sort the lines of a file so that the chosen (0-based, whitespace-split)
# column is non-decreasing; write the result to <file>_ord.
# NOTE: the column is compared as a string (lexicographic), matching the
# original behavior - numeric columns are NOT compared numerically.

# Parse command line. Bug fix: the bare 'except' also swallowed
# KeyboardInterrupt/SystemExit; catch only the expected failures.
try:
    file_dis = sys.argv[1]
    ord_colon = int(sys.argv[2])
except (IndexError, ValueError):
    print("Use: "+ sys.argv[0] + " `file_name` column (with column>=0)")
    print("Sort the lines of the file in such a way that the chosen column is growing")
    sys.exit(1)

file_ord = file_dis + '_ord'  # sorted file

# Read all lines; 'with' guarantees the handle is closed even on error
# (the original left both files unclosed).
with open(file_dis, 'r') as in_file:
    cont_file = in_file.readlines()

# Pair each line index with its sort key, then sort by the key.
aux = [[i, line.split()[ord_colon]] for i, line in enumerate(cont_file)]
aux_sort = sorted(aux, key=operator.itemgetter(1))

# Write the lines in sorted order.
with open(file_ord, 'w') as out_file:
    for orig_index, _key in aux_sort:
        out_file.write(cont_file[orig_index])
|
__author__ = 'calthorpe_analytics'
class Range:
    """A numeric interval [start, end] with a printable 'start_end' name."""

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def length(self):
        """Return the size of the interval (end - start)."""
        return self.end - self.start

    def overlaps(self, other):
        """Return True if this range and *other* share any point
        (touching endpoints count as overlapping)."""
        return not(self.end < other.start or other.end < self.start)

    def name(self):
        """Return the range rendered as 'start_end'."""
        return '_'.join([str(self.start), str(self.end)])

    def __unicode__(self):
        # Python 2 compatibility.
        return u'%s' % self.name()

    def __str__(self):
        # Bug fix: the original returned self.__unicode__().encode('utf-8'),
        # i.e. bytes, which is invalid for __str__ on Python 3.
        return self.name()
def make_ranges(min, max, count, explicit_increments=None):
    """Split [min, max] into *count* contiguous Range objects.

    If *explicit_increments* is given it supplies the interior boundaries
    (there must be exactly count-1 of them); otherwise the span is divided
    into equal-width ranges.

    Bug fix: explicit_increments previously defaulted to a mutable list.

    :raises Exception: if len(explicit_increments) != count - 1
    """
    explicit_increments = explicit_increments or []
    full_range = max-min
    increment = full_range/count
    if len(explicit_increments) > 0:
        if len(explicit_increments)+1 != count:
            raise Exception("explicit_increments count ({0}) is not one less than count ({1})".format(len(explicit_increments), count))
        # Pair consecutive boundaries: [min, ...increments..., max].
        all_increments = [min]+explicit_increments+[max]
        return [Range(lo, hi) for lo, hi in zip(all_increments, all_increments[1:])]
    else:
        # Equal-width partition of the span.
        return [Range(min+increment*i, min+increment*(i+1)) for i in range(count)]
def make_increments(min, max, count):
    """Return *count* equidistant values starting at min and ending at max.

    Bug fix: the step was previously computed as full_range/count - 1 due to
    an operator-precedence error; the intended step is full_range/(count-1)
    so that, per the original comment, the last increment equals max.
    """
    full_range = max-min
    # Divide by count-1 so the sequence starts at min and ends exactly at max.
    increment = full_range/(count-1)
    # Creates a sequence starting a min and ending at max, with intermediates equidistant
    increments = [(min + increment * i) for i in range(count)]
    return increments
|
"""
Check for problems that might break the build in non-IMP repository code:
- a module without a README.md
- missing or incomplete submodules
"""
import os
import sys
import os.path
import shutil
import platform
import tools
def check_readme():
    """Exit the build with status 1 if any module lacks a README.md file."""
    for module, g in tools.get_modules("."):
        if not os.path.exists(os.path.join(g, "README.md")):
            # Bug fix: sys.stderr.write() takes exactly one string argument;
            # the original passed two, raising a TypeError here. Also fixed
            # the duplicated word "what what" in the message.
            sys.stderr.write("Each module must have a file 'README.md' describing what it does and who wrote it.\n")
            sys.stderr.write(module + " does not.\n")
            # Use sys.exit rather than the interactive-only exit() helper.
            sys.exit(1)
def main():
    # Entry point: run all build-sanity checks.
    check_readme()

if __name__ == '__main__':
    main()
|
from math import inf
from lib import rev_range
def search_free(arr):
    """
    Given the price of a stock over n days. Buy & sell are unlimited. Returns the maximum possible profit.
    Solution is greedy: collect every positive day-over-day increase.
    Time complexity is O(n). Space complexity is O(1).
    :param arr: list[num]
    :return: num
    """
    if len(arr) <= 1:
        return 0
    return sum(today - yesterday
               for yesterday, today in zip(arr, arr[1:])
               if today > yesterday)
def search2(arr):
    """
    Given the price of a stock over n days. Allow at most 2 transactions. Returns the maximum possible profit.
    Observation:
        max_profit = max(for each day i, first sell is <= day i, second buy-in is >= day i)
                   = max(for each day i, first sell is on day i, second buy-in is >= day i)
    Note the possibility of selling & buying on day i, which merges two transactions.
    Solution is a 2-pass scan. Time complexity is O(n). Space complexity is O(n).
    :param arr: list[num]
    :return: num
    """
    n = len(arr)
    if n <= 1:
        return 0
    # Forward pass: fst[i] = max profit of at most one transaction in arr[:i+1].
    pre_min = arr[0]
    fst = [0]
    for x in arr[1:]:
        pre_min = min(x, pre_min)
        fst.append(max(fst[-1], x - pre_min))
    # Backward pass: best second sale after buying on day i, plus the best
    # first transaction ending by day i.
    # Improvement: use the built-in reversed(range(n)) instead of the
    # project-local rev_range helper (identical iteration order).
    profit = 0
    post_max = arr[-1]
    for i in reversed(range(n)):
        post_max = max(arr[i], post_max)
        profit = max(profit, post_max - arr[i] + fst[i])
    return profit
def search2_2(arr):
    """At most 2 transactions via a single pass over four DP states:
    profit after 1st buy / 1st sell / 2nd buy / 2nd sell. O(n) time, O(1) space."""
    first_buy = second_buy = -inf
    first_sell = second_sell = 0
    for price in arr:
        second_sell = max(second_sell, second_buy + price)
        second_buy = max(second_buy, first_sell - price)
        first_sell = max(first_sell, first_buy + price)
        first_buy = max(first_buy, -price)
    return second_sell
def search_k(arr, k):
    """At most k transactions; same observation as search2. O(kn) time & space.

    dp[c][i] is the maximum total profit when transaction c+1 sells on some
    day <= i.
    """
    assert k > 0
    n = len(arr)
    dp = [[0] * n for _ in range(k)]
    # First transaction: classic single-transaction scan.
    min_up_to = arr[0]  # min(arr[:i+1])
    for i in range(1, n):
        min_up_to = min(min_up_to, arr[i])
        dp[0][i] = max(dp[0][i - 1], arr[i] - min_up_to)
    # Remaining transactions.
    for c in range(1, k):
        # max_diff tracks the best "finished c transactions then bought" value
        # over all buy days < i; merged sell/buy on one day is allowed.
        max_diff = -arr[0]
        for i in range(1, n):
            max_diff = max(max_diff, dp[c - 1][i - 1] - arr[i - 1])
            dp[c][i] = max(dp[c][i - 1], max_diff + arr[i])
    return dp[-1][-1]
def search_k2(arr, k):
    """At most k transactions, after compressing prices to local extrema.
    Falls back to the greedy sum when k is large enough to take every rise."""
    n = len(arr)
    if n <= 1 or k == 0:
        return 0
    # Compress arr to its alternating local minima / maxima.
    simple = []
    inc = False  # currently inside an increasing run?
    for i in range(1, n):
        if arr[i] > arr[i - 1] and not inc:
            # arr[i-1] is a local min
            inc = True
            simple.append(arr[i - 1])
        if arr[i] < arr[i - 1] and inc:
            # arr[i-1] is a local max
            inc = False
            simple.append(arr[i - 1])
    if inc:
        # Trailing increasing run: the last element is a local max.
        simple.append(arr[-1])
    n = len(simple)
    if k >= n // 2:
        # Enough transactions to take every rise: greedy.
        return sum(max(0, b - a) for a, b in zip(simple, simple[1:]))
    # DP over k (hold, release) state pairs, as in search2_2.
    hold = [-inf] * k
    release = [0] * k
    for price in simple:
        prev_release = 0
        for t in range(k):
            hold[t] = max(hold[t], prev_release - price)
            release[t] = max(release[t], hold[t] + price)
            prev_release = release[t]
    return release[-1]
def search4(arr):
    """Unlimited transactions with a one-day cooldown after each sell."""
    free = 0         # free to buy (did not sell yesterday)
    holding = -inf   # currently holding stock
    cooldown = -inf  # sold today; must cool down tomorrow
    for price in arr:
        # Simultaneous update: each new state is computed from yesterday's.
        free, holding, cooldown = (
            max(free, cooldown),         # cooldown expires
            max(holding, free - price),  # buy only when free
            holding + price,             # sell
        )
    return max(free, cooldown)
if __name__ == '__main__':
    from random import randint

    def control_2(arr):
        # Brute-force reference for at-most-2 transactions: try every
        # (buy1 <= sell1 <= buy2 <= sell2) day combination. O(n^4).
        m, n = 0, len(arr)
        for i in range(n):
            for j in range(i, n):
                for k in range(j, n):
                    for l in range(k, n):
                        m = max(m, arr[j] - arr[i] + arr[l] - arr[k])
        return m

    assert search_free([100, 180, 260, 310, 40, 535, 695]) == 865
    # test search2
    for k, v in {(2, 30, 15, 10, 8, 25, 80): 100}.items():
        assert search2(k) == search2_2(k) == v
    # Randomized cross-check of the 2-transaction implementations against
    # the brute-force reference, on increasing input sizes.
    for size in [x for x in range(2, 25) for _ in range(x)]:
        a = [randint(0, size * 2) for _ in range(size)]
        assert search2(a) == search_k(a, 2) == search_k2(a, 2) == control_2(a)
    # test search_k
    for k, v in {((10, 22, 5, 75, 65, 80), 2): 87,
                 ((12, 14, 17, 10, 14, 13, 12, 15), 3): 12,
                 ((100, 30, 15, 10, 8, 25, 80), 3): 72,
                 ((90, 80, 70, 60, 50), 1): 0}.items():
        assert search_k(*k) == search_k2(*k) == v
|
class Tournament:
    """Plain record for one tournament, with Java-style accessors.

    Carries the row ids (venue, last winner, award) plus display-name
    fields that start as a single space and are resolved elsewhere.
    """

    def __init__(self, id, name, venueID, round, player_count, last_winnerID, awardID):
        # Copy the constructor arguments onto the matching attributes.
        (self.ID, self.Name, self.VenueID, self.Round,
         self.PlayerCount, self.LastWinnerID, self.AwardID) = (
            id, name, venueID, round, player_count, last_winnerID, awardID)
        # Display names default to a single space until filled in later.
        self.VenueName = " "
        self.LastWinnerName = " "
        self.AwardName = " "

    # --- simple accessors ---------------------------------------------
    def getID(self):
        return self.ID

    def getName(self):
        return self.Name

    def getVenueID(self):
        return self.VenueID

    def getVenueName(self):
        return self.VenueName

    def getRound(self):
        return self.Round

    def getPlayerCount(self):
        return self.PlayerCount

    def getLastWinnerID(self):
        return self.LastWinnerID

    def getLastWinnerName(self):
        return self.LastWinnerName

    def getAwardID(self):
        return self.AwardID

    def getAwardName(self):
        return self.AwardName
|
"""
Illustrative example for a numerical irreducible decomposition.
This python3 script illustrates a two-stage cascade to compute candidate
generic points on all components, on all dimensions of the solution set.
"""
pols = ['(x^2 + y^2 + z^2 - 1)*(y - x^2)*(x - 0.5);', \
'(x^2 + y^2 + z^2 - 1)*(z - x^3)*(y - 0.5);', \
'(x^2 + y^2 + z^2 - 1)*(z - x*y)*(z - 0.5);']
from phcpy.cascades import top_cascade, cascade_filter
(topemb, topsols0, topsols1) = top_cascade(3, 2, pols, 1.0e-8)
print('generic points on the two dimensional surface :')
for sol in topsols0:
print(sol)
input('hit enter to continue')
(lvl1emb, lvl1sols0, lvl1sols1) = cascade_filter(2, topemb, topsols1, 1.0e-8)
print('candidate generic points at level 1 :')
for sol in lvl1sols0:
print(sol)
from phcpy.sets import ismember_filter
fil1sols0 = ismember_filter(topemb, topsols0, 2, lvl1sols0)
print('number of points before filtering :', len(lvl1sols0))
print('number of points after filtering :', len(fil1sols0))
input('hit enter to continue')
print('the filtered witness points at dimension 1 :')
for sol in fil1sols0:
print(sol)
input('hit enter to continue')
(lvl0emb, lvl2sols) = cascade_filter(1, lvl1emb, lvl1sols1, 1.0e-8)
(lvl0emb, lvl2sols) = cascade_filter(1, lvl1emb, lvl1sols1, 1.0e-8)
fil0sols = ismember_filter(topemb, topsols0, 2, lvl2sols)
print('number of points before filtering :', len(lvl2sols))
print('number of points after filtering at dimension 2 :', len(fil0sols))
fil0sols = ismember_filter(lvl1emb, fil1sols0, 1, fil0sols)
print('number of points after filtering at dimension 1 :', len(fil0sols))
print('finished the cascade')
print('the solutions at the end of the cascade :')
for sol in fil0sols:
print(sol)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.