from django.conf.urls.defaults import *
from views import *
from api import *
urlpatterns = patterns('',
# developer list view
url(r'^$', DeveloperListView.as_view()),
url(r'^add$', DeveloperAddView.as_view()),
url(r'^save/$', DeveloperPluginSaveView.as_view()),
url(r'^docs$', DeveloperDocsView.as_view()),
url(r'^(?P<plugin_hash>[^/]+)$', DeveloperDetailView.as_view(), name='developer-detail'),
url(r'^(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)$', DeveloperVersionView.as_view(), name='developer-version'),
url(r'^(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)/deps$', DeveloperDepsView.as_view(), name='developer-deps'),
url(r'^(?P<plugin_hash>[^/]+)/add$', DeveloperVersionView.as_view(), name='developer-version-add'),
#live preview
url(r'^live/(?P<version_id>[0-9]+)/$',
DeveloperLiveAdminView.as_view(), name='developer-live-admin'),
url(r'^live/(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)$',
DeveloperLiveView.as_view(), name='developer-live'),
# API urls
url(r'^checkname/$', CheckNameView.as_view()),
url(r'^deletedep/$', DeleteDepView.as_view()),
# Globalproxy
url(r'^api/databaseSchemas/$', DatabaseSchemasView.as_view()),
url(r'^api/getProfileInformation/$', getProfileInformationView.as_view()),
url(r'^api/getFingerprints/$', getFingerprintsView.as_view()),
url(r'^api/getFingerprints/(?P<quest_slug>[^/]+)$', getFingerprintsView.as_view()),
# FingerprintProxy
url(r'^api/getFingerprintUID/(?P<fingerprint>[^/]+)$', getFingerprintUIDView.as_view()),
url(r'^api/getAnswers/(?P<fingerprint>[^/]+)$', getAnswersView.as_view()),
# datastore
url(r'^api/store/getExtra/(?P<fingerprint>[^/]+)$', getExtraView.as_view()),
url(r'^api/store/getDocuments/(?P<fingerprint>[^/]+)$', getDocumentsView.as_view()),
url(r'^api/store/putDocuments/(?P<fingerprint>[^/]+)$', putDocumentsView.as_view()),
url(r'^api/store/getPublications/(?P<fingerprint>[^/]+)$', getPublicationsView.as_view()),
url(r'^api/store/getComments/(?P<fingerprint>[^/]+)$', getCommentsView.as_view()),
url(r'^api/store/putComment/(?P<fingerprint>[^/]+)$', putCommentView.as_view()),
# fast links to dependency latest revision
url(r'^file/(?P<plugin_hash>[^/]+)/(?P<version>[0-9]+)/(?P<filename>[^/]+)$',
DeveloperFileView.as_view(), name='developer-file'),
)
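# Illustrative sketch (not part of the original urls.py): the named patterns above can
# be reversed elsewhere in the project; the hash/version values below are hypothetical.
#
#   from django.core.urlresolvers import reverse
#   reverse('developer-detail', kwargs={'plugin_hash': 'abc123'})
#   reverse('developer-version', kwargs={'plugin_hash': 'abc123', 'version': '2'})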
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import budgetdatapackage
import datapackage
import datetime
from nose.tools import raises
from datapackage import compat
class TestBudgetResource(object):
def setup(self):
self.values = {
'currency': 'ISK',
'dateLastUpdated': '2014-04-22',
'datePublished': '1982-04-22',
'fiscalYear': '2014',
'granularity': 'transactional',
'status': 'approved',
'type': 'expenditure',
'location': 'IS',
'url': 'http://iceland.is/budgets.csv'}
def test_create_resource(self):
resource = budgetdatapackage.BudgetResource(**self.values)
assert resource.currency == self.values['currency']
last_updated = datetime.datetime.strptime(
self.values['dateLastUpdated'], '%Y-%m-%d').date()
assert resource.dateLastUpdated == last_updated
published = datetime.datetime.strptime(
self.values['datePublished'], '%Y-%m-%d').date()
assert resource.datePublished == published
assert resource.fiscalYear == self.values['fiscalYear']
assert resource.granularity == self.values['granularity']
assert resource.status == self.values['status']
assert resource.type == self.values['type']
assert resource.location == self.values['location']
assert resource.url == self.values['url']
assert resource.standard == '1.0.0-alpha'
def test_resource_can_be_used_with_datapackage(self):
"""Checks if it's possible to create a datapackage with a
budget resource"""
moneys = budgetdatapackage.BudgetResource(**self.values)
finances = datapackage.DataPackage(
name="finances", license="PDDL", resources=[moneys])
assert finances.name == "finances"
assert len(finances.resources) == 1
assert finances.resources[0].granularity == self.values['granularity']
@raises(ValueError)
def test_create_resource_missing_required_field(self):
del self.values['fiscalYear']
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_currency(self):
self.values['currency'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
    def test_bad_dateLastUpdated(self):
self.values['dateLastUpdated'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_datePublished(self):
self.values['datePublished'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_fiscalYear(self):
self.values['fiscalYear'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_granularity(self):
self.values['granularity'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_status(self):
self.values['status'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_type(self):
self.values['type'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
@raises(ValueError)
def test_bad_location(self):
self.values['location'] = 'batman'
budgetdatapackage.BudgetResource(**self.values)
|
import os, sys
from pyxmpp2.jid import JID
from pyxmpp2.jabber.simple import send_message
jid = os.environ['KORINFERJID']
password = os.environ['KORINFERJIDPASSWD']
if len(sys.argv)!=4:
print("Usage:")
print("\t%s recipient_jid subject body" % (sys.argv[0],))
print("example:")
print("\t%s test1@localhost Test 'this is test'" % (sys.argv[0],))
sys.exit(1)
recpt,subject,body=sys.argv[1:]
jid = JID(jid)
if not jid.resource:
jid = JID(jid.node,jid.domain,"korinf")
recpt = JID(recpt)
send_message(jid,password,recpt,body,subject)
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
"""metalex is general tool for lexicographic and metalexicographic activities
Copyright (C) 2017 by Elvis MBONING
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact: levismboning@yahoo.fr
---------------------------------------------------------------------------
makeBalise transforms extracted articles into a well-formed XML file.
It can also generate an HTML file for article editing.
Packages:
>>> sudo apt-get install python-html5lib
>>> sudo apt-get install python-lxml
>>> sudo apt-get install python-bs4
Usage:
>>> from metalex.dicXmilised import *
>>> dicoHtml(save=True)
"""
import metalex
from .composeArticle import *
from .dicXmlTool import *
import re
import sys
import codecs
import os
from bs4 import BeautifulSoup
from random import sample
from shutil import copyfile
from lxml import etree
from termcolor import colored
__all__ = ['BaliseXML', 'dico_html']
components = {
'xml' : {
'metalexMetadata' : ['metalexMetadata', 'projectName', 'author',
'dateCreation', 'comment', 'contributors', 'candidate'],
'metalexContent' : ['article', 'definition', 'example', 'figured', 'contrary',
'entry', 'flexion', 'category', 'gender', 'rection', 'phonetic',
'identificationComponent', 'treatmentComponent', 'cte_cat',
'processingUnit', 'cgr_pt', 'cgr_vrg', 'cgr_fpar', 'cgr_opar',
'cgr_ocrch', 'cgr_fcrch', 'metalexContent', 'cte_gender',
'metalexResultDictionary']
},
'tei' : {
'teiHeader' : ['teiHeader', 'text', 'TEI', 'fileDesc', 'titleStmt',
'title', 'publicationStmt', 'p', 'sourceDesc', 'author'],
'text' : ['body', 'head', 'entry', 'form', 'orth', 'gramGrp',
'sense', 'def', 'cite', 'quote', 'span', 'usg', 'bibl',
'pos', 'genre', 'number', 'pron', 'etym']
},
'lmf' : {
'GlobalInformation' : ['LexicalResource', 'feat', 'p', 'GlobalInformation'],
'Lexicon' : ['Lexicon', 'feat', 'LexicalEntry', 'WordForm',
'Definition', 'Sense', 'Lexicon']
},
'dtd' : ['ELEMENT', 'ATTRIBUTE', 'PCDATA', 'CDATA', 'REQUIRED', 'IMPLIED'],
'xsd' : []
}
codifArticles = []
def dico_html(save=False):
"""Build HTML editor file of the all articles
:return file: metalexViewerEditor.html
"""
print('\n --- %s %s \n\n' %(colored('Part 4: Generate Output formats', attrs=['bold']), '--'*25))
metalex.plugins
instanceHtml = BaliseHTML()
filepath = metalex.html_template
metalex.utils.create_temp()
if metalex.utils.in_dir('CopymetalexTemplate.html'):
copyfile(filepath, 'CopymetalexTemplate.html')
souphtl = instanceHtml.html_inject('CopymetalexTemplate.html')
if save:
metalex.utils.go_to_dicresult()
name = metalex.currentOcr+'_metalexViewerEditor.html'
with codecs.open(name, 'w') as htmlresult:
htmlresult.write(souphtl)
metalex.utils.create_temp()
os.remove('CopymetalexTemplate.html')
message = "*"+name+"* has correctly been generated > Saved in dicTemp folder"
metalex.logs.manageLog.write_log(message)
else:
souphtl = instanceHtml.html_inject('CopymetalexTemplate.html')
if save:
            metalex.utils.go_to_dicresult()
            name = metalex.currentOcr+'_metalexViewerEditor.html'
            with codecs.open(name, 'w') as htmlresult:
htmlresult.write(souphtl)
metalex.utils.create_temp()
os.remove('CopymetalexTemplate.html')
message = "*"+name+"* has correctly been generated > Saved in dicTemp folder"
metalex.logs.manageLog.write_log(message)
    print('\n\n --- %s --------------- \n\n' % colored('MetaLex processing has ended: consult the result data in the "dicTemp" folder',
          'green', attrs=['bold']))
class BaliseHTML():
def __init__(self):
self.resultHtml = ''
def html_inject(self, template):
"""Create prettify HTML file all previous data generated
:return str: html (prettify by BeautifulSoup)
"""
instanceXml = BaliseXML()
contentxml = instanceXml.put_xml(typ='xml', save=True)
metalex.utils.create_temp()
soupXml = BeautifulSoup(contentxml, "html.parser")
projectconf = metalex.utils.read_conf()
        Hauthor, Hname = projectconf['Author'], projectconf['Projectname']
Hdate,Hcomment = projectconf['Creationdate'], projectconf['Comment']
Hcontrib = projectconf['Contributors']
filetemplate = codecs.open(template, 'r', 'utf-8')
souphtml = BeautifulSoup(filetemplate, "html5lib")
content = souphtml.find('div', attrs={'id': 'all-articles'})
author = content.find('h3', attrs={'id': 'author'})
author.string = 'main: '+Hauthor
date = content.find('h5', attrs={'id': 'date'})
date.string = Hdate
descipt = content.find('p', attrs={'id': 'description'})
descipt.string = Hcomment
contrib = content.find('h4', attrs={'id': 'contributors'})
contrib.string = 'contributors: '+Hcontrib
project = content.find('h4', attrs={'id': 'projetname'})
project.string = Hname
articlesxml = soupXml.findAll('article')
articleshtml = souphtml.find('div', attrs={'id': 'mtl:articles'})
for x in articlesxml:
elementart = BeautifulSoup('<article id=""></article>', 'html5lib')
idart = x.get('id')
artlem = x.get_text()
elementart.article.append(artlem)
elementart.article['id'] = idart
articleshtml.append(elementart.find('article'))
listlemme = souphtml.find('ul', attrs={'id': 'list-articles'})
for x in articlesxml:
art = x.get_text()
idart = x.get('id')
lem = x.find('entry').get_text()
lemme = BeautifulSoup('<li class="w3-hover-light-grey"><span class="lemme" onclick="changeImage('+
"'"+idart+"'"+')">'+lem+'</span><span class="fa fa-plus w3-closebtn" onclick="add('+
"'"+idart+"'"+')"/></li>', 'html5lib')
listlemme.append(lemme.find('li'))
filetemplate.close()
self.resultHtml = souphtml.prettify('utf-8')
return self.resultHtml
class BaliseXML ():
"""Build XML file type (xml|tei|lmf) with global metadata of the project
:param typ: str
:return obj: instance of BaliseXML
"""
def __init__(self, typ="xml"):
self.typ = typ
def build_structure(self, data, Sfile=None, typ='dtd'):
return False
def message(self, name):
return "*"+name+"* dictionary articles formated in xml is"+\
" created > Saved in dicTemp folder"
def put_xml(self, typ='xml', save=False):
"""Create well formed (xml|tei|lmf) file with metadata and content xml
:return metalexXml
"""
metadata = self.xml_metadata(typ)
content = self.xml_content(typ)
metalex.utils.go_to_dicresult()
if typ == 'xml':
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'.xml'
metalexXml = self.balise(metadata+content, 'metalexResultDictionary',
attr={'xmlns':'https://www.w3schools.com',
                                               'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation':'metalexSchemaXML.xsd'})
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'metalexResultDictionary', attr={})
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
if typ == 'tei':
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'-TEI.xml'
metalexXml = self.balise(metadata+content, 'TEI', typ= 'tei')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'TEI', typ= 'tei')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
if typ == 'lmf':
os.listdir('.')
if save:
name = 'metalex-'+metalex.projectName+'_'+metalex.currentOcr+'-LMF.xml'
metalexXml = self.balise(metadata+content, 'LexicalResource', attr={'dtdVersion':'15'}, typ= 'lmf')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
if metalex.utils.in_dir(name):
with codecs.open(name, 'w', 'utf-8') as fle:
fle.write(metalexXmlTree.prettify(formatter=None))
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
else:
mge = self.message(name)
metalex.logs.manageLog.write_log(mge)
return metalexXml
else:
metalexXml = self.balise(metadata+content, 'LexicalResource', attr={'dtdVersion':'15'}, typ= 'lmf')
metalexXml = '<?xml version="1.0" encoding="UTF-8" ?>'+metalexXml
metalexXmlTree = BeautifulSoup(metalexXml, 'xml')
print(metalexXmlTree.prettify(formatter=None))
def xml_metadata(self, typ='xml'):
"""Create xml metadata file with configuration of the project
:return str: metadata
"""
metalex.utils.create_temp()
projectconf = metalex.utils.read_conf()
        contribtab = projectconf['Contributors'].split(',') \
            if projectconf['Contributors'].find(',') != -1 else projectconf['Contributors']
contrib = ''
if typ == 'xml':
author = self.balise(projectconf['Author'], 'author', typ)
name = self.balise(projectconf['Projectname'].strip(), 'projectName', typ)
date = self.balise(projectconf['Creationdate'].strip(), 'dateCreation', typ)
comment = self.balise(projectconf['Comment'], 'comment', typ)
if len(contribtab) > 1:
for data in contribtab: contrib += self.balise(data.strip(), 'candidate', typ)
else: contrib = self.balise(''.join(contribtab), 'candidate', typ)
contrib = self.balise(contrib, 'contributors', typ)
cont = name+author+date+comment+contrib
metadataxml = self.balise(cont, 'metalexMetadata', typ)
return metadataxml
if typ == 'tei':
if len(contribtab) > 1:
for data in contribtab:
if len(data) > 2: contrib += self.balise(data.strip(), 'span',
attr={'content':'contributor'}, typ='tei')
else: contrib = self.balise(''.join(contribtab), 'span', typ='tei')
author = self.balise(projectconf['Author'], 'author', typ='tei')
title = self.balise(projectconf['Projectname'], 'title', typ='tei')
RtitleStmt = self.balise(title, 'titleStmt', typ='tei')
pdate = self.balise(projectconf['Creationdate'], 'p', typ='tei')
pcomment = self.balise(projectconf['Comment'], 'p', typ='tei')
pcontrib = self.balise(contrib, 'p', attr={'content':'contributors'}, typ='tei')
Rpubli = self.balise(author+pdate+pcomment+pcontrib, 'publicationStmt', typ='tei')
sourc = self.balise('TEI metadata for metalex project output', 'p', typ='tei')
Rsourc = self.balise(sourc, 'sourceDesc', typ='tei')
RfilD = self.balise(RtitleStmt+Rpubli+Rsourc, 'fileDesc', typ='tei')
metadatatei = self.balise(RfilD, 'teiHeader', typ='tei')
return metadatatei
if typ == 'lmf':
if len(contribtab) > 1:
for data in contribtab:
if len(data) > 2: contrib += data.strip()+', '
else: contrib = ', '.join(contribtab)
enc = self.balise('', 'feat', attr={'att':'languageCoding', 'val':'utf-8'},
typ='lmf', sclose=True)
pauthor = self.balise('', 'feat', attr={'att':'author', 'val':projectconf['Author'].strip()},
typ='lmf', sclose=True)
pdate = self.balise('', 'feat', attr={'att':'dateCreation', 'val':projectconf['Creationdate'].strip()},
typ='lmf', sclose=True)
pname = self.balise('', 'feat', attr={'att':'projectName', 'val':projectconf['Projectname'].strip()},
typ='lmf', sclose=True)
pcomment = self.balise('', 'feat', attr={'att':'comment', 'val':projectconf['Comment'].strip()},
typ='lmf', sclose=True)
pcontrib = self.balise('', 'feat', attr={'att':'contributors', 'val':contrib.strip(', ')},
typ='lmf', sclose=True)
meta = self.balise('', 'p', attr={'att':'meta', 'val':'TEI metadata for metalex project output'},
typ='lmf', sclose=True)
metadatalmf = self.balise(enc+pauthor+pname+meta+pdate+pcomment+pcontrib, 'GlobalInformation', typ='lmf')
return metadatalmf
def balise_content_article (self):
data = get_data_articles('text')
cod = StructuredWithCodif(data, 'xml')
resultArticles = []
for art in cod.format_articles():
article_type_form(art)
if article_type_form(art) == '1':
partArt = re.search(r'(([a-zéèàûô]+)\s(<cte_cat>.+</cte_cat>)\s(.+)<cgr_pt>\.</cgr_pt>)', art, re.I)
if partArt != None:
ident, entry, cat, treat = partArt.group(1), partArt.group(2), partArt.group(3), partArt.group(4)
id = generate_id()
entry = self.balise(entry, 'entry')
ident = self.balise(entry+cat, 'identificationComponent')
treat = self.balise(self.balise(treat, 'definition'), 'processingUnit')
article = self.balise(ident+self.balise(treat, 'treatmentComponent'), 'article', attr={'id':id})
resultArticles.append(article)
if article_type_form(art) == '2':
research = r'(([a-zéèàûô]+)\s(<cte_cat>.+</cte_cat>\s<cte_gender>..</cte_gender>)\s(.+)<cgr_pt>\.</cgr_pt>)'
partArt = re.search(research, art, re.I)
if partArt != None:
ident, entry, cat, treat = partArt.group(1), partArt.group(2), partArt.group(3), partArt.group(4)
id = generate_id()
entry = self.balise(entry, 'entry')
ident = self.balise(entry+cat, 'identificationComponent')
if not re.search(r'(<cgr_pt>\.</cgr_pt>|<cte_cat>.+</cte_cat>|<cgr_vrg>,</cgr_vrg>)', partArt.group(4), re.I):
treat = self.balise(self.balise(treat+'.', 'definition'), 'processingUnit')
article = self.balise(ident+self.balise(treat, 'treatmentComponent'), 'article', attr={'id':id})
resultArticles.append(article)
elif partArt.group(4).find(' et ') != -1:
suite = 'hahaha'
return resultArticles
def xml_content(self, typ='xml', forme='text'):
"""Create xml content file (representing articles) with data articles extracting
:return str: contentXml
"""
content = ''
contentXml = ''
data = self.balise_content_article()
if typ == 'xml':
if forme == 'pickle':
data = get_data_articles('pickle')
for dicart in data:
for art in dicart.keys():
art = self.balise(dicart[art], 'article', art=True)
content += art
contentXml = self.balise(content, 'metalexContent')
return contentXml
else:
for art in data: content += art
contentXml = self.balise(content, 'metalexContent', attr={'totalArticle': str(len(data))})
return contentXml
if typ == 'tei':
for art in data:
soupart = BeautifulSoup(art, 'html.parser')
orth = soupart.find('entry').getText()
atOrth = soupart.find('article').get('id')
orth = self.balise(orth, 'orth', {'id': atOrth}, typ='tei')
formB = self.balise(orth, 'form', attr={'xml:lang':'fr', 'type':'lemma'}, typ='tei')
pos = soupart.find('cte_cat').getText()
posB = self.balise(pos, 'pos', typ='tei')
genB = ''
if soupart.find('cte_gender'): genB = soupart.find('cte_gender').getText().strip()
if genB == 'f.' or genB == 'm.': genB = self.balise(genB, 'genre', typ='tei')
gramgrp = self.balise(posB+genB, 'gramGrp', typ='tei')
sens = soupart.find('processingunit').getText().replace(' .', '.')
defi = self.balise(sens, 'def', typ='tei')
if sens != None: sens = self.balise(defi, 'sense', typ='tei')
entry = self.balise(formB+gramgrp+sens, 'entry', typ='tei')
content += entry
body = self.balise(content, 'body', typ='tei')
contentXml = self.balise(body, 'text', attr={'totalArticle': str(len(data))}, typ='tei')
return contentXml
if typ == 'lmf':
for art in data:
soupart = BeautifulSoup(art, 'html.parser')
orth = soupart.find('entry').getText()
atOrth = soupart.find('article').get('id')
orth = self.balise('', 'feat', attr={'att':'writtenForm','val':orth},
typ='lmf', sclose=True)
wordF = self.balise(orth, 'WordForm', attr={'id': atOrth}, typ='lmf')
pos = soupart.find('cte_cat').getText()
posB = self.balise('', 'feat', attr={'att':'partOfSpeech','val':pos},
typ='lmf', sclose=True)
genB = ''
if soupart.find('cte_gender'): genB = soupart.find('cte_gender').getText().strip()
if genB == 'f.' or genB == 'm.':
genB = self.balise('', 'feat', attr={'att':'grammaticalNumber','val': genB},
typ='lmf', sclose=True)
sens = soupart.find('processingunit').getText().replace(' .', '.')
sensnb = self.balise('', 'feat', attr={'att':'sensNumber','val':'1'},
typ='lmf', sclose=True)
definb = self.balise('', 'feat', attr={'att':'text','val':sens.strip()},
typ='lmf', sclose=True)
defi = self.balise(definb, 'Definition', typ='lmf')
if sens != None: sens = self.balise(sensnb+defi, 'Sense', typ='lmf')
entry = self.balise(wordF+posB+genB+sens, 'LexicalEntry', typ='lmf')
content += entry
body = self.balise('', 'feat', attr={'att':'language','val':'fra'},
typ='lmf', sclose=True)+content
contentXml = self.balise(body, 'Lexicon', attr={'totalArticle': str(len(data))}, typ='lmf')
return contentXml
def balise(self, element, markup, sclose=False, attr=None, typ='xml', art=False):
"""Markup data with a specific format type (xml|tei|lmf)
:return str: balised element
"""
if typ == 'xml':
if markup in components['xml']['metalexContent'] or markup \
in components['xml']['metalexMetadata']:
if art:
element = self.chevron(markup, attr, art=True)+element+self.chevron(markup, attr, False)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
if typ == 'tei':
if markup in components['tei']['text'] or markup in components['tei']['teiHeader']:
if art:
element = self.chevron(markup, attr, art=True)+element+self.chevron(markup, attr, False)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
if typ == 'lmf':
            if markup in components['lmf']['GlobalInformation'] \
                    or markup in components['lmf']['Lexicon']:
if sclose:
element = self.chevron(markup, attr, True, sclose=True)
return element
else:
element = self.chevron(markup, attr)+element+self.chevron(markup, attr, False)
return element
def chevron(self, el, attr, openchev=True, art=False, sclose=False):
"""Put tag around the data of element
:return str: tagging element
"""
idart = generate_id()
if art and attr == None:
if openchev : return "<"+el+" id='"+idart+"' class='data-entry'"+">"
if not openchev: return "</"+el+">"
if sclose : return "<"+el+" id='"+idart+"'/>"
if art and attr != None:
allattrib = ''
for at in attr.keys():
allattrib += ' '+at+'="'+attr[at]+'"'
if openchev and not sclose : return "<"+el+" id='"+idart+"' class='data-entry'"+' '+allattrib+">"
if openchev and sclose: return "<"+el+" id='"+idart+"' class='data-entry'"+' '+allattrib+"/>"
if not openchev: return "</"+el+">"
elif art == False and attr != None:
#print openchev
allattrib = ''
for at in attr.keys(): allattrib += ' '+at+'="'+attr[at]+'"'
if openchev and not sclose: return "<"+el+' '+allattrib+">"
if openchev and sclose: return "<"+el+' '+allattrib+"/>"
if not openchev: return "</"+el+">"
elif art == False and attr == None:
if openchev : return "<"+el+">"
if sclose : return "<"+el+"/>"
if not openchev: return "</"+el+">"
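# Hedged usage sketch (assumption, not part of the original module): wrapping values
# with markups whitelisted in `components` above; outputs shown approximately.
#
#   bx = BaliseXML()
#   bx.balise('mot', 'entry')                      # -> '<entry>mot</entry>'
#   bx.balise('<entry>mot</entry>', 'article',
#             attr={'id': 'art_1'})                # -> '<article id="art_1">...</article>'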
|
import cgitb
import fnmatch
import io
import logging
import click
import pyjsdoc
import pyjsparser
import sys
from .parser.parser import ModuleMatcher
from .parser.visitor import Visitor, SKIP
from . import jsdoc
class Printer(Visitor):
def __init__(self, level=0):
super(Printer, self).__init__()
self.level = level
def _print(self, text):
        print('%s %s' % (' ' * self.level, text))
def enter_generic(self, node):
self._print(node['type'])
self.level += 1
def exit_generic(self, node):
self.level -= 1
def enter_Identifier(self, node):
self._print(node['name'])
return SKIP
def enter_Literal(self, node):
self._print(node['value'])
return SKIP
def enter_BinaryExpression(self, node):
self._print(node['operator'])
self.level += 1
def visit_files(files, visitor, ctx):
for name in files:
with io.open(name) as f:
ctx.logger.info("%s", name)
try:
yield visitor().visit(pyjsparser.parse(f.read()))
except Exception as e:
if ctx.logger.isEnabledFor(logging.DEBUG):
ctx.logger.exception("while visiting %s", name)
else:
ctx.logger.error("%s while visiting %s", e, name)
ABSTRACT_MODULES = [
jsdoc.ModuleDoc({
'module': 'web.web_client',
'dependency': {'web.AbstractWebClient'},
'exports': jsdoc.NSDoc({
'name': 'web_client',
'doc': 'instance of AbstractWebClient',
}),
}),
jsdoc.ModuleDoc({
'module': 'web.Tour',
'dependency': {'web_tour.TourManager'},
'exports': jsdoc.NSDoc({
'name': 'Tour',
'doc': 'maybe tourmanager instance?',
}),
}),
# OH FOR FUCK'S SAKE
jsdoc.ModuleDoc({
'module': 'summernote/summernote',
'exports': jsdoc.NSDoc({'doc': "totally real summernote"}),
})
]
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.option('-v', '--verbose', count=True)
@click.option('-q', '--quiet', count=True)
@click.pass_context
def autojsdoc(ctx, verbose, quiet):
logging.basicConfig(
level=logging.INFO + (quiet - verbose) * 10,
format="[%(levelname)s %(created)f] %(message)s",
)
ctx.logger = logging.getLogger('autojsdoc')
ctx.visitor = None
ctx.files = []
ctx.kw = {}
@autojsdoc.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def ast(ctx, files):
""" Prints a structure tree of the provided files
"""
if not files:
print(ctx.get_help())
visit_files(files, lambda: Printer(level=1), ctx.parent)
@autojsdoc.command()
@click.option('-m', '--module', multiple=True, help="Only shows dependencies matching any of the patterns")
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def dependencies(ctx, module, files):
""" Prints a dot file of all modules to stdout
"""
if not files:
print(ctx.get_help())
byname = {
mod.name: mod.dependencies
for mod in ABSTRACT_MODULES
}
for modules in visit_files(files, ModuleMatcher, ctx.parent):
for mod in modules:
byname[mod.name] = mod.dependencies
print('digraph dependencies {')
todo = set()
# if module filters, roots are only matching modules
if module:
for f in module:
todo.update(fnmatch.filter(byname.keys(), f))
for m in todo:
# set a different box for selected roots
print(' "%s" [color=orangered]' % m)
else:
# otherwise check all modules
todo.update(byname)
done = set()
while todo:
node = todo.pop()
if node in done:
continue
done.add(node)
deps = byname[node]
todo.update(deps - done)
for dep in deps:
print(' "%s" -> "%s";' % (node, dep))
print('}')
try:
autojsdoc.main(prog_name='autojsdoc')
except Exception:
print(cgitb.text(sys.exc_info()))
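# Hedged usage note (assumption; file paths and patterns below are illustrative only):
# the click group above is normally driven from the command line, e.g.
#   autojsdoc ast path/to/module.js
#   autojsdoc -v dependencies -m 'web.*' path/to/*.js > deps.dot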
|
import os
import sys
from src import impl as rlcs
import utils as ut
import analysis as anls
import matplotlib.pyplot as plt
import logging
import pickle as pkl
import time
config = ut.loadConfig('config')
sylbSimFolder=config['sylbSimFolder']
transFolder=config['transFolder']
lblDir=config['lblDir']
onsDir=config['onsDir']
resultDir=config['resultDir']
queryList = [
    ['DHE','RE','DHE','RE','KI','TA','TA','KI','NA','TA','TA','KI','TA','TA','KI','NA'],
    ['TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA'],
    ['TA','KI','TA','TA','KI','TA','TA','KI'],
    ['TA','TA','KI','TA','TA','KI'],
    ['TA','TA','KI','TA'],
    ['KI','TA','TA','KI'],
    ['TA','TA','KI','NA'],
    ['DHA','GE','TA','TA']]
queryLenCheck = [4,6,8,16]
for query in queryList:
if len(query) not in queryLenCheck:
        print('The query is not of correct length!!')
sys.exit()
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
res = anls.getPatternsInTransInGTPos(masterData, queryList)
|
from shoop.api.factories import viewset_factory
from shoop.core.api.orders import OrderViewSet
from shoop.core.api.products import ProductViewSet, ShopProductViewSet
from shoop.core.models import Contact, Shop
from shoop.core.models.categories import Category
def populate_core_api(router):
"""
:param router: Router
:type router: rest_framework.routers.DefaultRouter
"""
router.register("shoop/category", viewset_factory(Category))
router.register("shoop/contact", viewset_factory(Contact))
router.register("shoop/order", OrderViewSet)
router.register("shoop/product", ProductViewSet)
router.register("shoop/shop", viewset_factory(Shop))
router.register("shoop/shop_product", ShopProductViewSet)
|
from django.contrib import admin
from hub.models import ExtraUserDetail
from .models import AuthorizedApplication
admin.site.register(AuthorizedApplication)
admin.site.register(ExtraUserDetail)
|
from spack import *
class Libidl(AutotoolsPackage):
"""libraries for Interface Definition Language files"""
homepage = "https://developer.gnome.org/"
url = "https://ftp.gnome.org/pub/gnome/sources/libIDL/0.8/libIDL-0.8.14.tar.bz2"
version('0.8.14', sha256='c5d24d8c096546353fbc7cedf208392d5a02afe9d56ebcc1cccb258d7c4d2220')
depends_on('pkgconfig', type='build')
depends_on('glib')
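# Hedged note: with this recipe on Spack's package path, the library would typically be
# built with `spack install libidl` (command shown for illustration only).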
|
import os, sys, random
pandoraPath = os.getenv('PANDORAPATH', '/usr/local/pandora')
sys.path.append(pandoraPath+'/bin')
sys.path.append(pandoraPath+'/lib')
from pyPandora import Config, World, Agent, SizeInt
class MyAgent(Agent):
gatheredResources = 0
def __init__(self, id):
Agent.__init__( self, id)
print('constructing agent: ',self.id)
def updateState(self):
print('updating state of: ',self.id)
newPosition = self.position
newPosition._x = newPosition._x + random.randint(-1,1)
newPosition._y = newPosition._y + random.randint(-1,1)
if self.getWorld().checkPosition(newPosition):
self.position = newPosition
self.gatheredResources = self.gatheredResources + self.getWorld().getValue('resources', self.position)
self.getWorld().setValue('resources', self.position, 0)
def registerAttributes(self):
self.registerIntAttribute('resources')
def serialize(self):
print('serializing MyAgent: ',self.id)
self.serializeIntAttribute('resources', self.gatheredResources)
class MyWorld(World):
def __init__(self, config):
World.__init__( self, config)
print('constructing MyWorld')
def createRasters(self):
print('creating rasters')
self.registerDynamicRaster("resources", 1)
self.getDynamicRaster("resources").setInitValues(0, 10, 0)
return
def createAgents(self):
print('creating agents')
for i in range (0, 10):
newAgent = MyAgent('MyAgent_'+str(i))
self.addAgent(newAgent)
newAgent.setRandomPosition()
def main():
print('getting started with pyPandora')
numTimeSteps = 10
worldSize = SizeInt(64,64)
myConfig = Config(worldSize, numTimeSteps)
myWorld = MyWorld(myConfig)
myWorld.initialize()
myWorld.run()
print('simulation finished')
if __name__ == "__main__":
main()
|
import unittest
import json
from datetime import datetime
from pymongo import MongoClient
from apps.basic_resource import server
from apps.basic_resource.documents import Article, Comment, Vote
class ResourcePostListFieldItemListField(unittest.TestCase):
"""
Test if a HTTP POST that adds entries to a listfield in a item of a
listfield on a resource gives the right response and adds the data
in the database.
"""
@classmethod
def setUpClass(cls):
cls.app = server.app.test_client()
cls.mongo_client = MongoClient()
comment_id = "528a5250aa2649ffd8ce8a90"
cls.initial_data = {
'title': "Test title",
'text': "Test text",
'publish': True,
'publish_date': datetime(2013, 10, 9, 8, 7, 8),
'comments': [
Comment(
id=comment_id,
text="Test comment old",
email="test@example.com",
upvotes=[
Vote(
ip_address="1.4.1.2",
date=datetime(2012, 5, 2, 9, 1, 3),
name="Jzorz"
),
Vote(
ip_address="2.4.5.2",
date=datetime(2012, 8, 2, 8, 2, 1),
name="Nahnahnah"
)
]
),
Comment(
text="Test comment 2 old",
email="test2@example.com",
upvotes=[
Vote(
ip_address="1.4.1.4",
date=datetime(2013, 5, 2, 9, 1, 3),
name="Zwefhalala"
),
Vote(
ip_address="2.4.9.2",
date=datetime(2013, 8, 2, 8, 2, 1),
name="Jhardikranall"
)
]
),
],
'top_comment': Comment(
text="Top comment",
email="test@example.com",
upvotes=[
Vote(
ip_address="5.4.1.2",
date=datetime(2012, 5, 2, 9, 2, 3),
name="Majananejjeew"
),
Vote(
ip_address="2.4.1.2",
date=datetime(2012, 3, 2, 8, 2, 1),
name="Hoeieieie"
)
]
),
'tags': ["tag1", "tag2", "tag3"]
}
article = Article(**cls.initial_data).save()
cls.add_data = {
'ip_address': "5.5.5.5",
'name': "Wejejejeje"
}
cls.response = cls.app.post(
'/articles/{}/comments/{}/upvotes/'.format(
unicode(article['id']),
comment_id
),
headers={'content-type': 'application/json'},
data=json.dumps(cls.add_data)
)
@classmethod
def tearDownClass(cls):
cls.mongo_client.unittest_monkful.article.remove()
def test_status_code(self):
"""
Test if the response status code is 201.
"""
self.assertEqual(self.response.status_code, 201)
def test_content_type(self):
"""
Test if the content-type header is 'application/json'.
"""
self.assertEqual(
self.response.headers['content-type'],
'application/json'
)
def test_json(self):
"""
Test if the response data is valid JSON.
"""
try:
json.loads(self.response.data)
        except ValueError:
self.fail("Response is not valid JSON.")
def test_content(self):
"""
        Test if the deserialized response data matches the data we
        posted to the resource in `setUpClass`.
"""
response_data = json.loads(self.response.data)
# Remove the date field because it's auto generated and we
# didn't include it in the original posted data.
del response_data['date']
self.assertEqual(response_data, self.add_data)
def test_documents(self):
"""
Test if the POST-ed data really ended up in the document
"""
upvotes = Article.objects[0].comments[0].upvotes
self.assertEqual(len(upvotes), 3)
self.assertEqual(upvotes[2].ip_address, self.add_data['ip_address'])
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('narglatch_sick')
mobileTemplate.setLevel(21)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(60)
mobileTemplate.setHideType("Bristley Hide")
mobileTemplate.setHideAmount(45)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(40)
mobileTemplate.setSocialGroup("narglatch")
mobileTemplate.setAssistRange(2)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
mobileTemplate.setStalker(False)
templates = Vector()
templates.add('object/mobile/shared_narglatch_hue.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_2')
attacks.add('bm_slash_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('narglatch_sick', mobileTemplate)
return
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('water_thief')
mobileTemplate.setLevel(5)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("thug")
mobileTemplate.setAssistRange(4)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_dressed_tatooine_moisture_thief.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/sword/shared_sword_01.iff', WeaponType.ONEHANDEDMELEE, 1.0, 5, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('saberhit')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('water_thief', mobileTemplate)
return
|
from charm.toolbox.pairinggroup import PairingGroup,GT,extract_key
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
from charm.toolbox.ABEnc import ABEnc
from charm.schemes.abenc.abenc_lsw08 import KPabe
debug = False
class HybridABEnc(ABEnc):
"""
>>> from charm.schemes.abenc.abenc_lsw08 import KPabe
>>> group = PairingGroup('SS512')
>>> kpabe = KPabe(group)
>>> hyb_abe = HybridABEnc(kpabe, group)
>>> access_policy = ['ONE', 'TWO', 'THREE']
>>> access_key = '((FOUR or THREE) and (TWO or ONE))'
>>> msg = b"hello world this is an important message."
>>> (master_public_key, master_key) = hyb_abe.setup()
>>> secret_key = hyb_abe.keygen(master_public_key, master_key, access_key)
>>> cipher_text = hyb_abe.encrypt(master_public_key, msg, access_policy)
>>> hyb_abe.decrypt(cipher_text, secret_key)
b'hello world this is an important message.'
"""
def __init__(self, scheme, groupObj):
ABEnc.__init__(self)
global abenc
# check properties (TODO)
abenc = scheme
self.group = groupObj
def setup(self):
return abenc.setup()
def keygen(self, pk, mk, object):
return abenc.keygen(pk, mk, object)
def encrypt(self, pk, M, object):
key = self.group.random(GT)
c1 = abenc.encrypt(pk, key, object)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, ct, sk):
c1, c2 = ct['c1'], ct['c2']
key = abenc.decrypt(c1, sk)
cipher = AuthenticatedCryptoAbstraction(extract_key(key))
return cipher.decrypt(c2)
def main():
groupObj = PairingGroup('SS512')
kpabe = KPabe(groupObj)
hyb_abe = HybridABEnc(kpabe, groupObj)
access_key = '((ONE or TWO) and THREE)'
access_policy = ['ONE', 'TWO', 'THREE']
message = b"hello world this is an important message."
(pk, mk) = hyb_abe.setup()
if debug: print("pk => ", pk)
if debug: print("mk => ", mk)
sk = hyb_abe.keygen(pk, mk, access_key)
if debug: print("sk => ", sk)
ct = hyb_abe.encrypt(pk, message, access_policy)
mdec = hyb_abe.decrypt(ct, sk)
assert mdec == message, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
debug = True
main()
|
from colour import *
from cartesian import *
from timeit import *
def test_colour():
b = colour_create(0, 0, 0, 0)
for i in range(1, 100000):
c = colour_create(.5, .5, .5, 0)
b = colour_add(b, c)
def test_cartesian():
b = cartesian_create(0, 0, 0)
for i in range(1, 50000):
c = cartesian_create(.5, .5, .5)
b = cartesian_normalise(cartesian_add(b, c))
d = cartesian_dot(c, b)
e = cartesian_cross(c, b)
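# Hedged sketch (assumption): the unused `timeit` star-import above suggests these loops
# are meant to be benchmarked, e.g.
#   print(timeit('test_colour()', setup='from __main__ import test_colour', number=1))
#   print(timeit('test_cartesian()', setup='from __main__ import test_cartesian', number=1))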
|
from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..utils import (
compat_str,
compat_urllib_parse,
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow or :colbert
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
|https?://(:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
(?:[?#].*|$)'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix; so extract the alternate
            # reference and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
})
return {
'_type': 'playlist',
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
|
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from op_test import OpTest
paddle.enable_static()
np.random.seed(1)
def maxout_forward_naive(x, groups, channel_axis):
s0, s1, s2, s3 = x.shape
if channel_axis == 1:
return np.ndarray([s0, s1 // groups, groups, s2, s3], \
buffer = x, dtype=x.dtype).max(axis=2)
return np.ndarray([s0, s1, s2, s3 // groups, groups], \
buffer = x, dtype=x.dtype).max(axis=4)
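# Worked shape example (illustrative): for x of shape [3, 6, 2, 4] with groups=2 and
# channel_axis=1, the reference above views x as [3, 3, 2, 2, 4] and takes the max over
# axis 2, giving an output of shape [3, 3, 2, 4].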
class TestMaxOutOp(OpTest):
def setUp(self):
self.op_type = "maxout"
self.dtype = 'float64'
self.shape = [3, 6, 2, 4]
self.groups = 2
self.axis = 1
self.set_attrs()
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
out = maxout_forward_naive(x, self.groups, self.axis)
self.inputs = {'X': x}
self.attrs = {'groups': self.groups, 'axis': self.axis}
self.outputs = {'Out': out}
def set_attrs(self):
pass
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestMaxOutOpAxis0(TestMaxOutOp):
def set_attrs(self):
self.axis = -1
class TestMaxOutOpAxis1(TestMaxOutOp):
def set_attrs(self):
self.axis = 3
class TestMaxOutOpFP32(TestMaxOutOp):
def set_attrs(self):
self.dtype = 'float32'
class TestMaxOutOpGroups(TestMaxOutOp):
def set_attrs(self):
self.groups = 3
class TestMaxoutAPI(unittest.TestCase):
# test paddle.nn.Maxout, paddle.nn.functional.maxout
def setUp(self):
self.x_np = np.random.uniform(-1, 1, [2, 6, 5, 4]).astype(np.float64)
self.groups = 2
self.axis = 1
self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.maxout(x, self.groups, self.axis)
m = paddle.nn.Maxout(self.groups, self.axis)
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
for r in res:
self.assertTrue(np.allclose(out_ref, r))
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.maxout(x, self.groups, self.axis)
m = paddle.nn.Maxout(self.groups, self.axis)
out2 = m(x)
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
for r in [out1, out2]:
self.assertTrue(np.allclose(out_ref, r.numpy()))
out3 = F.maxout(x, self.groups, -1)
out3_ref = maxout_forward_naive(self.x_np, self.groups, -1)
self.assertTrue(np.allclose(out3_ref, out3.numpy()))
paddle.enable_static()
def test_fluid_api(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.maxout(x, groups=self.groups, axis=self.axis)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis)
self.assertTrue(np.allclose(out_ref, res[0]))
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.fluid.layers.maxout(x, groups=self.groups, axis=self.axis)
self.assertTrue(np.allclose(out_ref, out.numpy()))
paddle.enable_static()
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.maxout, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.fluid.data(
name='x_int32', shape=[2, 4, 6, 8], dtype='int32')
self.assertRaises(TypeError, F.maxout, x_int32)
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
if __name__ == '__main__':
unittest.main()
|
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cqlengine.columns import Column, Set, List, Text
from cassandra.cqlengine.operators import *
from cassandra.cqlengine.statements import (UpdateStatement, WhereClause,
AssignmentClause, SetUpdateClause,
ListUpdateClause)
import six
class UpdateStatementTests(unittest.TestCase):
def test_table_rendering(self):
""" tests that fields are properly added to the select statement """
us = UpdateStatement('table')
self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us))
self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))
def test_rendering(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us))
us.add_where(Column(db_field='a'), NotEqualsOperator(), 'y')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', six.text_type(us))
def test_context(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'})
def test_context_update(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
us.update_context_id(3)
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s')
self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'})
def test_additional_rendering(self):
us = UpdateStatement('table', ttl=60)
us.add_assignment(Column(db_field='a'), 'b')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertIn('USING TTL 60', six.text_type(us))
def test_update_set_add(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set((1,)), 'add')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s')
def test_update_empty_set_add_does_not_assign(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set(), 'add')
self.assertFalse(us.assignments)
def test_update_empty_set_removal_does_not_assign(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set(), 'remove')
self.assertFalse(us.assignments)
def test_update_list_prepend_with_empty_list(self):
us = UpdateStatement('table')
us.add_update(List(Text, db_field='a'), [], 'prepend')
self.assertFalse(us.assignments)
def test_update_list_append_with_empty_list(self):
us = UpdateStatement('table')
us.add_update(List(Text, db_field='a'), [], 'append')
self.assertFalse(us.assignments)
|
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack import wsgi
from nova.tests.functional.api import client
from nova.tests.functional import api_paste_fixture
from nova.tests.functional import test_servers
from nova.tests.unit import fake_network
class LegacyV2CompatibleTestBase(test_servers.ServersTestBase):
_api_version = 'v2'
def setUp(self):
self.useFixture(api_paste_fixture.ApiPasteV2CompatibleFixture())
super(LegacyV2CompatibleTestBase, self).setUp()
self._check_api_endpoint('/v2', [compute.APIRouterV21,
openstack.LegacyV2CompatibleWrapper])
def test_request_with_microversion_headers(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
def test_request_without_addtional_properties_check(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test", "foooooo": "barrrrrr"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
def test_request_with_pattern_properties_check(self):
fake_network.set_stub_network_methods(self.stubs)
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self._wait_for_state_change(created_server, 'BUILD')
response = self.api.post_server_metadata(created_server['id'],
{'a': 'b'})
self.assertEqual(response, {'a': 'b'})
def test_request_with_pattern_properties_with_avoid_metadata(self):
fake_network.set_stub_network_methods(self.stubs)
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
exc = self.assertRaises(client.OpenStackApiException,
self.api.post_server_metadata,
created_server['id'],
{'a': 'b',
'x' * 300: 'y',
'h' * 300: 'i'})
self.assertEqual(exc.response.status_code, 400)
|
__author__ = 'lorenzo'
from google.appengine.ext import vendor
vendor.add('lib')
|
import xmlrpclib
import uuid
from handler.geni.v3.extensions.sfa.trust.certificate import Certificate
from handler.geni.v3.extensions.sfa.util.faults import GidInvalidParentHrn, GidParentHrn
from handler.geni.v3.extensions.sfa.util.sfalogging import logger
from handler.geni.v3.extensions.sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
def create_uuid():
return str(uuid.uuid4().int)
class GID(Certificate):
uuid = None
hrn = None
urn = None
email = None # for adding to the SubjectAltName
##
# Create a new GID object
#
# @param create If true, create the X509 certificate
# @param subject If subject!=None, create the X509 cert and set the subject name
# @param string If string!=None, load the GID from a string
# @param filename If filename!=None, load the GID from a file
# @param lifeDays life of GID in days - default is 1825==5 years
def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825):
Certificate.__init__(self, lifeDays, create, subject, string, filename)
if subject:
logger.debug("Creating GID for subject: %s" % subject)
if uuid:
self.uuid = int(uuid)
if hrn:
self.hrn = hrn
self.urn = hrn_to_urn(hrn, 'unknown')
if urn:
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def set_uuid(self, uuid):
if isinstance(uuid, str):
self.uuid = int(uuid)
else:
self.uuid = uuid
def get_uuid(self):
if not self.uuid:
self.decode()
return self.uuid
def set_hrn(self, hrn):
self.hrn = hrn
def get_hrn(self):
if not self.hrn:
self.decode()
return self.hrn
def set_urn(self, urn):
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def get_urn(self):
if not self.urn:
self.decode()
return self.urn
# Will be stuffed into subjectAltName
def set_email(self, email):
self.email = email
def get_email(self):
if not self.email:
self.decode()
return self.email
def get_type(self):
if not self.urn:
self.decode()
_, t = urn_to_hrn(self.urn)
return t
##
# Encode the GID fields and package them into the subject-alt-name field
# of the X509 certificate. This must be called prior to signing the
# certificate. It may only be called once per certificate.
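    #
    # A typical resulting subjectAltName value (hypothetical HRN, UUID and
    # email shown purely for illustration) looks like:
    #   URI:urn:publicid:IDN+plc:princeton+user+alice, URI:urn:uuid:<uuid>, email:alice@example.org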
def encode(self):
if self.urn:
urn = self.urn
else:
urn = hrn_to_urn(self.hrn, None)
str = "URI:" + urn
if self.uuid:
str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn
if self.email:
str += ", " + "email:" + self.email
self.set_data(str, 'subjectAltName')
##
# Decode the subject-alt-name field of the X509 certificate into the
# fields of the GID. This is automatically called by the various get_*()
# functions in this class.
def decode(self):
data = self.get_data('subjectAltName')
dict = {}
if data:
if data.lower().startswith('uri:http://<params>'):
dict = xmlrpclib.loads(data[11:])[0][0]
else:
spl = data.split(', ')
for val in spl:
if val.lower().startswith('uri:urn:uuid:'):
dict['uuid'] = uuid.UUID(val[4:]).int
elif val.lower().startswith('uri:urn:publicid:idn+'):
dict['urn'] = val[4:]
elif val.lower().startswith('email:'):
# FIXME: Ensure there isn't cruft in that address...
# EG look for email:copy,....
dict['email'] = val[6:]
self.uuid = dict.get("uuid", None)
self.urn = dict.get("urn", None)
self.hrn = dict.get("hrn", None)
self.email = dict.get("email", None)
if self.urn:
self.hrn = urn_to_hrn(self.urn)[0]
##
# Dump the credential to stdout.
#
# @param indent specifies a number of spaces to indent the output
# @param dump_parents If true, also dump the parents of the GID
def dump(self, *args, **kwargs):
print self.dump_string(*args,**kwargs)
def dump_string(self, indent=0, dump_parents=False):
result=" "*(indent-2) + "GID\n"
result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"
result += " "*indent + "urn:" + str(self.get_urn()) +"\n"
result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"
if self.get_email() is not None:
result += " "*indent + "email:" + str(self.get_email()) + "\n"
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
if self.parent and dump_parents:
result += " "*indent + "parent:\n"
result += self.parent.dump_string(indent+4, dump_parents)
return result
##
# Verify the chain of authenticity of the GID. First perform the checks
# of the certificate class (verifying that each parent signs the child,
# etc). In addition, GIDs also confirm that the parent's HRN is a prefix
# of the child's HRN, and the parent is of type 'authority'.
#
# Verifying these prefixes prevents a rogue authority from signing a GID
# for a principal that is not a member of that authority. For example,
# planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.
def verify_chain(self, trusted_certs = None):
# do the normal certificate verification stuff
trusted_root = Certificate.verify_chain(self, trusted_certs)
if self.parent:
# make sure the parent's hrn is a prefix of the child's hrn
if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
# Parent must also be an authority (of some type) to sign a GID
# There are multiple types of authority - accept them all here
if not self.parent.get_type().find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
# Then recurse up the chain - ensure the parent is a trusted
# root or is in the namespace of a trusted root
self.parent.verify_chain(trusted_certs)
else:
# make sure that the trusted root's hrn is a prefix of the child's
trusted_gid = GID(string=trusted_root.save_to_string())
trusted_type = trusted_gid.get_type()
trusted_hrn = trusted_gid.get_hrn()
#if trusted_type == 'authority':
# trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
cur_hrn = self.get_hrn()
if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert: %s" % (trusted_hrn, cur_hrn))
# There are multiple types of authority - accept them all here
if not trusted_type.find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
return
|
from twitter.common.quantity import Amount, Time
from twitter.pants.targets.python_target import PythonTarget
class PythonTests(PythonTarget):
def __init__(self, name, sources, resources=None, dependencies=None,
timeout=Amount(2, Time.MINUTES),
soft_dependencies=False):
"""
name / sources / resources / dependencies: See PythonLibrary target
timeout: Amount of time before this test should be considered timed-out
[Default: 2 minutes]
soft_dependencies: Whether or not we should ignore dependency resolution
errors for this test. [Default: False]
"""
self._timeout = timeout
self._soft_dependencies = bool(soft_dependencies)
PythonTarget.__init__(self, name, sources, resources, dependencies)
self.add_label('python')
self.add_label('tests')
@property
def timeout(self):
return self._timeout
class PythonTestSuite(PythonTarget):
def __init__(self, name, dependencies=None):
PythonTarget.__init__(self, name, (), (), dependencies)
|
'''
Name : ThammeGowda Narayanaswamy
USCID: 2074669439
'''
import math
from scipy.stats import multivariate_normal
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import scipy as sp
from scipy import spatial
from scipy import stats
from pprint import pprint
blob_file = "hw5_blob.csv"
circle_file = "hw5_circle.csv"
def load_points(f_name):
with open(f_name) as f:
res = []
for l in f:
x,y = l.split(",")
res.append([float(x), float(y)])
return np.array(res)
blobs = load_points(blob_file)
circles = load_points(circle_file)
'''
plt.plot(*zip(*circles), marker='o', color='r', ls='')
plt.show()
plt.plot(*zip(*blobs), marker='o', color='b', ls='')
plt.show()
'''
def k_means(k, pts, get_indices=False, silent=True, tol=1e-5):
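    '''Standard (Lloyd's) k-means clustering.
    :params:
        k - number of clusters
        pts - array of points
        get_indices - if True, return point indices per cluster instead of the points
        silent - if False, print the centroid movement at every iteration
        tol - stop once the total centroid movement falls below this value
    :returns: dict mapping each centroid (as a tuple) to its assigned points (or indices)
    '''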
N = len(pts)
assert k <= N
print("K=%d, N=%d" % (k, N))
# pick random k points
pos = set()
while len(pos) < k:
r = np.random.randint(N)
pos.add(r)
centroids = []
for p in pos:
centroids.append(tuple(pts[p]))
change = float('inf')
conv_tol = 1e-5
itr, max_iters = 0, 100
while change > tol and itr < max_iters:
itr += 1
# assign cluster to each point
asgn = {}
indices = {}
for ct in centroids:
asgn[ct] = []
indices[ct] = []
for idx, pt in enumerate(pts):
mindist = float('inf')
a = None
for ct in centroids:
dist = spatial.distance.cdist([ct], [pt])
if dist < mindist:
mindist = dist
a = ct
asgn[a].append(pt)
indices[a].append(idx)
# compute means of each cluster
oldcentr = centroids
centroids = []
for ct, cluster in asgn.items():
centroids.append(tuple(np.array(cluster).mean(axis=0)))
dist_matrix = spatial.distance.cdist(oldcentr, centroids)
# has distance between each pair of {new, old} centroids
# need the diagonal values
change = dist_matrix.trace()
if not silent:
print("Movement in centroids", change)
return indices if get_indices else asgn
print("# K Means")
colors = ['r', 'g', 'b', 'y', 'c', 'k']
plt.figure(1, figsize=(15, 10))
plt.title("K Means")
ks = {2,3,5}
dss = {'Blobs': blobs, 'Circles': circles}
j = 1
for title, ds in dss.items():
for k in ks:
clstrs = k_means(k, ds)
plt.subplot(2, 3, j)
i = 0
for cnt, cpts in clstrs.items():
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.title("%s , K=%d" % (title, k))
j += 1
plt.show()
'''
center = [0.0, 0.0]
newdim = sp.spatial.distance.cdist([center], circles).transpose()
clusters = k_means(2, newdim, get_indices=True)
i = 0
for cnt, cpts in clusters.items():
cpts = map(lambda x: circles[x], cpts)
plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
i += 1
plt.show()
'''
print("Kernel K means")
class KernelKMeans(object):
def kernel_matrix(self, data, kernel_func):
''' Computes kernel matrix
: params:
data - data points
kernel_func - kernel function
:returns: nxn matrix
'''
n = data.shape[0]
K = np.zeros((n,n), dtype=float)
for i in range(n):
for j in range(n):
K[i,j] = kernel_func(data[i], data[j])
return K
def cluster(self, X, k, kernel_func, max_itr=100, tol=1e-3):
'''
Clusters the points
:params:
X - data points
k - number of clusters
            kernel_func - kernel function that outputs smaller values for points in the same cluster
:returns: Nx1 vector of assignments
'''
# N
N = X.shape[0]
        # NxN kernel matrix, computed element-wise from the kernel function
K = self.kernel_matrix(X, kernel_func)
# equal weightage to all
cluster_weights = np.ones(N)
# Assignments : random assignments to begin with
A = np.random.randint(k, size=N)
        for it in xrange(max_itr):  # may oscillate between two local minima, so abort after max_itr
# N x k matrix that stores distance between every point and cluster center
dist = self.compute_dist(K, k, A, sw=cluster_weights)
oldA, A = A, dist.argmin(axis=1)
            # Check if it has converged
n_same = np.sum(np.abs(A - oldA) == 0)
if 1 - float(n_same) / N < tol:
print "Converged at iteration:", it + 1
break
return A
def compute_dist(self, K, k, A, sw):
"""
Computes Nxk distance matrix using kernel matrix
: params:
K - NxN kernel Matrix
k - number of clusters
A - Nx1 Assignments
sw - sample weights
: returns : Nxk distance matrix
"""
dist = np.zeros((K.shape[0], k))
for cl in xrange(k):
mask = A == cl
if np.sum(mask) == 0:
raise Error("ERROR:cluster '%d' is empty. Looks like we cant make %d clusters" % (cl, k))
N_ = sw[mask].sum()
KK = K[mask][:, mask]
dist[:, cl] += np.sum(np.outer(sw[mask], sw[mask]) * KK / (N_*N_))
dist[:, cl] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / N_
return dist
def distance(x1, x2):
    '''Squared Euclidean distance between 2 points
:params:
x1 - point1
x2 - point2
'''
return np.sum((x1 - x2) ** 2)
def circular_kernel(x1, x2, center=None):
    '''This kernel outputs a smaller distance for points that lie at a similar radius from the center
:params:
x1 - first point
x2 - second point
center - center of circle(default = origin (0,0,...))
'''
if center is None:
center = np.zeros(len(x1))
dist1 = distance(x1, center)
dist2 = distance(x2, center)
return 1.0 - min(dist1, dist2) / max(dist1, dist2)
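# Quick sanity check of the kernel (example values assumed, not part of the
# assignment data): two points on the same radius give distance 0, while points
# on different radii give a value closer to 1.
#   circular_kernel(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 0.0
#   circular_kernel(np.array([1.0, 0.0]), np.array([0.0, 3.0]))  # -> ~0.89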
clusters = KernelKMeans().cluster(circles, 2, circular_kernel)
for i in range(2):  # two clusters were requested in the call above
    cpts = circles[clusters == i]
    plt.plot(*zip(*cpts), marker='o', color=colors[i], ls='')
plt.show()
print("EM Algorithm")
def multivar_gaussian_pdf(x, mu, covar):
return multivariate_normal.pdf(x, mean=mu, cov=covar)
class EM_GMM(object):
def __init__(self, data, k):
self.data = data
self.k = k
self.N = data.shape[0]
# theta param
self.mean, self.cov, self.weight = [], [], []
# random initialization
A = np.random.randint(k, size=data.shape[0])
for c in range(k):
cpts = data[A == c]
self.mean.append(np.mean(cpts, axis=0))
self.cov.append(np.cov(np.array(cpts).transpose()))
self.weight.append(1.0 * cpts.shape[0] / data.shape[0])
def compute_gamma(self):
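        # E-step: responsibility of component ct for point pt,
        #   gamma[idx, ct] = weight[ct] * N(pt | mean[ct], cov[ct]) / sum_j weight[j] * N(pt | mean[j], cov[j])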
gamma = np.zeros((self.N, self.k), dtype=float)
        for idx, pt in enumerate(self.data):
            pdf = []
            for ct in range(self.k):
temp = multivar_gaussian_pdf(pt, self.mean[ct], self.cov[ct])
pdf.append(temp * self.weight[ct])
gamma[idx] = np.array(pdf) / sum(pdf)
return gamma
def update_theta(self, P):
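        # M-step: re-estimate mixture weights, means and covariances from the
        # responsibility matrix P (one column per component).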
weights = P.sum(axis=0)/P.sum()
means = []
covs = []
for i in range(self.k):
nr_mu = (P[:, i:i+1] * self.data).sum(axis=0)
dr_mu = P[:, i].sum(axis=0)
pt_mu = nr_mu / dr_mu
means.append(pt_mu)
for i in range(self.k):
nr_cov = (P[:, i:i+1] * (self.data - means[i])).transpose().dot(self.data - means[i])
dr_cov = P[:, i].sum(axis=0)
covs.append(nr_cov / dr_cov)
self.mean= means
self.cov = covs
self.weight = weights
def log_likelihood(self):
log_sum = 0.
for _, pt in enumerate(self.data):
row_sum = []
for ct in range(self.k):
p_X_given_N = multivar_gaussian_pdf(pt, self.mean[ct], self.cov[ct])
p_N = self.weight[ct]
joint = p_N * p_X_given_N
row_sum.append(joint)
res = sum(row_sum)
log_sum += math.log(res)
return log_sum
def gmm(self, max_itr = 50):
ll = []
for itr in range(max_itr):
            old_means = self.mean  # used for convergence test
gamma = self.compute_gamma()
self.update_theta(gamma)
ll.append(self.log_likelihood())
if np.sum(np.abs(np.array(self.mean) - np.array(old_means))) < 1e-3:
break
return gamma, ll
data = blobs
max_ll = float('-inf')
plt.figure(1, figsize=(8, 6))
legends = []
k = 3
for i in range(1,6):
em = EM_GMM(data, k)
gamma, ll = em.gmm()
    if ll[-1] >= max_ll:
        best_gamma = gamma
        best = em
        max_ll = ll[-1]
print "Converged: ", len(ll)
plt.plot(range(len(ll)), ll , '-', color=colors[i])
legends.append(mpatches.Patch(color=colors[i], label='Iteration: %d' % i))
plt.legend(handles=legends)
plt.show()
idx = best_gamma.argmax(axis=1)
print "Best parameters: "
print "Mean:", best.mean
print "Covar:", best.cov
plt.scatter(data[:,0], data[:,1], color=[colors[i] for i in idx] )
plt.show()
|
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xst = '''<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="topLevel">
<xs:complexType>
<xs:sequence>
<xs:element name="item" type="xs:int" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
'''
code = pyxb.binding.generate.GeneratePython(schema_text=xst)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0218 (unittest.TestCase):
def testBasic (self):
instance = topLevel()
self.assertTrue(instance.item is not None)
self.assertFalse(instance.item is None)
self.assertTrue(instance.item != None)
self.assertTrue(None != instance.item)
self.assertFalse(instance.item)
instance.item.extend([1,2,3,4])
self.assertTrue(instance.item is not None)
self.assertFalse(instance.item is None)
self.assertTrue(instance.item != None)
self.assertTrue(None != instance.item)
self.assertTrue(instance.item)
if __name__ == '__main__':
unittest.main()
|
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.services.kafka.directory import kafka_dir
import os
import subprocess
"""
0.8.2.1 MirrorMaker options
Option Description
------ -----------
--abort.on.send.failure <Stop the Configure the mirror maker to exit on
entire mirror maker when a send a failed send. (default: true)
failure occurs>
--blacklist <Java regex (String)> Blacklist of topics to mirror.
--consumer.config <config file> Embedded consumer config for consuming
from the source cluster.
--consumer.rebalance.listener <A The consumer rebalance listener to use
custom rebalance listener of type for mirror maker consumer.
ConsumerRebalanceListener>
--help Print this message.
--message.handler <A custom message Message handler which will process
handler of type every record in-between consumer and
MirrorMakerMessageHandler> producer.
--message.handler.args <Arguments Arguments used by custom rebalance
passed to message handler listener for mirror maker consumer
constructor.>
--num.streams <Integer: Number of Number of consumption streams.
threads> (default: 1)
--offset.commit.interval.ms <Integer: Offset commit interval in ms (default:
offset commit interval in 60000)
millisecond>
--producer.config <config file> Embedded producer config.
--rebalance.listener.args <Arguments Arguments used by custom rebalance
passed to custom rebalance listener listener for mirror maker consumer
constructor as a string.>
--whitelist <Java regex (String)> Whitelist of topics to mirror.
"""
class MirrorMaker(Service):
# Root directory for persistent output
PERSISTENT_ROOT = "/mnt/mirror_maker"
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "mirror_maker.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PRODUCER_CONFIG = os.path.join(PERSISTENT_ROOT, "producer.properties")
CONSUMER_CONFIG = os.path.join(PERSISTENT_ROOT, "consumer.properties")
logs = {
"mirror_maker_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, source, target, whitelist=None, blacklist=None, num_streams=1, consumer_timeout_ms=None):
"""
MirrorMaker mirrors messages from one or more source clusters to a single destination cluster.
Args:
context: standard context
source: source Kafka cluster
target: target Kafka cluster to which data will be mirrored
whitelist: whitelist regex for topics to mirror
blacklist: blacklist regex for topics not to mirror
num_streams: number of consumer threads to create; can be a single int, or a list with
one value per node, allowing num_streams to be the same for each node,
or configured independently per-node
consumer_timeout_ms: consumer stops if t > consumer_timeout_ms elapses between consecutive messages
"""
super(MirrorMaker, self).__init__(context, num_nodes=num_nodes)
self.consumer_timeout_ms = consumer_timeout_ms
self.num_streams = num_streams
if not isinstance(num_streams, int):
# if not an integer, num_streams should be configured per-node
assert len(num_streams) == num_nodes
self.whitelist = whitelist
self.blacklist = blacklist
self.source = source
self.target = target
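    # Minimal usage sketch (source_kafka and target_kafka are hypothetical
    # KafkaService instances from the surrounding test; not part of this file):
    #   mm = MirrorMaker(test_context, num_nodes=1, source=source_kafka,
    #                    target=target_kafka, whitelist=".*", num_streams=2)
    #   mm.start()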
def start_cmd(self, node):
cmd = "export LOG_DIR=%s;" % MirrorMaker.LOG_DIR
cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\";" % MirrorMaker.LOG4J_CONFIG
cmd += " /opt/%s/bin/kafka-run-class.sh kafka.tools.MirrorMaker" % kafka_dir(node)
cmd += " --consumer.config %s" % MirrorMaker.CONSUMER_CONFIG
cmd += " --producer.config %s" % MirrorMaker.PRODUCER_CONFIG
if isinstance(self.num_streams, int):
cmd += " --num.streams %d" % self.num_streams
else:
# config num_streams separately on each node
cmd += " --num.streams %d" % self.num_streams[self.idx(node) - 1]
if self.whitelist is not None:
cmd += " --whitelist=\"%s\"" % self.whitelist
if self.blacklist is not None:
cmd += " --blacklist=\"%s\"" % self.blacklist
cmd += " 1>> %s 2>> %s &" % (MirrorMaker.LOG_FILE, MirrorMaker.LOG_FILE)
return cmd
def pids(self, node):
try:
cmd = "ps ax | grep -i MirrorMaker | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def alive(self, node):
return len(self.pids(node)) > 0
def start_node(self, node):
node.account.ssh("mkdir -p %s" % MirrorMaker.PERSISTENT_ROOT, allow_fail=False)
node.account.ssh("mkdir -p %s" % MirrorMaker.LOG_DIR, allow_fail=False)
# Create, upload one consumer config file for source cluster
consumer_props = self.render('consumer.properties', zookeeper_connect=self.source.zk.connect_setting())
node.account.create_file(MirrorMaker.CONSUMER_CONFIG, consumer_props)
# Create, upload producer properties file for target cluster
producer_props = self.render('producer.properties', broker_list=self.target.bootstrap_servers(),
producer_type="async")
node.account.create_file(MirrorMaker.PRODUCER_CONFIG, producer_props)
# Create and upload log properties
log_config = self.render('tools_log4j.properties', log_file=MirrorMaker.LOG_FILE)
node.account.create_file(MirrorMaker.LOG4J_CONFIG, log_config)
# Run mirror maker
cmd = self.start_cmd(node)
self.logger.debug("Mirror maker command: %s", cmd)
node.account.ssh(cmd, allow_fail=False)
wait_until(lambda: self.alive(node), timeout_sec=10, backoff_sec=.5,
err_msg="Mirror maker took to long to start.")
self.logger.debug("Mirror maker is alive")
def stop_node(self, node):
node.account.kill_process("java", allow_fail=True)
wait_until(lambda: not self.alive(node), timeout_sec=10, backoff_sec=.5,
err_msg="Mirror maker took to long to stop.")
def clean_node(self, node):
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
node.account.kill_process("java", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % MirrorMaker.PERSISTENT_ROOT, allow_fail=False)
|
import textwrap
import mock
import pep8
from nova.hacking import checks
from nova import test
class HackingTestCase(test.NoDBTestCase):
"""This class tests the hacking checks in nova.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method the return will be False/None if
there is no violation in the sample input. If there is an error a tuple is
returned with a position in the line, and a message. So to check the result
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
def test_virt_driver_imports(self):
expect = (0, "N311: importing code from other virt drivers forbidden")
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"import nova.virt.libvirt.utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/libvirt/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"import nova.virt.firewall",
"./nova/virt/libvirt/firewall.py"))
def test_virt_driver_config_vars(self):
self.assertIsInstance(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/xenapi/driver.py"), tuple)
self.assertIsNone(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/libvirt/volume.py"))
def test_no_vi_headers(self):
lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n']
self.assertIsNone(checks.no_vi_headers(
"Test string foo", 1, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
2, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
6, lines))
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
9, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
14, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"Test end string for vi",
15, lines))
def test_assert_true_instance(self):
self.assertEqual(len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "
"exception.BuildAbortException))"))), 1)
self.assertEqual(
len(list(checks.assert_true_instance("self.assertTrue()"))), 0)
def test_assert_equal_type(self):
self.assertEqual(len(list(checks.assert_equal_type(
"self.assertEqual(type(als['QuicAssist']), list)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_type("self.assertTrue()"))), 0)
def test_assert_equal_in(self):
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), True)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, any(a==1 for a in b))"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), False)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, any(a==1 for a in b))"))), 0)
def test_assert_equal_none(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
def test_assert_true_or_false_with_in_or_not_in(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in 'some string with spaces')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in 'some string with spaces')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in ['1', '2', '3'])"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in [1, 2, 3])"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(any(A > 5 for A in B))"))), 0)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(any(A > 5 for A in B), 'some message')"))), 0)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(some in list1 and some2 in list2)"))), 0)
def test_no_translate_debug_logs(self):
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)
def test_no_setting_conf_directly_in_tests(self):
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.group.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1)
# Shouldn't fail with comparisons
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option != 1", "nova/tests/test_foo.py"))), 0)
# Shouldn't fail since not in nova/tests/
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/compute/foo.py"))), 0)
def test_log_translations(self):
logs = ['audit', 'error', 'info', 'warning', 'critical', 'warn',
'exception']
levels = ['_LI', '_LW', '_LE', '_LC']
debug = "LOG.debug('OK')"
audit = "LOG.audit(_('OK'))"
self.assertEqual(
0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
self.assertEqual(
0, len(list(checks.validate_log_translations(audit, audit, 'f'))))
for log in logs:
bad = 'LOG.%s("Bad")' % log
self.assertEqual(1,
len(list(
checks.validate_log_translations(bad, bad, 'f'))))
ok = "LOG.%s('OK') # noqa" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
ok = "LOG.%s(variable)" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
for level in levels:
ok = "LOG.%s(%s('OK'))" % (log, level)
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
" def fake_suds_context(calls={}):"))))
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
def test_check_explicit_underscore_import(self):
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _, _LW",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"_ = translations.ugettext",
"cinder/tests/other_files3.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files3.py"))), 0)
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("N324: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(
__get_msg(method),
list(checks.use_jsonutils("json.%s(" % method,
"./nova/virt/xenapi/driver.py")))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.%s(" % method,
"./plugins/xenserver/script.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("jsonx.%s(" % method,
"./nova/virt/xenapi/driver.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.dumb",
"./nova/virt/xenapi/driver.py"))))
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_str_unicode_exception(self):
checker = checks.CheckForStrUnicodeExc
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = str(e)
return p
"""
errors = [(5, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = unicode(a) + str(b)
except ValueError as e:
p = e
return p
"""
self._assert_has_no_errors(code, checker)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = unicode(e)
return p
"""
errors = [(5, 20, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + str(ve)
p = e
return p
"""
errors = [(8, 20, 'N325'), (8, 29, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + unicode(ve)
p = str(e)
return p
"""
errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
def test_api_version_decorator_check(self):
code = """
@some_other_decorator
@wsgi.api_version("2.5")
def my_method():
pass
"""
self._assert_has_errors(code, checks.check_api_version_decorator,
expected_errors=[(2, 0, "N332")])
def test_oslo_namespace_imports_check(self):
code = """
from oslo.concurrency import processutils
"""
self._assert_has_errors(code, checks.check_oslo_namespace_imports,
expected_errors=[(1, 0, "N333")])
def test_oslo_namespace_imports_check_2(self):
code = """
from oslo import i18n
"""
self._assert_has_errors(code, checks.check_oslo_namespace_imports,
expected_errors=[(1, 0, "N333")])
def test_oslo_namespace_imports_check_3(self):
code = """
import oslo.messaging
"""
self._assert_has_errors(code, checks.check_oslo_namespace_imports,
expected_errors=[(1, 0, "N333")])
def test_oslo_assert_raises_regexp(self):
code = """
self.assertRaisesRegexp(ValueError,
"invalid literal for.*XYZ'$",
int,
'XYZ')
"""
self._assert_has_errors(code, checks.assert_raises_regexp,
expected_errors=[(1, 0, "N335")])
def test_api_version_decorator_check_no_errors(self):
code = """
class ControllerClass():
@wsgi.api_version("2.5")
def my_method():
pass
"""
self._assert_has_no_errors(code, checks.check_api_version_decorator)
def test_trans_add(self):
checker = checks.CheckForTransAdd
code = """
def fake_tran(msg):
return msg
_ = fake_tran
_LI = _
_LW = _
_LE = _
_LC = _
def f(a, b):
msg = _('test') + 'add me'
msg = _LI('test') + 'add me'
msg = _LW('test') + 'add me'
msg = _LE('test') + 'add me'
msg = _LC('test') + 'add me'
msg = 'add to me' + _('test')
return msg
"""
errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'),
(16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
msg = 'test' + 'add me'
return msg
"""
self._assert_has_no_errors(code, checker)
def test_dict_constructor_with_list_copy(self):
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([(i, connect_info[i])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" attrs = dict([(k, _from_json(v))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" type_names = dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
"foo(param=dict((k, v) for k, v in bar.items()))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([[i,i] for i in range(3)])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dd = dict([i,i] for i in range(3))"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" create_kwargs = dict(snapshot=snapshot,"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" self._render_dict(xml, data_el, data.__dict__)"))))
|
"""
Test how notifications about newly loaded binaries are delivered;
they should arrive in batches instead of one-by-one.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ModuleLoadedNotifysTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
# DynamicLoaderDarwin should batch up notifications about
# newly added/removed libraries. Other DynamicLoaders may
# not be written this way.
@skipUnlessDarwin
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// breakpoint')
def test_launch_notifications(self):
"""Test that lldb broadcasts newly loaded libraries in batches."""
self.build()
exe = self.getBuildArtifact("a.out")
self.dbg.SetAsync(False)
listener = self.dbg.GetListener()
listener.StartListeningForEventClass(
self.dbg,
lldb.SBTarget.GetBroadcasterClassName(),
lldb.SBTarget.eBroadcastBitModulesLoaded | lldb.SBTarget.eBroadcastBitModulesUnloaded)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# break on main
breakpoint = target.BreakpointCreateByName('main', 'a.out')
event = lldb.SBEvent()
# CreateTarget() generated modules-loaded events; consume them & toss
while listener.GetNextEvent(event):
            pass  # discard the event
error = lldb.SBError()
flags = target.GetLaunchInfo().GetLaunchFlags()
process = target.Launch(listener,
None, # argv
None, # envp
None, # stdin_path
None, # stdout_path
None, # stderr_path
None, # working directory
flags, # launch flags
False, # Stop at entry
error) # error
self.assertTrue(
process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
total_solibs_added = 0
total_solibs_removed = 0
total_modules_added_events = 0
total_modules_removed_events = 0
while listener.GetNextEvent(event):
if lldb.SBTarget.EventIsTargetEvent(event):
if event.GetType() == lldb.SBTarget.eBroadcastBitModulesLoaded:
solib_count = lldb.SBTarget.GetNumModulesFromEvent(event)
total_modules_added_events += 1
total_solibs_added += solib_count
if self.TraceOn():
# print all of the binaries that have been added
added_files = []
i = 0
while i < solib_count:
module = lldb.SBTarget.GetModuleAtIndexFromEvent(i, event)
added_files.append(module.GetFileSpec().GetFilename())
i = i + 1
print("Loaded files: %s" % (', '.join(added_files)))
if event.GetType() == lldb.SBTarget.eBroadcastBitModulesUnloaded:
solib_count = lldb.SBTarget.GetNumModulesFromEvent(event)
total_modules_removed_events += 1
total_solibs_removed += solib_count
if self.TraceOn():
# print all of the binaries that have been removed
removed_files = []
i = 0
while i < solib_count:
module = lldb.SBTarget.GetModuleAtIndexFromEvent(i, event)
removed_files.append(module.GetFileSpec().GetFilename())
i = i + 1
print("Unloaded files: %s" % (', '.join(removed_files)))
# This is testing that we get back a small number of events with the loaded
# binaries in batches. Check that we got back more than 1 solib per event.
# In practice on Darwin today, we get back two events for a do-nothing c
# program: a.out and dyld, and then all the rest of the system libraries.
avg_solibs_added_per_event = int(float(total_solibs_added) / float(total_modules_added_events))
self.assertGreater(avg_solibs_added_per_event, 1)
|
import types
import unittest
from collections import namedtuple
import os
import sys
import tempfile
from zipfile import ZipFile, ZipInfo
from utils import jar_utils
sys.path.append('tests/unit/')
import mock
from plugins.systems.config_container_crawler import ConfigContainerCrawler
from plugins.systems.config_host_crawler import ConfigHostCrawler
from plugins.systems.connection_container_crawler import ConnectionContainerCrawler
from plugins.systems.connection_host_crawler import ConnectionHostCrawler
from plugins.systems.connection_vm_crawler import ConnectionVmCrawler
from plugins.systems.cpu_container_crawler import CpuContainerCrawler
from plugins.systems.cpu_host_crawler import CpuHostCrawler
from plugins.systems.disk_container_crawler import DiskContainerCrawler
from plugins.systems.disk_host_crawler import DiskHostCrawler
from plugins.systems.dockerhistory_container_crawler import DockerhistoryContainerCrawler
from plugins.systems.dockerinspect_container_crawler import DockerinspectContainerCrawler
from plugins.systems.dockerps_host_crawler import DockerpsHostCrawler
from plugins.systems.file_container_crawler import FileContainerCrawler
from plugins.systems.file_host_crawler import FileHostCrawler
from plugins.systems.interface_container_crawler import InterfaceContainerCrawler
from plugins.systems.interface_host_crawler import InterfaceHostCrawler
from plugins.systems.interface_vm_crawler import InterfaceVmCrawler
from plugins.systems.jar_container_crawler import JarContainerCrawler
from plugins.systems.jar_host_crawler import JarHostCrawler
from plugins.systems.load_container_crawler import LoadContainerCrawler
from plugins.systems.load_host_crawler import LoadHostCrawler
from plugins.systems.memory_container_crawler import MemoryContainerCrawler
from plugins.systems.memory_host_crawler import MemoryHostCrawler
from plugins.systems.memory_vm_crawler import MemoryVmCrawler
from plugins.systems.metric_container_crawler import MetricContainerCrawler
from plugins.systems.metric_host_crawler import MetricHostCrawler
from plugins.systems.metric_vm_crawler import MetricVmCrawler
from plugins.systems.os_container_crawler import OSContainerCrawler
from plugins.systems.os_host_crawler import OSHostCrawler
from plugins.systems.os_vm_crawler import os_vm_crawler
from plugins.systems.package_container_crawler import PackageContainerCrawler
from plugins.systems.package_host_crawler import PackageHostCrawler
from plugins.systems.process_container_crawler import ProcessContainerCrawler
from plugins.systems.process_host_crawler import ProcessHostCrawler
from plugins.systems.process_vm_crawler import process_vm_crawler
from container import Container
from utils.crawler_exceptions import CrawlError
from utils.features import (
OSFeature,
ConfigFeature,
DiskFeature,
PackageFeature,
MemoryFeature,
CpuFeature,
InterfaceFeature,
LoadFeature,
DockerPSFeature,
JarFeature)
class DummyContainer(Container):
def __init__(self, long_id):
self.pid = '1234'
self.long_id = long_id
def get_memory_cgroup_path(self, node):
return '/cgroup/%s' % node
def get_cpu_cgroup_path(self, node):
return '/cgroup/%s' % node
psvmi_sysinfo = namedtuple('psvmi_sysinfo',
'''boottime ipaddr osdistro osname osplatform osrelease
ostype osversion memory_used memory_buffered
memory_cached memory_free''')
psvmi_memory = namedtuple(
'psvmi_memory',
'memory_used memory_buffered memory_cached memory_free')
psvmi_interface = namedtuple(
'psvmi_interface',
'ifname bytes_sent bytes_recv packets_sent packets_recv errout errin')
os_stat = namedtuple(
'os_stat',
'''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size''')
def mocked_os_walk(root_dir):
files = ['file1', 'file2', 'file3']
dirs = ['dir']
yield ('/', dirs, files)
# simulate the os_walk behavior (if a dir is deleted, we don't walk it)
if '/dir' in dirs:
files = ['file4']
dirs = []
yield ('/dir', dirs, files)
def mocked_os_walk_for_avoidsetns(root_dir):
files = ['file1', 'file2', 'file3']
dirs = ['dir']
yield ('/1/2/3', dirs, files)
# simulate the os_walk behavior (if a dir is deleted, we don't walk it)
if '/1/2/3/dir' in dirs:
files = ['file4']
dirs = []
yield ('/dir', dirs, files)
mcount = 0
class MockedMemCgroupFile(mock.Mock):
def __init__(self):
pass
def readline(self):
return '2'
def __iter__(self):
return self
def next(self):
global mcount
mcount += 1
if mcount == 1:
return 'total_cache 100'
if mcount == 2:
return 'total_active_file 200'
else:
raise StopIteration()
ccount = 0
ccount2 = 0
class MockedCpuCgroupFile(mock.Mock):
def __init__(self):
pass
def readline(self):
global ccount2
ccount2 += 1
if ccount2 == 1:
return '1e7'
else:
return '2e7'
def __iter__(self):
return self
def next(self):
global ccount
ccount += 1
if ccount == 1:
return 'system 20'
if ccount == 2:
return 'user 20'
else:
raise StopIteration()
class MockedFile(mock.Mock):
def __init__(self):
pass
def read(self):
return 'content'
def mocked_codecs_open(filename, mode, encoding, errors):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedFile())
m.__exit__ = mock.Mock(return_value=False)
return m
def mocked_cpu_cgroup_open(filename, mode):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedCpuCgroupFile())
m.__exit__ = mock.Mock(return_value=False)
print filename
return m
def mocked_memory_cgroup_open(filename, mode):
m = mock.Mock()
m.__enter__ = mock.Mock(return_value=MockedMemCgroupFile())
m.__exit__ = mock.Mock(return_value=False)
print filename
return m
partition = namedtuple('partition', 'device fstype mountpoint opts')
pdiskusage = namedtuple('pdiskusage', 'percent total')
meminfo = namedtuple('meminfo', 'rss vms')
ioinfo = namedtuple('ioinfo', 'read_bytes write_bytes')
psutils_memory = namedtuple('psutils_memory', 'used free buffers cached')
psutils_cpu = namedtuple(
'psutils_cpu',
'idle nice user iowait system irq steal')
psutils_net = namedtuple(
'psutils_net',
'bytes_sent bytes_recv packets_sent packets_recv errout errin')
def mocked_disk_partitions(all):
return [partition('/dev/a', 'type', '/a', 'opts'),
partition('/dev/b', 'type', '/b', 'opts')]
class Connection():
def __init__(self):
self.laddr = ['1.1.1.1', '22']
self.raddr = ['2.2.2.2', '22']
self.status = 'Established'
class Process():
def __init__(self, name):
self.name = name
self.cmdline = ['cmd']
self.pid = 123
self.status = 'Running'
self.cwd = '/bin'
self.ppid = 1
self.create_time = 1000
def num_threads(self):
return 1
def username(self):
return 'don quijote'
def get_open_files(self):
return []
def get_connections(self):
return [Connection()]
def get_memory_info(self):
return meminfo(10, 20)
def get_io_counters(self):
return ioinfo(10, 20)
def get_cpu_percent(self, interval):
return 30
def get_memory_percent(self):
return 30
STAT_DIR_MODE = 16749
def mocked_os_lstat(path):
print path
if path == '/':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
elif path == '/file1':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file2':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file3':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/dir':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
else:
return os_stat(1, 2, 3, 4, 5, 6, 7)
def mocked_run_as_another_namespace(pid, ns, function, *args, **kwargs):
result = function(*args)
# if res is a generator (i.e. function uses yield)
if isinstance(result, types.GeneratorType):
result = list(result)
return result
def throw_os_error(*args, **kvargs):
raise OSError()
class PluginTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self, *args):
pass
@mock.patch('utils.os_utils.time.time',
side_effect=lambda: 1001)
@mock.patch('utils.os_utils.platform.platform',
side_effect=lambda: 'platform')
@mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
side_effect=lambda: ['1.1.1.1'])
@mock.patch('utils.os_utils.psutil.boot_time',
side_effect=lambda: 1000)
@mock.patch('utils.os_utils.platform.system',
side_effect=lambda: 'linux')
@mock.patch('utils.os_utils.platform.machine',
side_effect=lambda: 'machine')
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_host_cawler_plugin(self, *args):
fc = OSHostCrawler()
for os in fc.crawl():
print os
assert os == (
'linux',
OSFeature(
boottime=1000,
uptime=1,
ipaddr=['1.1.1.1'],
os='os',
os_version='os_version',
os_kernel='platform',
architecture='machine'),
'os')
for i, arg in enumerate(args):
if i > 0: # time.time is called more than once
continue
assert arg.call_count == 1
@mock.patch('utils.os_utils.platform.system',
side_effect=throw_os_error)
def test_os_host_crawler_plugin_failure(self, *args):
fc = OSHostCrawler()
with self.assertRaises(OSError):
for os in fc.crawl():
pass
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_host_crawler_plugin_mountpoint_mode(self, *args):
fc = OSHostCrawler()
for os in fc.crawl(root_dir='/a'):
print os
assert os == (
'linux',
OSFeature(
boottime='unsupported',
uptime='unsupported',
ipaddr='0.0.0.0',
os='os',
os_version='os_version',
os_kernel='unknown',
architecture='unknown'),
'os')
for i, arg in enumerate(args):
assert arg.call_count == 1
@mock.patch('utils.os_utils.osinfo.get_osinfo',
side_effect=throw_os_error)
def test_os_host_crawler_plugin_mountpoint_mode_failure(self, *args):
fc = OSHostCrawler()
with self.assertRaises(OSError):
for os in fc.crawl(root_dir='/a'):
pass
@mock.patch('utils.os_utils.time.time',
side_effect=lambda: 1001)
@mock.patch('utils.os_utils.platform.platform',
side_effect=lambda: 'platform')
@mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
side_effect=lambda: ['1.1.1.1'])
@mock.patch('utils.os_utils.psutil.boot_time',
side_effect=lambda: 1000)
@mock.patch('utils.os_utils.platform.system',
side_effect=lambda: 'linux')
@mock.patch('utils.os_utils.platform.machine',
side_effect=lambda: 'machine')
@mock.patch(
("plugins.systems.os_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_container_crawler_plugin(self, *args):
fc = OSContainerCrawler()
for os in fc.crawl(container_id=123):
print os
assert os == (
'linux',
OSFeature(
boottime=1000,
uptime=1,
ipaddr=['1.1.1.1'],
os='os',
os_version='os_version',
os_kernel='platform',
architecture='machine'),
'os')
for i, arg in enumerate(args):
if i > 0: # time.time is called more than once
continue
assert arg.call_count == 1
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.os_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.os_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'os',
'version': 'os_version'})
def test_os_container_crawler_plugin_avoidsetns(self, *args):
fc = OSContainerCrawler()
for os in fc.crawl(container_id=123, avoid_setns=True):
print os
assert os == (
'linux',
OSFeature(
boottime='unsupported',
uptime='unsupported',
ipaddr='0.0.0.0',
os='os',
os_version='os_version',
os_kernel='unknown',
architecture='unknown'),
'os')
for i, arg in enumerate(args):
print i, arg
if i == 0:
# get_osinfo()
assert arg.call_count == 1
arg.assert_called_with(mount_point='/a/b/c')
elif i == 1:
# get_docker_container_rootfs_path
assert arg.call_count == 1
arg.assert_called_with(123)
else:
# exec_dockerinspect
assert arg.call_count == 1
arg.assert_called_with(123)
@mock.patch(
("plugins.systems.os_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.os_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=throw_os_error)
def test_os_container_crawler_plugin_avoidsetns_failure(self, *args):
fc = OSContainerCrawler()
with self.assertRaises(OSError):
for os in fc.crawl(container_id=123, avoid_setns=True):
pass
@mock.patch('plugins.systems.os_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.os_vm_crawler.psvmi.system_info',
side_effect=lambda vmc: psvmi_sysinfo(1000,
'1.1.1.1',
'osdistro',
'osname',
'osplatform',
'osrelease',
'ostype',
'osversion',
1000000,
100000,
100000,
100000))
@mock.patch('plugins.systems.os_vm_crawler.psvmi')
def test_os_vm_crawler_plugin_without_vm(self, *args):
fc = os_vm_crawler()
for os in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert os == (
'ostype',
OSFeature(
boottime=1000,
uptime='unknown',
ipaddr='1.1.1.1',
os='ostype',
os_version='osversion',
os_kernel='osrelease',
architecture='osplatform'),
'os')
pass
assert args[1].call_count == 1
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler(self, *args):
fc = FileHostCrawler()
for (k, f, fname) in fc.crawl():
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler_with_exclude_dirs(self, *args):
fc = FileHostCrawler()
for (k, f, fname) in fc.crawl(exclude_dirs=['dir']):
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=throw_os_error)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_host_crawler_failure(self, *args):
fc = FileHostCrawler()
with self.assertRaises(OSError):
for (k, f, fname) in fc.crawl(root_dir='/a/b/c'):
pass
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/'):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch(
("plugins.systems.jar_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.jar_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
def test_jar_container_crawler_plugin(self, *args):
tmpdir = tempfile.mkdtemp()
jar_file_name = 'myfile.jar'
# Ensure the file is read/write by the creator only
saved_umask = os.umask(0077)
path = os.path.join(tmpdir, jar_file_name)
try:
with ZipFile(path, "w") as myjar:
myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!")
myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!")
myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!")
fc = JarContainerCrawler()
jars = list(fc.crawl(root_dir=tmpdir))
#jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir))
print jars
jar_feature = jars[0][1]
assert 'myfile.jar' == jar_feature.name
assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash
assert ['ddc6eff37020aa858e26b1ba8a49ee0e',
'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes
assert 'jar' == jars[0][2]
except IOError as e:
print 'IOError'
        finally:
            os.umask(saved_umask)
            os.remove(path)
            os.rmdir(tmpdir)
@mock.patch(
("plugins.systems.jar_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.jar_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/tmp')
def test_jar_container_crawler_avoidsetns(self, *args):
tmpdir = tempfile.mkdtemp()
jar_file_name = 'myfile.jar'
# Ensure the file is read/write by the creator only
saved_umask = os.umask(0077)
path = os.path.join(tmpdir, jar_file_name)
try:
with ZipFile(path, "w") as myjar:
myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!")
myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!")
myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!")
fc = JarContainerCrawler()
jars = list(fc.crawl(root_dir=os.path.basename(tmpdir), avoid_setns=True))
print jars
jar_feature = jars[0][1]
assert 'myfile.jar' == jar_feature.name
assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash
assert ['ddc6eff37020aa858e26b1ba8a49ee0e',
'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes
assert 'jar' == jars[0][2]
except IOError as e:
print 'IOError'
        finally:
            os.umask(saved_umask)
            os.remove(path)
            os.rmdir(tmpdir)
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch('utils.file_utils.os.walk',
side_effect=throw_os_error)
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_failure(self, *args):
fc = FileContainerCrawler()
with self.assertRaises(OSError):
for (k, f, fname) in fc.crawl(root_dir='/a/b/c'):
pass
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk_for_avoidsetns)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_avoidsetns(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/', avoid_setns=True):
print f
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3',
'/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 6
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/1/2/3')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/1/2/3')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
def test_file_container_crawler_with_exclude_dirs(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/',
exclude_dirs=['dir']):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/')
@mock.patch(
("plugins.systems.file_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.file_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.file_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.file_utils.os.walk',
side_effect=mocked_os_walk_for_avoidsetns)
@mock.patch('utils.file_utils.os.lstat',
side_effect=mocked_os_lstat)
    def test_file_container_crawler_avoidsetns_with_exclude_dirs(self, *args):
fc = FileContainerCrawler()
for (k, f, fname) in fc.crawl(root_dir='/',
avoid_setns=True,
exclude_dirs=['/dir']):
assert fname == "file"
assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3
assert f.atime == 4 and f.ctime == 5
assert f.mtime == 6 and f.size == 7
assert f.name in ['', 'file1', 'file2', 'file3', 'file4']
assert f.path in ['/', '/file1', '/file2', '/file3']
assert f.path not in ['/dir', '/dir/file4']
assert f.type in ['file', 'dir']
assert f.linksto is None
assert args[0].call_count == 4
assert args[1].call_count == 1 # oswalk
args[1].assert_called_with('/1/2/3')
assert args[2].call_count == 2 # isdir
args[2].assert_called_with('/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_host_crawler(self, *args):
fc = ConfigHostCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
        assert args[0].call_count == 1 # codecs open
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_host_crawler_with_discover(self, *args):
fc = ConfigHostCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=True)
print configs
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.config_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler(self, *args):
fc = ConfigContainerCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
assert args[0].call_count == 1 # codecs open
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.config_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
def test_config_container_crawler_discover(self, *args):
fc = ConfigContainerCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=True)
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
("plugins.systems.config_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.config_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler_avoidsetns(self, *args):
fc = ConfigContainerCrawler()
for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'],
discover_config_files=False,
avoid_setns=True):
assert fname == "config"
assert f == ConfigFeature(name='file1', content='content',
path='/etc/file1')
        assert args[0].call_count == 1 # codecs open
@mock.patch(
("plugins.systems.config_container_crawler."
"run_as_another_namespace"),
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.config_container_crawler."
"utils.dockerutils.exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.config_container_crawler.utils.dockerutils."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/1/2/3')
@mock.patch('utils.config_utils.os.path.isdir',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.walk',
side_effect=lambda p: [
('/', [], ['file1', 'file2', 'file3.conf'])])
@mock.patch('utils.config_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.isfile',
side_effect=lambda p: True)
@mock.patch('utils.config_utils.os.path.getsize',
side_effect=lambda p: 1000)
@mock.patch('utils.config_utils.os.lstat',
side_effect=mocked_os_lstat)
@mock.patch('utils.config_utils.codecs.open',
side_effect=mocked_codecs_open)
def test_config_container_crawler_avoidsetns_discover(self, *args):
fc = ConfigContainerCrawler()
configs = fc.crawl(known_config_files=['/etc/file1'],
avoid_setns=True,
discover_config_files=True)
assert set(configs) == set([('/file3.conf',
ConfigFeature(name='file3.conf',
content='content',
path='/file3.conf'),
'config'),
('/etc/file1',
ConfigFeature(name='file1',
content='content',
path='/etc/file1'),
'config')])
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_host_crawler_dpkg(self, *args):
fc = PackageHostCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=throw_os_error)
def test_package_host_crawler_dpkg_failure(self, *args):
fc = PackageHostCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'redhat',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_rpm_packages',
side_effect=lambda a, b, c, d: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_host_crawler_rpm(self, *args):
fc = PackageHostCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/rpm', 0, False)
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_container_crawler_dpkg(self, *args):
fc = PackageContainerCrawler()
for (k, f, fname) in fc.crawl():
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
args[0].assert_called_with('/', 'var/lib/dpkg', 0)
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True if 'dpkg' in p else False)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=throw_os_error)
def test_package_container_crawler_dpkg_failure(self, *args):
fc = PackageContainerCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
# get_dpkg_packages is called a second time after the first failure.
# first time is OUTCONTAINER mode with setns
# second time is OUTCONTAINER mode with avoid_setns
assert args[0].call_count == 2
args[0].assert_called_with('/a/b/c', 'var/lib/dpkg', 0)
args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo()
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.package_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'redhat',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True if 'rpm' in p else False)
@mock.patch('utils.package_utils.get_rpm_packages',
side_effect=throw_os_error)
def test_package_container_crawler_rpm_failure(self, *args):
fc = PackageContainerCrawler()
with self.assertRaises(CrawlError):
for (k, f, fname) in fc.crawl():
pass
        # get_rpm_packages is called a second time after the first failure.
# first time is OUTCONTAINER mode with setns
# second time is OUTCONTAINER mode with avoid_setns
assert args[0].call_count == 2
args[0].assert_called_with('/a/b/c', 'var/lib/rpm', 0, True)
args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo()
@mock.patch(
("plugins.systems.package_container_crawler."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
("plugins.systems.package_container_crawler."
"get_docker_container_rootfs_path"),
side_effect=lambda long_id: '/a/b/c')
@mock.patch(
'utils.package_utils.osinfo.get_osinfo',
side_effect=lambda mount_point=None: {
'os': 'ubuntu',
'version': '123'})
@mock.patch('utils.package_utils.os.path.exists',
side_effect=lambda p: True)
@mock.patch('utils.package_utils.get_dpkg_packages',
side_effect=lambda a, b, c: [('pkg1',
PackageFeature(None, 'pkg1',
123, 'v1',
'x86'))])
def test_package_container_crawler_avoidsetns(self, *args):
fc = PackageContainerCrawler()
for (k, f, fname) in fc.crawl(avoid_setns=True):
assert fname == "package"
assert f == PackageFeature(
installed=None,
pkgname='pkg1',
pkgsize=123,
pkgversion='v1',
pkgarchitecture='x86')
assert args[0].call_count == 1
@mock.patch('plugins.systems.process_host_crawler.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_process_host_crawler(self, *args):
fc = ProcessHostCrawler()
for (k, f, fname) in fc.crawl():
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[0].call_count == 1
@mock.patch(
("plugins.systems.process_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
@mock.patch(
'plugins.systems.process_container_crawler.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.process_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
def test_process_container_crawler(self, *args):
fc = ProcessContainerCrawler()
for (k, f, fname) in fc.crawl('123'):
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[0].call_count == 1
@mock.patch('plugins.systems.process_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.process_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch('plugins.systems.process_vm_crawler.psvmi')
def test_process_vm_crawler(self, *args):
fc = process_vm_crawler()
for (k, f, fname) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
print f
assert fname == "process"
assert f.pname == 'init'
assert f.cmd == 'cmd'
assert f.pid == 123
assert args[1].call_count == 1 # process_iter
@mock.patch('utils.disk_utils.psutil.disk_partitions',
side_effect=mocked_disk_partitions)
@mock.patch('utils.disk_utils.psutil.disk_usage',
side_effect=lambda x: pdiskusage(10, 100))
def test_crawl_disk_partitions_invm_mode(self, *args):
fc = DiskHostCrawler()
disks = fc.crawl()
assert set(disks) == set([('/a',
DiskFeature(partitionname='/dev/a',
freepct=90.0,
fstype='type',
mountpt='/a',
mountopts='opts',
partitionsize=100),
'disk'),
('/b',
DiskFeature(partitionname='/dev/b',
freepct=90.0,
fstype='type',
mountpt='/b',
mountopts='opts',
partitionsize=100),
'disk')])
@mock.patch(
'plugins.systems.disk_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('utils.disk_utils.psutil.disk_partitions',
side_effect=mocked_disk_partitions)
@mock.patch('utils.disk_utils.psutil.disk_usage',
side_effect=lambda x: pdiskusage(10, 100))
@mock.patch(
("plugins.systems.disk_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_disk_partitions_outcontainer_mode(self, *args):
fc = DiskContainerCrawler()
disks = fc.crawl('123')
assert set(disks) == set([('/a',
DiskFeature(partitionname='/dev/a',
freepct=90.0,
fstype='type',
mountpt='/a',
mountopts='opts',
partitionsize=100),
'disk'),
('/b',
DiskFeature(partitionname='/dev/b',
freepct=90.0,
fstype='type',
mountpt='/b',
mountopts='opts',
partitionsize=100),
'disk')])
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_crawl_metrics_invm_mode(self, *args):
fc = MetricHostCrawler()
for (k, f, t) in fc.crawl():
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[0].call_count == 1
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch('utils.metric_utils.round',
side_effect=throw_os_error)
def test_crawl_metrics_invm_mode_failure(self, *args):
with self.assertRaises(OSError):
fc = MetricHostCrawler()
for ff in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('utils.metric_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.metric_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.disk_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_metrics_outcontainer_mode(self, *args):
fc = MetricContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
assert args[0].call_count == 1
@mock.patch('plugins.systems.metric_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.metric_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch(
("plugins.systems.metric_vm_crawler."
"MetricVmCrawler._crawl_metrics_cpu_percent"),
side_effect=lambda proc: 30.0)
@mock.patch('plugins.systems.metric_vm_crawler.psvmi')
def test_crawl_metrics_vm_mode(self, *args):
fc = MetricVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f.cpupct == 30.0
assert f.mempct == 30.0
assert f.pname == 'init'
assert f.pid == 123
assert f.rss == 10
assert f.status == 'Running'
assert f.vms == 20
assert f.read == 10
assert f.write == 20
        assert args[1].call_count == 1 # _crawl_metrics_cpu_percent
@mock.patch('utils.connection_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
def test_crawl_connections_invm_mode(self, *args):
fc = ConnectionHostCrawler()
for (k, f, t) in fc.crawl():
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[0].call_count == 1
@mock.patch('utils.connection_utils.psutil.process_iter',
side_effect=lambda: [Process('init')])
@mock.patch(
'plugins.systems.connection_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
("plugins.systems.connection_container_crawler.utils.dockerutils."
"exec_dockerinspect"),
side_effect=lambda long_id: {'State': {'Pid': 123}})
def test_crawl_connections_outcontainer_mode(self, *args):
fc = ConnectionContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[0].call_count == 1
@mock.patch('plugins.systems.connection_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.connection_vm_crawler.psvmi.process_iter',
side_effect=lambda vmc: [Process('init')])
@mock.patch('plugins.systems.connection_vm_crawler.psvmi')
def test_crawl_connections_outvm_mode(self, *args):
fc = ConnectionVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f.localipaddr == '1.1.1.1'
assert f.remoteipaddr == '2.2.2.2'
assert f.localport == '22'
assert f.remoteport == '22'
assert args[1].call_count == 1
@mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(2, 2, 3, 4))
def test_crawl_memory_invm_mode(self, *args):
fc = MemoryHostCrawler()
for (k, f, t) in fc.crawl():
assert f == MemoryFeature(
memory_used=2,
memory_buffered=3,
memory_cached=4,
memory_free=2,
memory_util_percentage=50)
assert args[0].call_count == 1
@mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory',
side_effect=throw_os_error)
def test_crawl_memory_invm_mode_failure(self, *args):
fc = MemoryHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('plugins.systems.memory_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.memory_vm_crawler.psvmi.system_memory_info',
side_effect=lambda vmc: psvmi_memory(10, 20, 30, 40))
@mock.patch('plugins.systems.memory_vm_crawler.psvmi')
def test_crawl_memory_outvm_mode(self, *args):
fc = MemoryVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == MemoryFeature(
memory_used=10,
memory_buffered=20,
memory_cached=30,
memory_free=40,
memory_util_percentage=20)
assert args[1].call_count == 1
@mock.patch(
'plugins.systems.memory_container_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(
10,
10,
3,
10))
@mock.patch('plugins.systems.memory_container_crawler.open',
side_effect=mocked_memory_cgroup_open)
@mock.patch('plugins.systems.memory_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_memory_outcontainer_mode(self, *args):
fc = MemoryContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == MemoryFeature(
memory_used=2,
memory_buffered=200,
memory_cached=100,
memory_free=0,
memory_util_percentage=100)
assert args[1].call_count == 3 # 3 cgroup files
@mock.patch(
'plugins.systems.memory_container_crawler.psutil.virtual_memory',
side_effect=lambda: psutils_memory(
10,
10,
3,
10))
@mock.patch('plugins.systems.memory_container_crawler.open',
side_effect=throw_os_error)
@mock.patch('plugins.systems.memory_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_memory_outcontainer_mode_failure(self, *args):
fc = MemoryContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
        assert args[1].call_count == 1 # 1 cgroup file
@mock.patch(
'plugins.systems.cpu_host_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
def test_crawl_cpu_invm_mode(self, *args):
fc = CpuHostCrawler()
for (k, f, t) in fc.crawl():
assert f == CpuFeature(
cpu_idle=10,
cpu_nice=20,
cpu_user=30,
cpu_wait=40,
cpu_system=50,
cpu_interrupt=60,
cpu_steal=70,
cpu_util=90)
assert args[0].call_count == 1
@mock.patch('plugins.systems.cpu_host_crawler.psutil.cpu_times_percent',
side_effect=throw_os_error)
def test_crawl_cpu_invm_mode_failure(self, *args):
fc = CpuHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
@mock.patch('plugins.systems.cpu_container_crawler.CpuContainerCrawler._get_scaling_factor',
side_effect=lambda a,b: 1.0)
@mock.patch('plugins.systems.cpu_container_crawler.time.sleep')
@mock.patch('plugins.systems.cpu_container_crawler.open',
side_effect=mocked_cpu_cgroup_open)
@mock.patch('plugins.systems.cpu_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_cpu_outcontainer_mode(self, *args):
fc = CpuContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == CpuFeature(
cpu_idle=90.0,
cpu_nice=20,
cpu_user=5.0,
cpu_wait=40,
cpu_system=5.0,
cpu_interrupt=60,
cpu_steal=70,
cpu_util=10.0)
assert args[1].call_count == 3 # open for 3 cgroup files
@mock.patch(
'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent',
side_effect=lambda percpu: [
psutils_cpu(
10,
20,
30,
40,
50,
60,
70)])
@mock.patch('plugins.systems.cpu_container_crawler.time.sleep')
@mock.patch('plugins.systems.cpu_container_crawler.open',
side_effect=throw_os_error)
@mock.patch('plugins.systems.cpu_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_cpu_outcontainer_mode_failure(self, *args):
fc = CpuContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.interface_host_crawler.psutil.net_io_counters',
side_effect=lambda pernic: {'interface1-unit-tests':
psutils_net(
10,
20,
30,
40,
50,
60)})
def test_crawl_interface_invm_mode(self, *args):
fc = InterfaceHostCrawler()
for (k, f, t) in fc.crawl():
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl():
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[0].call_count == 2
@mock.patch(
'plugins.systems.interface_host_crawler.psutil.net_io_counters',
side_effect=throw_os_error)
def test_crawl_interface_invm_mode_failure(self, *args):
fc = InterfaceHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
# Each crawl in crawlutils.py instantiates a FeaturesCrawler object
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 2
@mock.patch('plugins.systems.interface_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
@mock.patch(
'plugins.systems.interface_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch(
'plugins.systems.interface_container_crawler.psutil.net_io_counters',
side_effect=lambda pernic: {'eth0':
psutils_net(
10,
20,
30,
40,
50,
60)})
def test_crawl_interface_outcontainer_mode(self, *args):
fc = InterfaceContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl('123'):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[0].call_count == 2
assert args[1].call_count == 2
@mock.patch('plugins.systems.interface_vm_crawler.psvmi.context_init',
side_effect=lambda dn1, dn2, kv, d, a: 1000)
@mock.patch('plugins.systems.interface_vm_crawler.psvmi.interface_iter',
side_effect=lambda vmc: [psvmi_interface(
'eth1', 10, 20, 30, 40, 50, 60)])
@mock.patch('plugins.systems.interface_vm_crawler.psvmi')
def test_crawl_interface_outvm_mode(self, *args):
fc = InterfaceVmCrawler()
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')):
assert f == InterfaceFeature(
if_octets_tx=0,
if_octets_rx=0,
if_packets_tx=0,
if_packets_rx=0,
if_errors_tx=0,
if_errors_rx=0)
assert args[1].call_count == 2
assert args[2].call_count == 2
@mock.patch('plugins.systems.load_host_crawler.os.getloadavg',
side_effect=lambda: [1, 2, 3])
def test_crawl_load_invm_mode(self, *args):
fc = LoadHostCrawler()
for (k, f, t) in fc.crawl():
assert f == LoadFeature(shortterm=1, midterm=2, longterm=2)
assert args[0].call_count == 1
@mock.patch('plugins.systems.load_host_crawler.os.getloadavg',
side_effect=throw_os_error)
def test_crawl_load_invm_mode_failure(self, *args):
fc = LoadHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.load_container_crawler.run_as_another_namespace',
side_effect=mocked_run_as_another_namespace)
@mock.patch('plugins.systems.load_container_crawler.os.getloadavg',
side_effect=lambda: [1, 2, 3])
@mock.patch('plugins.systems.load_container_crawler.DockerContainer',
side_effect=lambda container_id: DummyContainer(container_id))
def test_crawl_load_outcontainer_mode(self, *args):
fc = LoadContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == LoadFeature(shortterm=1, midterm=2, longterm=2)
assert args[1].call_count == 1
assert args[2].call_count == 1
@mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps',
side_effect=lambda: [{'State': {'Running': True},
'Image': 'reg/image:latest',
'Config': {'Cmd': 'command'},
'Name': 'name',
'Id': 'id'}])
def test_crawl_dockerps_invm_mode(self, *args):
fc = DockerpsHostCrawler()
for (k, f, t) in fc.crawl():
assert f == DockerPSFeature(
Status=True,
Created=0,
Image='reg/image:latest',
Ports=[],
Command='command',
Names='name',
Id='id')
assert args[0].call_count == 1
@mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps',
side_effect=throw_os_error)
def test_crawl_dockerps_invm_mode_failure(self, *args):
fc = DockerpsHostCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl():
pass
assert args[0].call_count == 1
@mock.patch('plugins.systems.dockerhistory_container_crawler.exec_docker_history',
side_effect=lambda long_id: [
{'Id': 'image1', 'random': 'abc'},
{'Id': 'image2', 'random': 'abc'}])
def test_crawl_dockerhistory_outcontainer_mode(self, *args):
fc = DockerhistoryContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == {'history': [{'Id': 'image1', 'random': 'abc'},
{'Id': 'image2', 'random': 'abc'}]}
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerhistory_container_crawler.exec_docker_history',
side_effect=throw_os_error)
def test_crawl_dockerhistory_outcontainer_mode_failure(self, *args):
fc = DockerhistoryContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect',
side_effect=lambda long_id: {
'Id': 'image1',
'random': 'abc'})
def test_crawl_dockerinspect_outcontainer_mode(self, *args):
fc = DockerinspectContainerCrawler()
for (k, f, t) in fc.crawl('123'):
assert f == {'Id': 'image1', 'random': 'abc'}
assert args[0].call_count == 1
@mock.patch(
'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect',
side_effect=throw_os_error)
def test_crawl_dockerinspect_outcontainer_mode_failure(self, *args):
fc = DockerinspectContainerCrawler()
with self.assertRaises(OSError):
for (k, f, t) in fc.crawl('123'):
pass
assert args[0].call_count == 1
|
"""Worker implementation."""
from __future__ import absolute_import, unicode_literals
from .worker import WorkController
__all__ = ('WorkController',)
|
import click
def incomplete(package):
click.echo('{} packages not yet implemented'.format(package))
@click.group()
def run():
'''Build packages inside Docker containers.'''
pass
@click.command()
@click.option('--image', '-i', help='image to build in', required=True)
def rpm(image):
package = click.style('RPM', fg='red', bold=True)
incomplete(package)
@click.command()
@click.option('--image', '-i', help='image to build in', required=True)
def deb(image):
package = click.style('Debian', fg='magenta', bold=True)
incomplete(package)
@click.command()
@click.option('--image', '-i', help='image to build in', required=True)
def arch(image):
package = click.style('Arch', fg='cyan', bold=True)
incomplete(package)
run.add_command(rpm)
run.add_command(deb)
run.add_command(arch)
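# Minimal invocation sketch. This is an assumption: the group may instead be
# exposed through a console_scripts entry point, in which case the guard below
# is redundant. Example (hypothetical script name and image):
#   $ python build.py rpm --image centos:7
#   RPM packages not yet implemented
if __name__ == '__main__':
    run()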
|
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0023_assignment_failed'),
]
operations = [
migrations.AddField(
model_name='certification',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='step',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='workercertification',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='workflow',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='workflowversion',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
import threading
import time
import re
from openflow.optin_manager.sfa.openflow_utils.CreateOFSliver import CreateOFSliver
from openflow.optin_manager.sfa.openflow_utils.sliver_status import get_sliver_status
from openflow.optin_manager.sfa.openflow_utils.delete_slice import delete_slice
from openflow.optin_manager.sfa.openflow_utils.rspec3_to_expedient import get_fs_from_group
from openflow.optin_manager.sfa.util.xrn import Xrn
from openflow.optin_manager.opts.models import Experiment, ExperimentFLowSpace
from openflow.optin_manager.xmlrpc_server.models import CallBackServerProxy, FVServerProxy
from django.conf import settings
from openflow.optin_manager.sfa.openflow_utils.ServiceThread import ServiceThread
from openflow.optin_manager.sfa.models import ExpiringComponents
from openflow.optin_manager.sfa.openflow_utils.federationlinkmanager import FederationLinkManager
from openflow.optin_manager.sfa.tests.data_example import test_switches, test_links
class OFShell:
def __init__(self):
pass
@staticmethod
def get_switches(used_switches=[]):
complete_list = []
switches = OFShell().get_raw_switches()
for switch in switches:
if len(used_switches)>0:
if not switch[0] in used_switches:
continue
if int(switch[1]['nPorts']) == 0:
#TODO: Uncomment when merge with ofelia.development
#send_mail('SFA OptinManager Error', 'There are some errors related with switches: GetSwitches() returned 0 ports.',settings.ROOT_EMAIL, [settings.ROOT_EMAIL])
raise Exception("The switch with dpid:%s has a connection problem and the OCF Island Manager has already been informed. Please try again later." % str(switch[0]))
#TODO: Send Mail to the Island Manager Here.
port_list = switch[1]['portNames'].split(',')
ports = list()
for port in port_list:
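                # Each entry is expected to look like "<name>(<number>)", e.g.
                # "eth1(1)" (format assumed from the regex); the match below
                # splits it into port name and port number.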
match = re.match(r'[\s]*(.*)\((.*)\)', port)
ports.append({'port_name':match.group(1), 'port_num':match.group(2)})
complete_list.append({'dpid':switch[0], 'ports':ports})
return complete_list
@staticmethod
def get_links():
links = OFShell().get_raw_links()
link_list = list()
for link in links:
link_list.append({'src':{ 'dpid':link[0],'port':link[1]}, 'dst':{'dpid':link[2], 'port':link[3]}})
#for link in FederationLinkManager.get_federated_links():
# link_list.append({'src':{'dpid':link['src_id'], 'port':link['src_port']}, 'dst':{'dpid':link['dst_id'],'port':link['dst_port']}})
return link_list
@staticmethod
def get_federation_links():
link_list = list()
for link in FederationLinkManager.get_federated_links():
link_list.append({'src':{'dpid':link['src_id'], 'port':link['src_port']}, 'dst':{'dpid':link['dst_id'],'port':link['dst_port']}})
return link_list
def GetNodes(self,slice_urn=None,authority=None):
if not slice_urn:
switch_list = self.get_switches()
link_list = self.get_links()
federated_links = self.get_federation_links()
return {'switches':switch_list, 'links':link_list, 'federation_links':federated_links}
else:
nodes = list()
experiments = Experiment.objects.filter(slice_id=slice_urn)
for experiment in experiments:
expfss = ExperimentFLowSpace.objects.filter(exp = experiment.id)
for expfs in expfss:
if not expfs.dpid in nodes:
nodes.append(expfs.dpid)
switches = self.get_switches(nodes)
return {'switches':switches, 'links':[]}
#def GetSlice(self,slicename,authority):
#
# name = slicename
# nodes = self.GetNodes()
# slices = dict()
# List = list()
# return slices
def StartSlice(self, slice_urn):
#Look if the slice exists and return True or RecordNotFound
experiments = Experiment.objects.filter(slice_id=str(slice_urn))
if len(experiments) > 0:
return True
else:
raise ""
def StopSlice(self, slice_urn):
#Look if the slice exists and return True or RecordNotFound
experiments = Experiment.objects.filter(slice_id=slice_urn)
if len(experiments) > 0:
return True
else:
raise ""
def RebootSlice(self, slice_urn):
return self.StartSlice(slice_urn)
def DeleteSlice(self, slice_urn):
try:
delete_slice(slice_urn)
return 1
except Exception as e:
print e
raise ""
def CreateSliver(self, requested_attributes, slice_urn, authority,expiration):
project_description = 'SFA Project from %s' %authority
slice_id = slice_urn
for rspec_attrs in requested_attributes:
switch_slivers = get_fs_from_group(rspec_attrs['match'], rspec_attrs['group'])
controller = rspec_attrs['controller'][0]['url']
email = rspec_attrs['email']
email_pass = ''
slice_description = rspec_attrs['description']
if not self.check_req_switches(switch_slivers):
raise Exception("The Requested OF Switches on the RSpec do not match with the available OF switches of this island. Please check the datapath IDs of your Request RSpec.")
CreateOFSliver(slice_id, authority, project_description, slice_urn, slice_description, controller, email, email_pass, switch_slivers)
if expiration:
#Since there is a synchronous connection, expiring_components table is easier to fill than VTAM
#ExpiringComponents.objects.create(slice=slice_urn, authority=authority, expires=expiration)
pass
return 1
def SliverStatus(self, slice_urn):
try:
print "-----------------------------------------------------------SliverStatus"
sliver_status = get_sliver_status(slice_urn)
print sliver_status
if len(sliver_status) == 0:
xrn = Xrn(slice_urn, 'slice')
slice_leaf = xrn.get_leaf()
sliver_status = ['The requested flowspace for slice %s is still pending for approval' %slice_leaf]
granted_fs = {'granted_flowspaces':get_sliver_status(slice_urn)}
return [granted_fs]
except Exception as e:
import traceback
            traceback.print_exc()
raise e
def check_req_switches(self, switch_slivers):
available_switches = self.get_raw_switches()
for sliver in switch_slivers:
found = False
for switch in available_switches:
if str(sliver['datapath_id']) == str(switch[0]): #Avoiding Unicodes
found = True
break
if found == False:
return False
return True
def get_raw_switches(self):
try:
#raise Exception("")
fv = FVServerProxy.objects.all()[0]
switches = fv.get_switches()
except Exception as e:
switches = test_switches
#raise e
return switches
def get_raw_links(self):
try:
#raise Exception("")
fv = FVServerProxy.objects.all()[0]
links = fv.get_links()
except Exception as e:
links = test_links
#raise e
return links
|
extensions = [
'oslosphinx',
'reno.sphinxext'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Tacker Release Notes'
copyright = u'2016, Tacker Developers'
import pbr.version
tacker_version = pbr.version.VersionInfo('tacker')
release = tacker_version.version_string_with_vcs()
version = tacker_version.canonical_version_string()
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'tackerdoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
latex_documents = [
('index', 'TackerReleaseNotes.tex',
u'Tacker Release Notes Documentation',
u'Tacker Developers', 'manual'),
]
man_pages = [
('index', 'tackerreleasenotes', u'Tacker Release Notes Documentation',
[u'Tacker Developers'], 1)
]
texinfo_documents = [
('index', 'TackerReleaseNotes', u'Tacker Release Notes Documentation',
u'Tacker Developers', 'TackerReleaseNotes',
'Tacker Project.',
'Miscellaneous'),
]
|
from setuptools import setup
DESC = """Installer for Apache Bloodhound
Adds the bloodhound_setup cli command.
"""
versions = [
(0, 8, 0),
(0, 9, 0),
]
latest = '.'.join(str(x) for x in versions[-1])
setup(
name="bloodhound_installer",
version=latest,
description=DESC.split('\n', 1)[0],
author="Apache Bloodhound",
license="Apache License v2",
url="https://bloodhound.apache.org/",
requires=['trac', 'BloodhoundMultiProduct'],
packages=['bhsetup'],
entry_points="""
[console_scripts]
bloodhound_setup = bhsetup.bloodhound_setup:run
""",
long_description=DESC,
)
|
from ggrc import db
from ggrc.models.mixins import (
deferred, Noted, Described, Hyperlinked, WithContact, Titled, Slugged,
)
from ggrc.models.object_document import Documentable
from ggrc.models.object_person import Personable
from ggrc.models.relationship import Relatable
from ggrc.models.request import Request
class Response(Noted, Described, Hyperlinked, WithContact,
Titled, Slugged, db.Model):
__tablename__ = 'responses'
__mapper_args__ = {
'polymorphic_on': 'response_type',
}
_title_uniqueness = False
_slug_uniqueness = False
# Override `Titled.title` to provide default=""
title = deferred(
db.Column(db.String, nullable=False, default=""), 'Response')
VALID_STATES = (u'Assigned', u'Submitted', u'Accepted', u'Rejected')
VALID_TYPES = (u'documentation', u'interview', u'population sample')
request_id = deferred(
db.Column(db.Integer, db.ForeignKey('requests.id'), nullable=False),
'Response')
response_type = db.Column(db.Enum(*VALID_TYPES), nullable=False)
status = deferred(db.Column(db.String, nullable=False), 'Response')
population_worksheet_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
'Response')
population_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
sample_worksheet_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
'Response')
sample_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
sample_evidence_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=True),
'Response')
population_worksheet = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.population_worksheet_id"
)
sample_worksheet = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.sample_worksheet_id"
)
sample_evidence = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.sample_evidence_id"
)
@staticmethod
def _extra_table_args(cls):
return (
db.Index('population_worksheet_document', 'population_worksheet_id'),
db.Index('sample_evidence_document', 'sample_evidence_id'),
db.Index('sample_worksheet_document', 'sample_worksheet_id'),
)
_publish_attrs = [
'request',
'status',
'response_type',
]
_sanitize_html = [
'description',
]
_aliases = {
"description": "Response",
"request": {
"display_name": "Request",
"mandatory": True,
"filter_by": "_filter_by_request",
},
"response_type": {
"display_name": "Response Type",
"mandatory": True,
},
"status": "Status",
"title": None,
"secondary_contact": None,
"notes": None,
}
def _display_name(self):
return u'Response with id={0} for Audit "{1}"'.format(
self.id, self.request.audit.display_name)
@classmethod
def _filter_by_request(cls, predicate):
return Request.query.filter(
(Request.id == cls.request_id) &
predicate(Request.slug)
).exists()
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Response, cls).eager_query()
return query.options(
orm.joinedload('request'))
class DocumentationResponse(Relatable, Documentable, Personable, Response):
__mapper_args__ = {
'polymorphic_identity': 'documentation'
}
_table_plural = 'documentation_responses'
_publish_attrs = []
_sanitize_html = []
class InterviewResponse(Relatable, Documentable, Personable, Response):
__mapper_args__ = {
'polymorphic_identity': 'interview'
}
_table_plural = 'interview_responses'
meetings = db.relationship(
'Meeting',
backref='response',
cascade='all, delete-orphan'
)
_publish_attrs = [
'meetings',
]
_sanitize_html = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(InterviewResponse, cls).eager_query()
return query.options(
orm.subqueryload('meetings'))
class PopulationSampleResponse(Relatable, Documentable, Personable, Response):
__mapper_args__ = {
'polymorphic_identity': 'population sample'
}
_table_plural = 'population_sample_responses'
_publish_attrs = [
'population_worksheet',
'population_count',
'sample_worksheet',
'sample_count',
'sample_evidence',
]
_sanitize_html = [
'population_count',
'sample_count',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(PopulationSampleResponse, cls).eager_query()
return query.options(
orm.joinedload('population_worksheet'),
orm.joinedload('sample_worksheet'),
orm.joinedload('sample_evidence'))
|
"""Contains the logic for `aq show rack --rack`."""
from aquilon.aqdb.model import Rack
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
class CommandShowRackRack(BrokerCommand):
required_parameters = ["rack"]
def render(self, session, rack, **arguments):
return Rack.get_unique(session, rack, compel=True)
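# Hedged usage note: per the module docstring, this command backs the client
# invocation below (option spelling taken from the docstring, not verified here):
#   aq show rack --rack <rack_name>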
|
from __future__ import print_function
import unittest
from parallel_executor_test_base import TestParallelExecutorBase, DeviceType
import seresnext_net
import paddle.fluid.core as core
class TestResnetWithReduceBase(TestParallelExecutorBase):
def _compare_reduce_and_allreduce(self, use_device, delta2=1e-5):
if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda():
return
all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=False,
optimizer=seresnext_net.optimizer)
reduce_first_loss, reduce_last_loss = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=True,
optimizer=seresnext_net.optimizer)
for loss in zip(all_reduce_first_loss, reduce_first_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(all_reduce_last_loss, reduce_last_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
if not use_device:
return
all_reduce_first_loss_seq, all_reduce_last_loss_seq = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=False,
optimizer=seresnext_net.optimizer,
enable_sequential_execution=True)
reduce_first_loss_seq, reduce_last_loss_seq = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_device),
iter=seresnext_net.iter(use_device),
batch_size=seresnext_net.batch_size(use_device),
use_device=use_device,
use_reduce=True,
optimizer=seresnext_net.optimizer,
enable_sequential_execution=True)
for loss in zip(all_reduce_first_loss, all_reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(all_reduce_last_loss, all_reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
for loss in zip(reduce_first_loss, reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(reduce_last_loss, reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
for loss in zip(all_reduce_first_loss_seq, reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
for loss in zip(all_reduce_last_loss_seq, reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
class TestResnetWithReduceCPU(TestResnetWithReduceBase):
def test_seresnext_with_reduce(self):
self._compare_reduce_and_allreduce(
use_device=DeviceType.CPU, delta2=1e-3)
if __name__ == '__main__':
unittest.main()
|
from a10sdk.common.A10BaseClass import A10BaseClass
class PortReservation(A10BaseClass):
"""Class Description::
DS-Lite Static Port Reservation.
Class port-reservation supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param nat_end_port: {"description": "NAT End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param inside: {"optional": false, "type": "string", "description": "Inside User Address and Port Range (DS-Lite Inside User's Tunnel Source IPv6 Address)", "format": "ipv6-address"}
:param tunnel_dest_address: {"optional": false, "type": "string", "description": "DS-Lite Inside User's Tunnel Destination IPv6 Address", "format": "ipv6-address"}
:param inside_start_port: {"description": "Inside Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat: {"optional": false, "type": "string", "description": "NAT Port Range (NAT IP address)", "format": "ipv4-address"}
:param inside_end_port: {"description": "Inside End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat_start_port: {"description": "NAT Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param inside_addr: {"optional": false, "type": "string", "description": "Inside User IP address", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "inside","tunnel_dest_address","inside_addr","inside_start_port","inside_end_port","nat","nat_start_port","nat_end_port"]
self.b_key = "port-reservation"
self.a10_url="/axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}"
self.DeviceProxy = ""
self.nat_end_port = ""
self.uuid = ""
self.inside = ""
self.tunnel_dest_address = ""
self.inside_start_port = ""
self.nat = ""
self.inside_end_port = ""
self.nat_start_port = ""
self.inside_addr = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
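# Illustrative usage sketch (assumption; not part of the generated a10sdk module).
# PortReservation only stores keyword arguments as attributes; REST calls go through
# the DeviceProxy supplied by the caller. All values below are hypothetical.
if __name__ == "__main__":
    reservation = PortReservation(
        inside="2001:db8::1",
        tunnel_dest_address="2001:db8::2",
        inside_addr="10.0.0.5",
        inside_start_port=2000,
        inside_end_port=2010,
        nat="203.0.113.7",
        nat_start_port=4000,
        nat_end_port=4010,
    )
    # The object URL joins the key fields with '+', as documented in the class docstring.
    print(reservation.a10_url.format(**reservation.__dict__))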
|
from __future__ import absolute_import
from pychron.core.ui import set_qt
from six.moves import range
from six.moves import zip
set_qt()
from traits.api import Any, Str
import os
import struct
from numpy import array
from pychron.core.helpers.filetools import pathtolist
from pychron.loggable import Loggable
from pychron.core.helpers.logger_setup import logging_setup
from pychron.mass_spec.database.massspec_database_adapter import MassSpecDatabaseAdapter
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.experiment.utilities.identifier import (
convert_identifier_to_int,
strip_runid,
)
logging_setup("ms_reverter")
class MassSpecReverter(Loggable):
"""
use to revert data from Pychron to MassSpec.
uses the MassSpecDatabasereverter to do the actual work.
This class takes a list of run ids, extracts data from
the pychron database, prepares data for use with MassSpecDatabasereverter,
then writes to the MassSpec database
"""
source = Any
destination = Any
path = Str
def do_revert(self):
# if self._connect_to_source():
if self._connect_to_destination():
self._do_revert()
def do_reimport(self):
if self._connect_to_source():
if self._connect_to_destination():
self._do_reimport()
def setup_source(self):
src = IsotopeDatabaseManager(connect=False, bind=False)
db = src.db
db.trait_set(
name="pychrondata",
kind="mysql",
host=os.environ.get("HOST"),
username="root",
password=os.environ.get("DB_PWD"),
)
self.source = src
def setup_destination(self):
dest = MassSpecDatabaseAdapter()
dest.trait_set(
name="massspecdata_crow",
kind="mysql",
username="root",
password=os.environ.get("DB_PWD"),
)
self.destination = dest
def _connect_to_source(self):
return self.source.connect()
def _connect_to_destination(self):
return self.destination.connect()
def _load_runids(self):
runids = pathtolist(self.path)
return runids
def _do_reimport(self):
rids = self._load_runids()
for rid in rids:
self._reimport_rid(rid)
def _reimport_rid(self, rid):
self.debug("========= Reimport {} =========".format(rid))
dest = self.destination
src_an = self._get_analysis_from_source(rid)
if src_an is None:
self.warning("could not find {}".format(rid))
else:
dest_an = dest.get_analysis_rid(rid)
for iso in dest_an.isotopes:
pb, pbnc = self._generate_blobs(src_an, iso.Label)
pt = iso.peak_time_series[0]
pt.PeakTimeBlob = pb
pt.PeakNeverBslnCorBlob = pbnc
dest.commit()
def _generate_blobs(self, src, isok):
dbiso = next(
(
i
for i in src.isotopes
if i.molecular_weight.name == isok and i.kind == "signal"
),
None,
)
dbiso_bs = next(
(
i
for i in src.isotopes
if i.molecular_weight.name == isok and i.kind == "baseline"
),
None,
)
xs, ys = self._unpack_data(dbiso.signal.data)
bsxs, bsys = self._unpack_data(dbiso_bs.signal.data)
bs = bsys.mean()
cys = ys - bs
        ncblob = b"".join([struct.pack(">f", v) for v in ys])
        cblob = b"".join([struct.pack(">ff", y, x) for y, x in zip(cys, xs)])
return cblob, ncblob
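    # Clarifying note (added; not part of the original module): the MassSpec
    # PeakTimeBlob packs big-endian (signal, time) float pairs with '>ff', while
    # PeakNeverBslnCorBlob packs only the baseline-uncorrected signal values with
    # '>f'. _unpack_data below reverses the '>ff' layout in 8-byte chunks.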
def _unpack_data(self, blob):
endianness = ">"
sx, sy = list(
zip(
*[
struct.unpack("{}ff".format(endianness), blob[i : i + 8])
for i in range(0, len(blob), 8)
]
)
)
return array(sx), array(sy)
def _get_analysis_from_source(self, rid):
if rid.count("-") > 1:
args = rid.split("-")
step = None
lan = "-".join(args[:-1])
aliquot = args[-1]
else:
lan, aliquot, step = strip_runid(rid)
lan = convert_identifier_to_int(lan)
db = self.source.db
dban = db.get_unique_analysis(lan, aliquot, step)
return dban
def _do_revert(self):
rids = self._load_runids()
for rid in rids:
self._revert_rid(rid)
def _revert_rid(self, rid):
"""
rid: str. typical runid e.g 12345, 12345-01, 12345-01A
if rid lacks an aliquot revert all aliquots and steps for
this rid
"""
self.debug("reverting {}".format(rid))
if "-" in rid:
# this is a specific analysis
self._revert_analysis(rid)
else:
self._revert_analyses(rid)
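    # Illustrative dispatch (hypothetical rids): '12345-01' or '12345-01A' contains a
    # dash and is treated as one specific analysis; a bare labnumber such as '12345'
    # falls through to _revert_analyses, which covers every aliquot and step.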
def _revert_analyses(self, rid):
"""
rid: str. e.g 12345
revert all analyses with this labnumber
"""
def _revert_analysis(self, rid):
"""
rid: str. e.g 12345-01 or 12345-01A
only revert this specific analysis
"""
# l,a,s = strip_runid(rid)
# db = self.source.db
dest = self.destination
# with db.session_ctx():
self.debug("========= Revert {} =========".format(rid))
dest_an = dest.get_analysis_rid(rid)
for iso in dest_an.isotopes:
isol = iso.Label
self.debug("{} reverting isotope id = {}".format(isol, iso.IsotopeID))
# fix IsotopeTable.NumCnts
            n = len(iso.peak_time_series[0].PeakTimeBlob) // 8
self.debug(
"{} fixing NumCnts. current={} new={}".format(isol, iso.NumCnts, n)
)
iso.NumCnts = n
nf = len(iso.peak_time_series)
if nf > 1:
self.debug("{} deleting {} refits".format(isol, nf - 1))
# delete peak time blobs
for i, pt in enumerate(iso.peak_time_series[1:]):
self.debug(
"{} A {:02d} deleting pt series {}".format(
isol, i + 1, pt.Counter
)
)
dest.delete(pt)
# delete isotope results
for i, ir in enumerate(iso.results[1:]):
self.debug(
"{} B {:02d} deleting results {}".format(
isol, i + 1, ir.Counter
)
)
dest.delete(ir)
dest.commit()
if __name__ == "__main__":
m = MassSpecReverter(path="/Users/ross/Sandbox/crow_revert.txt")
m.setup_source()
m.setup_destination()
m.do_reimport()
# m.do_revert()
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.filter import Filter
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseFilterTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'target': Dependencies,
'java_library': JavaLibrary,
'page': Page,
'python_library': PythonLibrary,
'python_requirement_library': PythonRequirementLibrary,
}
)
@classmethod
def task_type(cls):
return Filter
class FilterEmptyTargetsTest(BaseFilterTest):
def test_no_filters(self):
self.assert_console_output()
def test_type(self):
self.assert_console_output(options={'type': ['page']})
self.assert_console_output(options={'type': ['java_library']})
def test_regex(self):
self.assert_console_output(options={'regex': ['^common']})
self.assert_console_output(options={'regex': ['-^common']})
class FilterTest(BaseFilterTest):
def setUp(self):
super(FilterTest, self).setUp()
requirement_injected = set()
def add_to_build_file(path, name, *deps):
if path not in requirement_injected:
self.add_to_build_file(path, "python_requirement_library(name='foo')")
requirement_injected.add(path)
all_deps = ["'{0}'".format(dep) for dep in deps] + ["':foo'"]
self.add_to_build_file(path, dedent("""
python_library(name='{name}',
dependencies=[{all_deps}],
tags=['{tag}']
)
""".format(name=name, tag=name + "_tag", all_deps=','.join(all_deps))))
add_to_build_file('common/a', 'a')
add_to_build_file('common/b', 'b')
add_to_build_file('common/c', 'c')
add_to_build_file('overlaps', 'one', 'common/a', 'common/b')
add_to_build_file('overlaps', 'two', 'common/a', 'common/c')
add_to_build_file('overlaps', 'three', 'common/a', 'overlaps:one')
def test_roots(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
extra_targets=self.targets('overlaps/::')
)
def test_nodups(self):
targets = [self.target('common/b')] * 2
self.assertEqual(2, len(targets))
self.assert_console_output(
'common/b:b',
targets=targets
)
def test_no_filters(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::')
)
def test_filter_type(self):
self.assert_console_output(
'common/a:a',
'common/b:b',
'common/c:c',
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'type': ['python_library']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:foo',
targets=self.targets('::'),
options={'type': ['-python_library']}
)
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
# Note that the comma is inside the string, so these are ORed.
options={'type': ['python_requirement_library,python_library']}
)
def test_filter_multiple_types(self):
# A target can only have one type, so the output should be empty.
self.assert_console_output(
targets=self.targets('::'),
options={'type': ['python_requirement_library', 'python_library']}
)
def test_filter_target(self):
self.assert_console_output(
'common/a:a',
'overlaps:foo',
targets=self.targets('::'),
options={'target': ['common/a,overlaps/:foo']}
)
self.assert_console_output(
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'target': ['-common/a:a,overlaps:one,overlaps:foo']}
)
def test_filter_ancestor(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'overlaps:one',
'overlaps:foo',
targets=self.targets('::'),
options={'ancestor': ['overlaps:one,overlaps:foo']}
)
self.assert_console_output(
'common/c:c',
'common/c:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'ancestor': ['-overlaps:one,overlaps:foo']}
)
def test_filter_ancestor_out_of_context(self):
"""Tests that targets outside of the context used as filters are parsed before use."""
# Add an additional un-injected target, and then use it as a filter.
self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
self.assert_console_output(
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
options={'ancestor': ['-blacklist']}
)
def test_filter_ancestor_not_passed_targets(self):
"""Tests filtering targets based on an ancestor not in that list of targets."""
# Add an additional un-injected target, and then use it as a filter.
self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
self.assert_console_output(
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'), # blacklist is not in the list of targets
options={'ancestor': ['-blacklist']}
)
self.assert_console_output(
'common/a:a', # a: _should_ show up if we don't filter.
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
options={'ancestor': []}
)
def test_filter_regex(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('::'),
options={'regex': ['^common']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
options={'regex': ['+foo,^overlaps']}
)
self.assert_console_output(
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'regex': ['-^common,foo$']}
)
# Invalid regex.
self.assert_console_raises(TaskError,
targets=self.targets('::'),
options={'regex': ['abc)']}
)
def test_filter_tag_regex(self):
# Filter two.
self.assert_console_output(
'overlaps:three',
targets=self.targets('::'),
options={'tag_regex': ['+e(?=e)']}
)
# Removals.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:three',
targets=self.targets('::'),
options={'tag_regex': ['-one|two']}
)
# Invalid regex.
self.assert_console_raises(TaskError,
targets=self.targets('::'),
options={'tag_regex': ['abc)']}
)
def test_filter_tag(self):
# One match.
self.assert_console_output(
'common/a:a',
targets=self.targets('::'),
options={'tag': ['+a_tag']}
)
# Two matches.
self.assert_console_output(
'common/a:a',
'common/b:b',
targets=self.targets('::'),
options={'tag': ['+a_tag,b_tag']}
)
# One removal.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'tag': ['-one_tag']}
)
# Two removals.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:three',
targets=self.targets('::'),
options={'tag': ['-one_tag,two_tag']}
)
# No match.
self.assert_console_output(
targets=self.targets('::'),
options={'tag': ['+abcdefg_tag']}
)
# No match due to AND of separate predicates.
self.assert_console_output(
targets=self.targets('::'),
options={'tag': ['a_tag', 'b_tag']}
)
|
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
server_opts = [
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the'
' relevant server API calls such as create, rebuild'
                     ' or rescue. If the hypervisor does not support'
' password injection then the password returned will'
' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
XML_WARNING = False
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
global XML_WARNING
if not XML_WARNING:
LOG.warning(_('XML support has been deprecated and may be removed '
'as early as the Juno release.'))
XML_WARNING = True
if detailed:
elem.set('userId', 'user_id')
elem.set('tenantId', 'tenant_id')
elem.set('updated')
elem.set('created')
elem.set('hostId')
elem.set('accessIPv4')
elem.set('accessIPv6')
elem.set('status')
elem.set('progress')
elem.set('reservation_id')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
xmlutil.make_links(elem, 'links')
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request."""
node = self.find_first_child_named(server_node, "personality")
if node is not None:
personality = []
for file_node in self.find_children_named(node, "file"):
item = {}
if file_node.hasAttribute("path"):
item["path"] = file_node.getAttribute("path")
item["contents"] = self.extract_text(file_node)
personality.append(item)
return personality
else:
return None
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request."""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "imageRef", "flavorRef", "adminPass",
"accessIPv4", "accessIPv6", "key_name",
"availability_zone", "min_count", "max_count"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
res_id = server_node.getAttribute('return_reservation_id')
if res_id:
server['return_reservation_id'] = \
strutils.bool_from_string(res_id)
scheduler_hints = self._extract_scheduler_hints(server_node)
if scheduler_hints:
server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints
metadata_node = self.find_first_child_named(server_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
user_data_node = self.find_first_child_named(server_node, "user_data")
if user_data_node is not None:
server["user_data"] = self.extract_text(user_data_node)
personality = self._extract_personality(server_node)
if personality is not None:
server["personality"] = personality
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
security_groups = self._extract_security_groups(server_node)
if security_groups is not None:
server["security_groups"] = security_groups
# NOTE(vish): this is not namespaced in json, so leave it without a
# namespace for now
block_device_mapping = self._extract_block_device_mapping(server_node)
if block_device_mapping is not None:
server["block_device_mapping"] = block_device_mapping
block_device_mapping_v2 = self._extract_block_device_mapping_v2(
server_node)
if block_device_mapping_v2 is not None:
server["block_device_mapping_v2"] = block_device_mapping_v2
# NOTE(vish): Support this incorrect version because it was in the code
# base for a while and we don't want to accidentally break
# anyone that might be using it.
auto_disk_config = server_node.getAttribute('auto_disk_config')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
config_drive = server_node.getAttribute('config_drive')
if config_drive:
server['config_drive'] = config_drive
return server
def _extract_block_device_mapping(self, server_node):
"""Marshal the block_device_mapping node of a parsed request."""
node = self.find_first_child_named(server_node, "block_device_mapping")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
mapping = {}
attributes = ["volume_id", "snapshot_id", "device_name",
"virtual_name", "volume_size"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = value
attributes = ["delete_on_termination", "no_device"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = strutils.bool_from_string(value)
block_device_mapping.append(mapping)
return block_device_mapping
else:
return None
def _extract_block_device_mapping_v2(self, server_node):
"""Marshal the new block_device_mappings."""
node = self.find_first_child_named(server_node,
"block_device_mapping_v2")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
block_device_mapping.append(
dict((attr, child.getAttribute(attr))
for attr in block_device.bdm_new_api_fields
if child.getAttribute(attr)))
return block_device_mapping
def _extract_scheduler_hints(self, server_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node = self.find_first_child_named_in_namespace(server_node,
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request."""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
if network_node.hasAttribute("port"):
item["port"] = network_node.getAttribute("port")
networks.append(item)
return networks
else:
return None
def _extract_security_groups(self, server_node):
"""Marshal the security_groups attribute of a parsed request."""
node = self.find_first_child_named(server_node, "security_groups")
if node is not None:
security_groups = []
for sg_node in self.find_children_named(node, "security_group"):
item = {}
name = self.find_attribute_or_element(sg_node, 'name')
if name:
item["name"] = name
security_groups.append(item)
return security_groups
else:
return None
class ActionDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server action requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
dom = xmlutil.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_deserializer = {
'createImage': self._action_create_image,
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'rebuild': self._action_rebuild,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
}.get(action_name, super(ActionDeserializer, self).default)
action_data = action_deserializer(action_node)
return {'body': {action_name: action_data}}
def _action_create_image(self, node):
return self._deserialize_image_action(node, ('name',))
def _action_change_password(self, node):
if not node.hasAttribute("adminPass"):
raise AttributeError("No adminPass was specified in request")
return {"adminPass": node.getAttribute("adminPass")}
def _action_reboot(self, node):
if not node.hasAttribute("type"):
raise AttributeError("No reboot type was specified in request")
return {"type": node.getAttribute("type")}
def _action_rebuild(self, node):
rebuild = {}
if node.hasAttribute("name"):
name = node.getAttribute("name")
if not name:
raise AttributeError("Name cannot be blank")
rebuild['name'] = name
if node.hasAttribute("auto_disk_config"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
metadata_node = self.find_first_child_named(node, "metadata")
if metadata_node is not None:
rebuild["metadata"] = self.extract_metadata(metadata_node)
personality = self._extract_personality(node)
if personality is not None:
rebuild["personality"] = personality
if not node.hasAttribute("imageRef"):
raise AttributeError("No imageRef was specified in request")
rebuild["imageRef"] = node.getAttribute("imageRef")
if node.hasAttribute("adminPass"):
rebuild["adminPass"] = node.getAttribute("adminPass")
if node.hasAttribute("accessIPv4"):
rebuild["accessIPv4"] = node.getAttribute("accessIPv4")
if node.hasAttribute("accessIPv6"):
rebuild["accessIPv6"] = node.getAttribute("accessIPv6")
if node.hasAttribute("preserve_ephemeral"):
rebuild["preserve_ephemeral"] = strutils.bool_from_string(
node.getAttribute("preserve_ephemeral"), strict=True)
return rebuild
def _action_resize(self, node):
resize = {}
if node.hasAttribute("flavorRef"):
resize["flavorRef"] = node.getAttribute("flavorRef")
else:
raise AttributeError("No flavorRef was specified in request")
if node.hasAttribute("auto_disk_config"):
resize['OS-DCF:diskConfig'] = node.getAttribute("auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
resize['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
return resize
def _action_confirm_resize(self, node):
return None
def _action_revert_resize(self, node):
return None
def _deserialize_image_action(self, node, allowed_attributes):
data = {}
for attribute in allowed_attributes:
value = node.getAttribute(attribute)
if value:
data[attribute] = value
metadata_node = self.find_first_child_named(node, 'metadata')
if metadata_node is not None:
metadata = self.metadata_deserializer.extract_metadata(
metadata_node)
data['metadata'] = metadata
return data
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
"""Deserialize an xml-formatted server create request."""
dom = xmlutil.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
status = search_opts.pop('status', None)
if status is not None:
vm_state, task_state = common.task_and_vm_state_from_status(status)
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
        # If all_tenants is passed with 0 or false as the value, remove it
        # from the search options. Passing all_tenants with no value is
        # treated as enabling the feature.
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(str(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts,
limit=limit,
marker=marker,
want_objects=True)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
log_msg = _("Flavor '%s' could not be found ")
LOG.debug(log_msg, search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = instance_obj.InstanceList(objects=[])
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
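    # Example of the expected personality format (illustrative values only):
    #   [{"path": "/etc/motd", "contents": "<base64-encoded text>"}]
    # which becomes [("/etc/motd", "<base64-encoded text>")] for the compute service.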
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
for network in requested_networks:
try:
port_id = network.get('port', None)
if port_id:
network_uuid = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument : port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
raise exc.HTTPBadRequest(explanation=msg)
else:
network_uuid = network['uuid']
if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
#fixed IP address is optional
#if the fixed IP address is not provided then
#it will use one of the available IP address from the network
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ip_address(
address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
# For neutronv2, requested_networks
# should be tuple of (network_uuid, fixed_ip, port_id)
if utils.is_neutron():
networks.append((network_uuid, address, port_id))
else:
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = (_("Duplicate networks"
" (%s) are not allowed") %
network_uuid)
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
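    # Example of the expected networks format (illustrative values only):
    #   [{"uuid": "e7e59c6e-ba21-4b38-8428-bbbdc8495ae2", "fixed_ip": "10.0.0.5"},
    #    {"port": "a3a4cbb4-2f29-44a4-a4f3-9a61cb0b2c36"}]
    # With neutron the result is a list of (network_uuid, fixed_ip, port_id) tuples;
    # with nova-network it is (network_uuid, fixed_ip) pairs and duplicates are rejected.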
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
    B64_REGEX = re.compile('^(?:[A-Za-z0-9+/]{4})*'
                           '(?:[A-Za-z0-9+/]{2}=='
                           '|[A-Za-z0-9+/]{3}=)?$')
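    # Illustrative note (added; values are hypothetical): base64.b64decode('QUJD!!')
    # silently skips the illegal '!' characters and returns 'ABC', so the regex above
    # lets _decode_base64 return None for such malformed input instead of accepting it.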
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
if not utils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not utils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
image_uuid = self._image_from_req_data(body)
personality = server_dict.get('personality')
config_drive = None
if self.ext_mgr.is_loaded('os-config-drive'):
config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
sg_names = []
if self.ext_mgr.is_loaded('os-security-groups'):
security_groups = server_dict.get('security_groups')
if security_groups is not None:
sg_names = [sg['name'] for sg in security_groups
if sg.get('name')]
if not sg_names:
sg_names.append('default')
sg_names = list(set(sg_names))
requested_networks = None
if (self.ext_mgr.is_loaded('os-networks')
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
if not isinstance(requested_networks, list):
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
requested_networks = self._get_requested_networks(
requested_networks)
        access_ip_v4 = server_dict.get('accessIPv4')
        if access_ip_v4 is not None:
            self._validate_access_ipv4(access_ip_v4)
        access_ip_v6 = server_dict.get('accessIPv6')
        if access_ip_v6 is not None:
            self._validate_access_ipv6(access_ip_v6)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# optional openstack extensions:
key_name = None
if self.ext_mgr.is_loaded('os-keypairs'):
key_name = server_dict.get('key_name')
user_data = None
if self.ext_mgr.is_loaded('os-user-data'):
user_data = server_dict.get('user_data')
self._validate_user_data(user_data)
availability_zone = None
if self.ext_mgr.is_loaded('os-availability-zone'):
availability_zone = server_dict.get('availability_zone')
block_device_mapping = None
block_device_mapping_v2 = None
legacy_bdm = True
if self.ext_mgr.is_loaded('os-volumes'):
block_device_mapping = server_dict.get('block_device_mapping', [])
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
# Consider the new data format for block device mapping
block_device_mapping_v2 = server_dict.get(
'block_device_mapping_v2', [])
# NOTE (ndipanov): Disable usage of both legacy and new
# block device format in the same request
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
# Assume legacy format
legacy_bdm = not bool(block_device_mapping_v2)
try:
block_device_mapping_v2 = [
block_device.BlockDeviceDict.from_api(bdm_dict)
for bdm_dict in block_device_mapping_v2]
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
block_device_mapping = (block_device_mapping or
block_device_mapping_v2)
ret_resv_id = False
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = 1
max_count = 1
if self.ext_mgr.is_loaded('os-multiple-create'):
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count', 1)
max_count = server_dict.get('max_count', min_count)
try:
min_count = utils.validate_integer(
min_count, "min_count", min_value=1)
max_count = utils.validate_integer(
max_count, "max_count", min_value=1)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
auto_disk_config = server_dict.get('auto_disk_config')
scheduler_hints = {}
if self.ext_mgr.is_loaded('OS-SCH-HNT'):
scheduler_hints = server_dict.get('scheduler_hints', {})
try:
_get_inst_type = flavors.get_flavor_by_flavor_id
inst_type = _get_inst_type(flavor_id, ctxt=context,
read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
key_name=key_name,
metadata=server_dict.get('metadata', {}),
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.MultiplePortsNotApplicable,
exception.NetworkNotFound,
exception.PortNotFound,
exception.SecurityGroupNotFound,
exception.InvalidBDM,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.InstanceUserDataMalformed) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if ret_resv_id:
return wsgi.ResponseObject({'reservation_id': resv_id},
xml=ServerMultipleCreateTemplate)
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
if access_ipv4:
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = (
access_ipv4 and access_ipv4.strip() or None)
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
if access_ipv6:
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = (
access_ipv6 and access_ipv6.strip() or None)
if 'auto_disk_config' in body['server']:
auto_disk_config = strutils.bool_from_string(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
if 'hostId' in body['server']:
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if 'personality' in body['server']:
msg = _("Personality cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(ctxt, id,
want_objects=True)
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
            if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_ref_from_req_data(self, data):
try:
return unicode(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
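    # Illustrative (hypothetical href): 'http://nova/v2/images/e7e59c6e-ba21-4b38-8428-bbbdc8495ae2'
    # is reduced to its trailing segment; a trailing segment that is not uuid-like
    # raises HTTPBadRequest.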
def _image_from_req_data(self, data):
"""Get image data from the request or raise appropriate
exceptions
        If no image is supplied, checks to see if there are
        block devices set and the proper extensions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
bdm_v2 = data['server'].get('block_device_mapping_v2')
if (not image_ref and (
(bdm and self.ext_mgr.is_loaded('os-volumes')) or
(bdm_v2 and
self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
        if ('changePassword' not in body
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = self._get_server_admin_password(body['changePassword'])
server = self._get_server(context, req, id)
try:
self.compute_api.set_admin_password(context, server, password)
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
"""Ensure that we can work with the metadata given."""
try:
metadata.iteritems()
except AttributeError:
msg = _("Unable to parse metadata key/value pairs.")
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
flavor_ref = str(body["resize"]["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
body = body['rebuild']
try:
image_href = body["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(body)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'personality': 'files_to_inject',
'name': 'display_name',
'accessIPv4': 'access_ip_v4',
'accessIPv6': 'access_ip_v6',
'metadata': 'metadata',
'auto_disk_config': 'auto_disk_config',
}
kwargs = {}
# take the preserve_ephemeral value into account only when the
# corresponding extension is active
if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
and 'preserve_ephemeral' in body):
kwargs['preserve_ephemeral'] = strutils.bool_from_string(
body['preserve_ephemeral'], strict=True)
if 'accessIPv4' in body:
self._validate_access_ipv4(body['accessIPv4'])
if 'accessIPv6' in body:
self._validate_access_ipv6(body['accessIPv6'])
if 'name' in body:
self._validate_server_name(body['name'])
for request_attribute, instance_attribute in attr_map.items():
try:
kwargs[instance_attribute] = body[request_attribute]
except (KeyError, TypeError):
pass
self._validate_metadata(kwargs.get('metadata', {}))
if 'files_to_inject' in kwargs:
personality = kwargs.pop('files_to_inject')
files_to_inject = self._get_injected_files(personality)
else:
files_to_inject = None
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
files_to_inject=files_to_inject,
**kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the adminPass attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
props = bdms.root_metadata(
context, self.compute_api.image_service,
self.compute_api.volume_api)
image_meta = {'properties': props}
else:
src_image = self.compute_api.image_service.\
show(context, img)
image_meta = dict(src_image)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
url_prefix = self._view_builder._update_glance_link_prefix(
req.application_url)
image_ref = os.path.join(url_prefix,
context.project_id,
'images',
image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
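    # A sketch of the request body _action_create_image() accepts, based on
    # the parsing above (the name and metadata values are illustrative):
    #
    #   {"createImage": {"name": "backup-2014-01-01",
    #                    "metadata": {"purpose": "nightly"}}}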
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
return wsgi.Resource(Controller(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug(_("Removing options '%s' from query"),
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
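# A minimal usage sketch for remove_invalid_options(); FakeContext and the
# option values below are assumptions made purely for illustration:
#
#   class FakeContext(object):
#       is_admin = False
#   opts = {'name': 'vm1', 'host': 'compute1'}
#   remove_invalid_options(FakeContext(), opts, ('name', 'status'))
#   # opts is now {'name': 'vm1'}; 'host' was stripped for the non-admin caller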
|
import copy
import fixtures
import time
from oslo_config import cfg
from nova import context
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova import utils
from nova.virt import fake
CONF = cfg.CONF
class TestEvacuateResourceTrackerRace(
test.TestCase, integrated_helpers.InstanceHelperMixin,
):
"""Demonstrate bug #1896463.
Trigger a race condition between an almost finished evacuation that is
dropping the migration context, and the _update_available_resource()
periodic task that already loaded the instance list but haven't loaded the
migration list yet. The result is that the PCI allocation made by the
evacuation is deleted by the overlapping periodic task run and the instance
will not have PCI allocation after the evacuation.
"""
def setUp(self):
super().setUp()
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_mac_by_pci_address',
return_value='52:54:00:1e:59:c6'))
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_vf_num_by_pci_address',
return_value=1))
self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = 'latest'
self.api = self.admin_api
self.start_service('conductor')
self.start_service('scheduler')
self.flags(compute_driver='fake.FakeDriverWithPciResources')
self.useFixture(
fake.FakeDriverWithPciResources.
FakeDriverWithPciResourcesConfigFixture())
self.compute1 = self._start_compute('host1')
self.compute1_id = self._get_compute_node_id_by_host('host1')
self.compute1_service_id = self.admin_api.get_services(
host='host1', binary='nova-compute')[0]['id']
self.compute2 = self._start_compute('host2')
self.compute2_id = self._get_compute_node_id_by_host('host2')
self.compute2_service_id = self.admin_api.get_services(
host='host2', binary='nova-compute')[0]['id']
        # add extra ports and the related network to the neutron fixture
        # specifically for these tests. It cannot be added globally in the
        # fixture init as it adds a second network that makes auto-allocation
        # based tests fail due to ambiguous networks.
self.neutron._ports[self.neutron.sriov_port['id']] = \
copy.deepcopy(self.neutron.sriov_port)
self.neutron._networks[
self.neutron.network_2['id']] = self.neutron.network_2
self.neutron._subnets[
self.neutron.subnet_2['id']] = self.neutron.subnet_2
self.ctxt = context.get_admin_context()
def _get_compute_node_id_by_host(self, host):
        # we specifically need the integer id of the node, not the UUID, so we
        # need to use the old microversion
with utils.temporary_mutation(self.admin_api, microversion='2.52'):
hypers = self.admin_api.api_get(
'os-hypervisors').body['hypervisors']
for hyper in hypers:
if hyper['hypervisor_hostname'] == host:
return hyper['id']
self.fail('Hypervisor with hostname=%s not found' % host)
def _assert_pci_device_allocated(
self, instance_uuid, compute_node_id, num=1):
"""Assert that a given number of PCI devices are allocated to the
instance on the given host.
"""
devices = objects.PciDeviceList.get_by_instance_uuid(
self.ctxt, instance_uuid)
devices_on_host = [dev for dev in devices
if dev.compute_node_id == compute_node_id]
self.assertEqual(num, len(devices_on_host))
def test_evacuate_races_with_update_available_resource(self):
# Create a server with a direct port to have PCI allocation
server = self._create_server(
name='test-server-for-bug-1896463',
networks=[{'port': self.neutron.sriov_port['id']}],
host='host1'
)
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(
server['id'], self.compute2_id, num=0)
# stop and force down the compute the instance is on to allow
# evacuation
self.compute1.stop()
self.admin_api.put_service(
self.compute1_service_id, {'forced_down': 'true'})
# Inject some sleeps both in the Instance.drop_migration_context and
# the MigrationList.get_in_progress_and_error code to make them
# overlap.
# We want to create the following execution scenario:
# 1) The evacuation makes a move claim on the dest including the PCI
# claim. This means there is a migration context. But the evacuation
# is not complete yet so the instance.host does not point to the
# dest host.
# 2) The dest resource tracker starts an _update_available_resource()
# periodic task and this task loads the list of instances on its
# host from the DB. Our instance is not in this list due to #1.
# 3) The evacuation finishes, the instance.host is set to the dest host
# and the migration context is deleted.
        # 4) The periodic task now loads the list of in-progress migrations
        #    from the DB to check for incoming or outgoing migrations. However,
        #    due to #3, our instance is not in this list either.
# 5) The periodic task cleans up every lingering PCI claim that is not
# connected to any instance collected above from the instance list
# and from the migration list. As our instance is not in either of
# the lists, the resource tracker cleans up the PCI allocation for
# the already finished evacuation of our instance.
#
        # Unfortunately we cannot reproduce the above situation without sleeps.
        # We need the evacuation to start first, then the periodic to start but
        # not finish, then the evacuation to finish, and finally the periodic
        # to finish. Triggering and running the whole periodic in a wrapper
        # around drop_migration_context did not reproduce the situation
        # described in #4). In general it is not
#
# evac
# |
# |
# | periodic
# | |
# | |
# | x
# |
# |
# x
#
# but
#
# evac
# |
# |
# | periodic
# | |
# | |
# | |
# x |
# |
# x
#
        # that is needed.
#
# Starting the periodic from the test in a separate thread at
# drop_migration_context() might work but that is an extra complexity
# in the test code. Also it might need a sleep still to make the
# reproduction stable but only one sleep instead of two.
orig_drop = objects.Instance.drop_migration_context
def slow_drop(*args, **kwargs):
time.sleep(1)
return orig_drop(*args, **kwargs)
self.useFixture(
fixtures.MockPatch(
'nova.objects.instance.Instance.drop_migration_context',
new=slow_drop))
orig_get_mig = objects.MigrationList.get_in_progress_and_error
def slow_get_mig(*args, **kwargs):
time.sleep(2)
return orig_get_mig(*args, **kwargs)
self.useFixture(
fixtures.MockPatch(
'nova.objects.migration.MigrationList.'
'get_in_progress_and_error',
new=slow_get_mig))
self.admin_api.post_server_action(server['id'], {'evacuate': {}})
# we trigger the _update_available_resource periodic to overlap with
# the already started evacuation
self._run_periodics()
self._wait_for_server_parameter(
server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
|
from pgshovel.interfaces.common_pb2 import (
Column,
Row,
Snapshot,
Timestamp,
)
from pgshovel.utilities.conversions import (
RowConverter,
to_snapshot,
to_timestamp,
)
from tests.pgshovel.streams.fixtures import reserialize
def test_row_conversion():
converter = RowConverter(sorted=True) # maintain sort order for equality checks
row = reserialize(
Row(
columns=[
Column(name='active', boolean=True),
Column(name='biography'),
Column(name='id', integer64=9223372036854775807),
Column(name='reputation', float=1.0),
Column(name='username', string='bob'),
],
),
)
decoded = converter.to_python(row)
assert decoded == {
'id': 9223372036854775807,
'username': 'bob',
'active': True,
'reputation': 1.0,
'biography': None,
}
assert converter.to_protobuf(decoded) == row
def test_snapshot_conversion():
assert to_snapshot('1:10:') == Snapshot(
min=1,
max=10,
)
def test_snapshot_conversion_in_progress():
assert to_snapshot('1:10:2,3,4') == Snapshot(
min=1,
max=10,
active=[2, 3, 4],
)
def test_timestamp_conversion():
assert to_timestamp(1438814328.940597) == Timestamp(
seconds=1438814328,
nanos=940597057, # this is different due to floating point arithmetic
)
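# A small worked example of the conversion checked above (illustrative only,
# not an extra assertion): 1438814328.940597 splits into seconds=1438814328
# and a fractional part of ~0.940597 s; storing that fraction as nanoseconds
# goes through float arithmetic, which is why the expected value is 940597057
# rather than exactly 940597000.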
|
import matplotlib.pyplot as plt
from numpy import exp, pi, sqrt, hstack, arange
from numpy.random.mtrand import normal
def unmix(ages, ps, ts):
"""
ages = list of 2-tuples (age, 1sigma )
:param ages:
:param ps:
:param ts:
:return:
"""
niterations = 20
for _ in range(niterations):
tis_n = []
pis_n = []
for pi, ti in zip(ps, ts):
pn, tn = tj(ages, pi, ti, ps, ts)
tis_n.append(tn)
pis_n.append(pn)
ps = pis_n
ts = tis_n
return ps, ts
def si(ai, ei, ps, ts):
return sum([pk * fij(ai, ei, tk) for pk, tk in zip(ps, ts)])
def tj(ages, pj, to, ps, ts):
n = len(ages)
pj = 1 / n * sum([pj * fij(ai, ei, to) / si(ai, ei, ps, ts) for ai, ei in ages])
a = [pj * ai * fij(ai, ei, to) / (ei ** 2 * si(ai, ei, ps, ts)) for ai, ei in ages]
b = [pj * fij(ai, ei, to) / (ei ** 2 * si(ai, ei, ps, ts)) for ai, ei in ages]
return pj, sum(a) / sum(b)
def fij(ai, ei, tj):
return 1 / (ei * sqrt(2 * pi)) * exp(-((ai - tj) ** 2) / (2 * ei ** 2))
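# For reference, the routines above amount to one EM-style update pass for a
# Gaussian mixture with per-observation uncertainties (a sketch of the math as
# read from the code, not an authoritative derivation):
#   fij(a_i, e_i, t_j) = exp(-(a_i - t_j)**2 / (2*e_i**2)) / (e_i*sqrt(2*pi))
#   si(a_i, e_i)       = sum_k p_k * fij(a_i, e_i, t_k)      # mixture density
#   p_j <- (1/n) * sum_i p_j * fij(a_i, e_i, t_j) / si(a_i, e_i)
#   t_j <- sum_i [p_j*a_i*fij/(e_i**2*si)] / sum_i [p_j*fij/(e_i**2*si)]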
if __name__ == "__main__":
# [35.27,36.27] [0.59, 0.41]
# p = '/Users/ross/Sandbox/unmix_data.txt'
# with open(p, 'U') as rfile:
# reader = csv.reader(rfile, delimiter='\t')
# ages, errors = [], []
#
# for line in reader:
# age = float(line[0])
# error = float(line[1])
# ages.append(age)
# errors.append(error)
# a = np.random.normal(35, 1, 10)
# b = np.random.normal(35, 1, 10)
# c = np.random.normal(35, 1, 10)
# for ai, aj, ak in zip(a, b, c):
# ps = np.random.random_sample(3)
# t = ps.sum()
# ps = ps / t
#
# initial_guess = [[ai, aj, ak], ps]
# # print 'initial', initial_guess
# # initial_guess = [[30, 40], [0.9, 0.1]]
# print(unmix(ages, errors, initial_guess))
a = normal(35, 0.1, 10)
b = normal(35.5, 0.1, 10)
ages = hstack((a, b))
errors = [0.1] * 20
ts = [35, 35.5]
ps = [0.9, 0.1]
plt.plot(sorted(a), arange(10), "bo")
plt.plot(sorted(b), arange(10, 20, 1), "ro")
print(unmix(ages, errors, ps, ts))
plt.show()
|
import json
import os
import subprocess
import tempfile
import time
import unittest
from unittest import mock
import psutil
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.cli.commands import webserver_command
from airflow.cli.commands.webserver_command import GunicornMonitor
from airflow.utils.cli import setup_locations
from tests.test_utils.config import conf_vars
class TestGunicornMonitor(unittest.TestCase):
def setUp(self) -> None:
self.monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
mock.patch.object(self.monitor, '_generate_plugin_state', return_value={}).start()
mock.patch.object(self.monitor, '_get_num_ready_workers_running', return_value=4).start()
mock.patch.object(self.monitor, '_get_num_workers_running', return_value=4).start()
mock.patch.object(self.monitor, '_spawn_new_workers', return_value=None).start()
mock.patch.object(self.monitor, '_kill_old_workers', return_value=None).start()
mock.patch.object(self.monitor, '_reload_gunicorn', return_value=None).start()
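    # The tests below summarise GunicornMonitor._check_workers() behaviour as
    # exercised here (inferred from the assertions, not from the monitor's
    # implementation): wait while fewer workers are ready than running; kill
    # excess workers in batches of worker_refresh_batch_size; spawn missing
    # workers; spawn a refresh batch once worker_refresh_interval has elapsed;
    # reload gunicorn when the plugin state changes and then stabilises.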
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_wait_for_workers_to_start(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 0
self.monitor._get_num_workers_running.return_value = 4
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_kill_excess_workers(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 10
self.monitor._get_num_workers_running.return_value = 10
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_start_new_workers_when_missing(self, mock_sleep):
self.monitor._get_num_ready_workers_running.return_value = 2
self.monitor._get_num_workers_running.return_value = 2
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_start_new_workers_when_refresh_interval_has_passed(self, mock_sleep):
self.monitor._last_refresh_time -= 200
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_called_once_with(2) # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
@mock.patch('airflow.cli.commands.webserver_command.sleep')
def test_should_reload_when_plugin_has_been_changed(self, mock_sleep):
self.monitor._generate_plugin_state.return_value = {'AA': 12}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.monitor._generate_plugin_state.return_value = {'AA': 32}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_not_called() # pylint: disable=no-member
self.monitor._generate_plugin_state.return_value = {'AA': 32}
self.monitor._check_workers()
self.monitor._spawn_new_workers.assert_not_called() # pylint: disable=no-member
self.monitor._kill_old_workers.assert_not_called() # pylint: disable=no-member
self.monitor._reload_gunicorn.assert_called_once_with() # pylint: disable=no-member
self.assertAlmostEqual(self.monitor._last_refresh_time, time.monotonic(), delta=5)
class TestGunicornMonitorGeneratePluginState(unittest.TestCase):
@staticmethod
def _prepare_test_file(filepath: str, size: int):
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as file:
file.write("A" * size)
file.flush()
def test_should_detect_changes_in_directory(self):
with tempfile.TemporaryDirectory() as tempdir, mock.patch(
"airflow.cli.commands.webserver_command.settings.PLUGINS_FOLDER", tempdir
):
self._prepare_test_file(f"{tempdir}/file1.txt", 100)
self._prepare_test_file(f"{tempdir}/nested/nested/nested/nested/file2.txt", 200)
self._prepare_test_file(f"{tempdir}/file3.txt", 300)
monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
# When the files have not changed, the result should be constant
state_a = monitor._generate_plugin_state()
state_b = monitor._generate_plugin_state()
self.assertEqual(state_a, state_b)
self.assertEqual(3, len(state_a))
# Should detect new file
self._prepare_test_file(f"{tempdir}/file4.txt", 400)
state_c = monitor._generate_plugin_state()
self.assertNotEqual(state_b, state_c)
self.assertEqual(4, len(state_c))
# Should detect changes in files
self._prepare_test_file(f"{tempdir}/file4.txt", 450)
state_d = monitor._generate_plugin_state()
self.assertNotEqual(state_c, state_d)
self.assertEqual(4, len(state_d))
# Should support large files
self._prepare_test_file(f"{tempdir}/file4.txt", 4000000)
state_d = monitor._generate_plugin_state()
self.assertNotEqual(state_c, state_d)
self.assertEqual(4, len(state_d))
class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self):
self.children = mock.MagicMock()
self.child = mock.MagicMock()
self.process = mock.MagicMock()
self.monitor = GunicornMonitor(
gunicorn_master_pid=1,
num_workers_expected=4,
master_timeout=60,
worker_refresh_interval=60,
worker_refresh_batch_size=2,
reload_on_plugin_change=True,
)
def test_ready_prefix_on_cmdline(self):
self.child.cmdline.return_value = [settings.GUNICORN_WORKER_READY_PREFIX]
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 1)
def test_ready_prefix_on_cmdline_no_children(self):
self.process.children.return_value = []
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
def test_ready_prefix_on_cmdline_zombie(self):
self.child.cmdline.return_value = []
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
def test_ready_prefix_on_cmdline_dead_process(self):
self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(self.monitor._get_num_ready_workers_running(), 0)
class TestCliWebServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self) -> None:
self._check_processes()
self._clean_pidfiles()
def _check_processes(self, ignore_running=False):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
exit_code_pgrep_webserver = subprocess.Popen(["pgrep", "-c", "-f", "airflow webserver"]).wait()
exit_code_pgrep_gunicorn = subprocess.Popen(["pgrep", "-c", "-f", "gunicorn"]).wait()
if exit_code_pgrep_webserver != 1 or exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["ps", "-ax"]).wait()
if exit_code_pgrep_webserver != 1:
subprocess.Popen(["pkill", "-9", "-f", "airflow webserver"]).wait()
if exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["pkill", "-9", "-f", "gunicorn"]).wait()
if not ignore_running:
raise AssertionError(
"Background processes are running that prevent the test from passing successfully."
)
def tearDown(self) -> None:
self._check_processes(ignore_running=True)
self._clean_pidfiles()
def _clean_pidfiles(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
if os.path.exists(pidfile_webserver):
os.remove(pidfile_webserver)
if os.path.exists(pidfile_monitor):
os.remove(pidfile_monitor)
def _wait_pidfile(self, pidfile):
start_time = time.monotonic()
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception: # pylint: disable=broad-except
                if time.monotonic() - start_time > 60:
raise
time.sleep(1)
def test_cli_webserver_foreground(self):
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(["airflow", "webserver"])
self.assertEqual(None, proc.poll())
# Wait for process
time.sleep(10)
# Terminate webserver
proc.terminate()
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(60), (-15, 0))
def test_cli_webserver_foreground_with_pid(self):
with tempfile.TemporaryDirectory(prefix='tmp-pid') as tmpdir:
pidfile = f"{tmpdir}/pidfile"
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
self.assertEqual(None, proc.poll())
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
proc.terminate()
self.assertEqual(0, proc.wait(60))
@pytest.mark.quarantined
def test_cli_webserver_background(self):
with tempfile.TemporaryDirectory(prefix="gunicorn") as tmpdir, mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
pidfile_webserver = f"{tmpdir}/pidflow-webserver.pid"
pidfile_monitor = f"{tmpdir}/pidflow-webserver-monitor.pid"
stdout = f"{tmpdir}/airflow-webserver.out"
stderr = f"{tmpdir}/airflow-webserver.err"
logfile = f"{tmpdir}/airflow-webserver.log"
try:
# Run webserver as daemon in background. Note that the wait method is not called.
proc = subprocess.Popen(
[
"airflow",
"webserver",
"--daemon",
"--pid",
pidfile_webserver,
"--stdout",
stdout,
"--stderr",
stderr,
"--log-file",
logfile,
]
)
self.assertEqual(None, proc.poll())
pid_monitor = self._wait_pidfile(pidfile_monitor)
self._wait_pidfile(pidfile_webserver)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(
0, subprocess.Popen(["pgrep", "-f", "-c", "airflow webserver --daemon"]).wait()
)
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "-f", "gunicorn: master"]).wait())
# Terminate monitor process.
proc = psutil.Process(pid_monitor)
proc.terminate()
self.assertIn(proc.wait(120), (0, None))
self._check_processes()
except Exception:
# List all logs
subprocess.Popen(["ls", "-lah", tmpdir]).wait()
# Dump all logs
subprocess.Popen(["bash", "-c", f"ls {tmpdir}/* | xargs -n 1 -t cat"]).wait()
raise
# Patch for causing webserver timeout
@mock.patch(
"airflow.cli.commands.webserver_command.GunicornMonitor._get_num_workers_running", return_value=0
)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
webserver_command.webserver(args)
self.assertEqual(e.exception.code, 1)
def test_cli_webserver_debug(self):
env = os.environ.copy()
proc = psutil.Popen(["airflow", "webserver", "--debug"], env=env)
time.sleep(3) # wait for webserver to start
return_code = proc.poll()
self.assertEqual(
None, return_code, f"webserver terminated with return code {return_code} in debug mode"
)
proc.terminate()
self.assertEqual(-15, proc.wait(60))
def test_cli_webserver_access_log_format(self):
# json access log format
access_logformat = (
"{\"ts\":\"%(t)s\",\"remote_ip\":\"%(h)s\",\"request_id\":\"%({"
"X-Request-Id}i)s\",\"code\":\"%(s)s\",\"request_method\":\"%(m)s\","
"\"request_path\":\"%(U)s\",\"agent\":\"%(a)s\",\"response_time\":\"%(D)s\","
"\"response_length\":\"%(B)s\"} "
)
with tempfile.TemporaryDirectory() as tmpdir, mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1",
):
access_logfile = f"{tmpdir}/access.log"
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(
[
"airflow",
"webserver",
"--access-logfile",
access_logfile,
"--access-logformat",
access_logformat,
]
)
self.assertEqual(None, proc.poll())
# Wait for webserver process
time.sleep(10)
proc2 = subprocess.Popen(["curl", "http://localhost:8080"])
proc2.wait(10)
            try:
                with open(access_logfile) as file:
                    log = json.loads(file.read())
                self.assertEqual('127.0.0.1', log.get('remote_ip'))
                self.assertEqual(len(log), 9)
                self.assertEqual('GET', log.get('request_method'))
            except OSError:
                print("access log file not found at " + access_logfile)
# Terminate webserver
proc.terminate()
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(60), (-15, 0))
self._check_processes()
|
from calvin.runtime.south.plugins.async import async
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class TimerEvent(async.DelayedCall):
def __init__(self, actor_id, delay, trigger_loop, repeats=False):
super(TimerEvent, self).__init__(delay, callback=self.trigger)
self._actor_id = actor_id
self._triggered = False
self.trigger_loop = trigger_loop
self.repeats = repeats
_log.debug("Set calvinsys timer %f %s on %s" % (delay, "repeat" if self.repeats else "", self._actor_id))
@property
def triggered(self):
return self._triggered
def ack(self):
self._triggered = False
def trigger(self):
_log.debug("Trigger calvinsys timer on %s" % (self._actor_id))
self._triggered = True
if self.repeats:
self.reset()
self.trigger_loop(actor_ids=[self._actor_id])
class TimerHandler(object):
def __init__(self, node, actor):
super(TimerHandler, self).__init__()
self._actor = actor
self.node = node
def once(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop)
def repeat(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop, repeats=True)
def register(node, actor, events=None):
"""
    register() is called when the Event-system object is created.
    Place an object in the event object - in this case the
    node's only timer object.
Also register any hooks for actor migration.
@TODO: Handle migration (automagically and otherwise.)
"""
return TimerHandler(node=node, actor=actor)
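# A minimal usage sketch (the node and actor objects are assumptions made for
# illustration; in Calvin they are supplied by the runtime):
#
#   handler = register(node, actor)
#   timer = handler.repeat(0.5)   # re-arms itself after each firing
#   ...
#   if timer.triggered:           # set by trigger() when the delay elapses
#       timer.ack()               # clear the flag until the next firing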
|
import io
import os
import unittest
import logging
import uuid
from mediafire import MediaFireApi, MediaFireUploader, UploadSession
from mediafire.uploader import UPLOAD_SIMPLE_LIMIT_BYTES
APP_ID = '42511'
MEDIAFIRE_EMAIL = os.environ.get('MEDIAFIRE_EMAIL')
MEDIAFIRE_PASSWORD = os.environ.get('MEDIAFIRE_PASSWORD')
class MediaFireSmokeBaseTestCase(object):
"""Smoke tests for API"""
class BaseTest(unittest.TestCase):
def setUp(self):
# Reset logging to info to avoid leaking credentials
logger = logging.getLogger('mediafire.api')
logger.setLevel(logging.INFO)
self.api = MediaFireApi()
session = self.api.user_get_session_token(
app_id=APP_ID, email=MEDIAFIRE_EMAIL,
password=MEDIAFIRE_PASSWORD)
self.api.session = session
@unittest.skipIf('CI' not in os.environ, "Running outside CI environment")
class MediaFireSmokeSimpleTest(MediaFireSmokeBaseTestCase.BaseTest):
"""Simple tests"""
def test_user_get_info(self):
result = self.api.user_get_info()
self.assertEqual(result["user_info"]["display_name"],
u"Coalmine Smoketest")
@unittest.skipIf('CI' not in os.environ, "Running outside CI environment")
class MediaFireSmokeWithDirectoryTest(MediaFireSmokeBaseTestCase.BaseTest):
"""Smoke tests requiring temporary directory"""
def setUp(self):
super(MediaFireSmokeWithDirectoryTest, self).setUp()
folder_uuid = str(uuid.uuid4())
result = self.api.folder_create(foldername=folder_uuid)
self.folder_key = result["folder_key"]
def tearDown(self):
self.api.folder_purge(self.folder_key)
def test_upload_small(self):
"""Test simple upload"""
# make sure we most likely will get upload/simple
data = b'This is a tiny file content: ' + os.urandom(32)
fd = io.BytesIO(data)
uploader = MediaFireUploader(self.api)
with UploadSession(self.api):
result = uploader.upload(fd, 'smallfile.txt',
folder_key=self.folder_key)
self.assertIsNotNone(result.quickkey)
self.assertEqual(result.action, 'upload/simple')
def test_upload_large(self):
"""Test large file upload"""
# make sure we will get upload/resumable, prefix + 4MiB
data = b'Long line is long: ' + os.urandom(UPLOAD_SIMPLE_LIMIT_BYTES)
fd = io.BytesIO(data)
uploader = MediaFireUploader(self.api)
with UploadSession(self.api):
result = uploader.upload(fd, 'bigfile.txt',
folder_key=self.folder_key)
self.assertIsNotNone(result.quickkey)
self.assertEqual(result.action, 'upload/resumable')
if __name__ == "__main__":
unittest.main()
|
from nose.tools import (assert_is_none, assert_is_instance, assert_in,
assert_is_not_none, assert_true, assert_false,
assert_equal)
from datetime import datetime
from mongoengine import connect
from qirest_client.model.subject import Subject
from qirest_client.model.uom import Weight
from qirest_client.model.clinical import (Biopsy, Surgery, Drug)
from qirest.test.helpers import seed
MODELING_RESULT_PARAMS = ['fxl_k_trans', 'fxr_k_trans', 'delta_k_trans', 'v_e', 'tau_i']
"""The test seed modeling result parameters."""
class TestSeed(object):
"""
This TestSeed class tests the seed helper utility.
    Note: this test drops the ``qiprofile_test`` Mongo database
at the beginning and end of execution.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
self._subjects = seed.seed()
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_serialization(self):
for saved_sbj in self._subjects:
query = dict(project=saved_sbj.project,
collection=saved_sbj.collection,
number=saved_sbj.number)
fetched_sbj = Subject.objects.get(**query)
self._validate_subject(fetched_sbj)
SESSION_CNT = dict(
Breast=4,
Sarcoma=3
)
def test_reseed(self):
subjects = seed.seed()
expected = set(str(sbj) for sbj in self._subjects)
actual = set(str(sbj) for sbj in subjects)
assert_equal(actual, expected, "Reseed result is incorrect -"
"\nexpected:\n%s\nfound:\n%s" %
(expected, actual))
def _validate_subject(self, subject):
collections = ((coll.name for coll in seed.COLLECTION_BUILDERS))
assert_in(subject.collection, collections,
"Collection is invalid: %s" % subject.collection)
self._validate_demographics(subject)
        self._validate_clinical_data(subject)
self._validate_sessions(subject)
def _validate_demographics(self, subject):
assert_is_not_none(subject.gender, "%s is missing gender" % subject)
    def _validate_clinical_data(self, subject):
# There are three treatments.
self._validate_treatments(subject)
# Validate the clinical encounters.
self._validate_clinical_encounters(subject)
def _validate_treatments(self, subject):
# There are three treatments.
treatments = subject.treatments
assert_is_not_none(treatments, "%s has no treatments" % subject)
assert_equal(len(treatments), 3,
"%s Subject %d treatments count is incorrect: %d" %
(subject.collection, subject.number, len(treatments)))
# Breast has neoadjuvant drugs.
if subject.collection == 'Breast':
self._validate_breast_treatments(subject, treatments)
def _validate_breast_treatments(self, subject, treatments):
# Breast has neoadjuvant drugs.
neo_rx = next(((trt for trt in treatments if trt.treatment_type == 'Neoadjuvant')),
None)
        assert_is_not_none(neo_rx, ("%s Subject %d is missing a neoadjuvant" +
                                    " treatment") % (subject.collection, subject.number))
dosages = neo_rx.dosages
        assert_equal(len(dosages), 2,
                     (("%s Subject %d neoadjuvant treatment dosage count is" +
                       " incorrect: %d") % (subject.collection, subject.number, len(dosages))))
# Validate the agent type and dosage unit.
for dosage in dosages:
agent = dosage.agent
assert_is_instance(agent, Drug,
"%s Subject %d neoadjuvant agent is not a drug" %
(subject.collection, subject.number))
amount = dosage.amount
            assert_is_not_none(amount, ("%s Subject %d is missing a neoadjuvant drug" +
                                        " dosage amount") % (subject.collection, subject.number))
def _validate_clinical_encounters(self, subject):
# There are two clinical encounters.
cln_encs = list(subject.clinical_encounters)
assert_is_not_none(cln_encs, "%s has no encounters" % subject)
assert_equal(len(cln_encs), 2,
"%s Subject %d encounter count is incorrect: %d" %
(subject.collection, subject.number, len(cln_encs)))
# Each encounter has a subject weight.
for enc in cln_encs:
assert_is_not_none(enc.weight, "%s encounter %s is missing the"
" subject weight" % (subject, enc))
assert_is_instance(enc.weight, int,
"%s encounter %s weight type is incorrect: %s" %
(subject, enc, enc.weight.__class__))
# There is a biopsy with a pathology report.
biopsy = next((enc for enc in cln_encs if isinstance(enc, Biopsy)),
None)
assert_is_not_none(biopsy, "%s Subject %d is missing a biopsy" %
(subject.collection, subject.number))
self._validate_pathology(subject, biopsy.pathology)
# Breast pre-neoadjuvant biopsy does not have a RCB.
if subject.collection == 'Breast':
tumor_pathology = biopsy.pathology.tumors[0]
assert_is_none(tumor_pathology.rcb,
"%s biopsy pathology report incorrectly has a RCB"
" status" % subject)
# There is a surgery with a pathology report.
surgery = next((enc for enc in cln_encs if isinstance(enc, Surgery)),
None)
assert_is_not_none(surgery, "%s Subject %d is missing a surgery" %
(subject.collection, subject.number))
assert_is_not_none(surgery.pathology,
"%s surgery is missing a pathology report" % subject)
self._validate_pathology(subject, surgery.pathology)
# Surgery has a RCB.
if subject.collection == 'Breast':
tumor_pathology = surgery.pathology.tumors[0]
assert_is_not_none(tumor_pathology.rcb,
"%s surgery pathology report is missing a"
" RCB status" % subject)
def _validate_pathology(self, subject, pathology_report):
assert_is_not_none(pathology_report, "%s is missing a pathology"
" report" % subject)
        assert_false(len(pathology_report.tumors) == 0,
                     "%s has no pathology tumor report" % subject)
for tumor_pathology in pathology_report.tumors:
self._validate_tnm(subject, tumor_pathology.tnm)
# The tumor-specific tests.
if subject.collection == 'Breast':
self._validate_breast_pathology(subject, tumor_pathology)
elif subject.collection == 'Sarcoma':
self._validate_sarcoma_pathology(subject, tumor_pathology)
def _validate_tnm(self, subject, tnm):
assert_is_not_none(tnm, "%s is missing a TNM" % subject)
assert_is_not_none(tnm.tumor_type,
"%s TNM is missing the tumor type" % subject)
assert_is_not_none(tnm.grade,
"%s TNM is missing the grade" % subject)
assert_is_not_none(tnm.size,
"%s TNM is missing the composite size object" %
subject)
assert_is_not_none(tnm.size.tumor_size,
"%s TNM is missing the size score" % subject)
assert_is_not_none(tnm.lymph_status,
"%s TNM is missing the lymph status" % subject)
assert_is_not_none(tnm.lymphatic_vessel_invasion,
"%s TNM is missing the lymphati vessel invasion"
% subject)
assert_is_not_none(tnm.metastasis,
"%s TNM is missing the metastasis" % subject)
def _validate_breast_pathology(self, subject, pathology):
estrogen = next((hr for hr in pathology.hormone_receptors
if hr.hormone == 'estrogen'),
None)
assert_is_not_none(estrogen, "%s pathology report is missing"
" an estrogen status" % subject)
progesterone = next((hr for hr in pathology.hormone_receptors
if hr.hormone == 'progesterone'),
None)
assert_is_not_none(progesterone, "%s pathology report is missing a"
" progesterone status" % subject)
assert_is_not_none(pathology.genetic_expression,
"%s pathology report is missing a genetic"
" expression status" % subject)
assert_is_not_none(pathology.genetic_expression.her2_neu_ihc,
"%s pathology report is missing a"
" HER2 NEU IHC status" % subject)
assert_is_not_none(pathology.genetic_expression.her2_neu_fish,
"%s pathology report is missing a"
" HER2 NEU FISH status" % subject)
assert_is_not_none(pathology.genetic_expression.ki67,
"%s pathology report is missing a"
" Ki67 status" % subject)
# The first breast subject has value overrides.
if subject.number == 1:
assert_true(estrogen.positive, "The first Breast subject is not"
" estrogen-receptor-positive")
assert_equal(pathology.tnm.lymph_status, 0,
"The first Breast subject lymph status is incorrect")
# A subject who is estrogen-receptor-positive and has no lymph nodes
# has a normalized assay.
if estrogen.positive and not pathology.tnm.lymph_status:
assay = pathology.genetic_expression.normalized_assay
assert_is_not_none(assay, "%s pathology report with HER2"
" positive and no lymph nodes is missing"
" a normalized assay" % subject)
assert_is_not_none(assay.gstm1, "%s pathology report"
" normalized assay is missing"
" a GSTM1 result" % subject)
assert_is_not_none(assay.cd68, "%s pathology report"
" normalized assay is missing"
" a CD68 result" % subject)
assert_is_not_none(assay.bag1, "%s pathology report"
" normalized assay is missing"
" a BAG1 result" % subject)
assert_is_not_none(assay.her2, "%s pathology report"
" normalized assay is missing"
" the HER2 group" % subject)
assert_is_not_none(assay.estrogen, "%s pathology report"
" normalized assay is missing"
" the estrogen group" % subject)
assert_is_not_none(assay.proliferation, "%s pathology report"
" normalized assay is"
" missing the proliferation"
" group" % subject)
assert_is_not_none(assay.invasion, "%s pathology report"
" normalized assay is missing"
" the invasion group" % subject)
def _validate_sarcoma_pathology(self, subject, pathology):
assert_is_not_none(pathology.location,
"%s pathology report is missing a tumor location" % subject)
def _validate_sessions(self, subject):
sessions = list(subject.sessions)
assert_is_not_none(sessions, "%s has no sessions" % subject)
session_cnt = TestSeed.SESSION_CNT[subject.collection]
assert_equal(len(sessions), session_cnt, "%s session count is incorrect: %d" %
(subject, len(sessions)))
for i, session in enumerate(sessions):
# Set a session number for reporting.
session.number = i + 1
self._validate_session(subject, session)
def _validate_session(self, subject, session):
assert_is_not_none(session.date,
"%s session %d is missing the acquisition date" %
(subject, session.number))
assert_is_instance(session.date, datetime,
"%s session %d acquisition date type is incorrect: %s" %
(subject, session.number, session.date.__class__))
self._validate_modeling(subject, session)
self._validate_session_detail(subject, session)
def _validate_modeling(self, subject, session):
# The registration is modeled.
assert_equal(len(session.modelings), 1,
"%s session %d modeling length is incorrect: %d" %
(subject, session.number, len(session.modelings)))
modeling = session.modelings[0]
assert_is_not_none(modeling.resource,
"%s session %d is missing the modeling resource" %
(subject, session.number))
assert_is_not_none(modeling.protocol,
"%s session %d modeling %s is missing the protocol" %
(subject, session.number, modeling.resource))
assert_is_not_none(modeling.source,
"%s session %d modeling %s is missing the source" %
(subject, session.number, modeling.resource))
# Validate the modeling result.
for param in MODELING_RESULT_PARAMS:
value = modeling.result[param]
assert_is_not_none(value,
"%s Subject %d modeling %s is missing a %s parameter" %
(subject.collection, subject.number, modeling.resource, param))
assert_is_not_none(value.image,
"%s Subject %d modeling %s is missing a %s image" %
(subject.collection, subject.number, modeling.resource, param))
metadata = value.image.metadata
assert_is_not_none(metadata,
"%s Subject %d modeling %s is missing %s metadata" %
(subject.collection, subject.number, modeling.resource, param))
avg = metadata.get('average_intensity')
assert_is_not_none(avg,
"%s Subject %d modeling %s is missing %s intensity" %
(subject.collection, subject.number, modeling.resource, param))
# The delta Ktrans result has an overlay.
label_map = modeling.result['delta_k_trans'].label_map
assert_is_not_none(label_map,
"%s Subject %d modeling is missing a label_map" %
(subject.collection, subject.number))
assert_is_not_none(label_map.name,
"%s Subject %d modeling label map is missing a file name" %
(subject.collection, subject.number))
assert_is_not_none(label_map.color_table,
"%s Subject %d modeling label map is missing a color table" %
(subject.collection, subject.number))
def _validate_session_detail(self, subject, session):
assert_is_not_none(session.detail, "%s session %d is missing detail" %
(subject, session.number))
# Validate the scans.
scans = session.detail.scans
assert_equal(len(scans), 2, "%s session %d scan count is incorrect: %d" %
(subject, session.number, len(scans)))
# The T1 scan.
scan = scans[0]
coll = seed.builder_for(subject.collection)
expected_volume_cnt = coll.options.volume_count
assert_equal(len(scan.volumes.images), expected_volume_cnt,
"%s session %d scan %d volumes count is incorrect: %d" %
(subject, session.number, scan.number, len(scan.volumes.images)))
for i, image in enumerate(scan.volumes.images):
assert_is_not_none(image.metadata,
"%s session %d scan %d volume %d is missing metadata" %
(subject, session.number, scan.number, i + 1))
avg = image.metadata.get('average_intensity')
assert_is_not_none(avg,
"%s session %d scan %d volume %d is missing an intensity" %
(subject, session.number, scan.number, i + 1))
# Verify that intensities are floats.
assert_true(isinstance(avg, float),
"%s session %d scan %d volume %d intensity type is"
" incorrect for value %s: %s" %
(subject, session.number, scan.number, i + 1, avg, avg.__class__))
# Validate the registration.
regs = scan.registrations
assert_equal(len(regs), 1, "%s session %d scan %d registration count"
" is incorrect: %d" %
(subject, session.number, scan.number, len(regs)))
for reg in regs:
for i, image in enumerate(reg.volumes.images):
assert_is_not_none(image.metadata,
"%s session %d scan %d registration %s volume %d"
" is missing metadata" %
(subject, session.number, scan.number,
reg.volumes.name, i + 1))
avg = image.metadata.get('average_intensity')
assert_is_not_none(avg,
"%s session %d scan %d registration %s volume %d"
" is missing an intensity" %
(subject, session.number, scan.number,
reg.volumes.name, i + 1))
assert_true(isinstance(avg, float),
"%s session %d scan %d registration %s volume %d intensity"
" type is incorrect for value %s: %s" %
(subject, session.number, scan.number, reg.volumes.name,
i + 1, avg, avg.__class__))
# The T2 scan has one volume without an intensity value.
scan = scans[1]
assert_equal(len(scan.volumes.images), 1,
"%s session %d scan %d volumes count is incorrect: %d" %
(subject, session.number, scan.number, len(scan.volumes.images)))
image = scan.volumes.images[0]
assert_true(not image.metadata,
"%s session %d scan %d volume incorrectly has metadata" %
(subject, session.number, scan.number))
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
import rppy
import numpy as np
import matplotlib.pyplot as plt
vp1 = 3000
vs1 = 1500
p1 = 2000
e1_1 = 0.0
d1_1 = 0.0
y1_1 = 0.0
e2_1 = 0.0
d2_1 = 0.0
y2_1 = 0.0
d3_1 = 0.0
chi1 = 0.0
C1 = rppy.reflectivity.Cij(vp1, vs1, p1, e1_1, d1_1, y1_1, e2_1, d2_1, y2_1, d3_1)
vp2 = 4000
vs2 = 2000
p2 = 2200
e1_2 = 0.0
d1_2 = 0.0
y1_2 = 0.0
e2_2 = 0.0
d2_2 = 0.0
y2_2 = 0.0
d3_2 = 0.0
chi2 = 0.0
C2 = rppy.reflectivity.Cij(vp2, vs2, p2, e1_2, d1_2, y1_2, e2_2, d2_2, y2_2, d3_2)
phi = np.arange(0, 90, 1)
theta = np.arange(0, 90, 1)
loopang = phi
theta = np.array([30])
rphti = np.zeros(np.shape(loopang))
rpzoe = np.zeros(np.shape(loopang))
rprug = np.zeros(np.shape(loopang))
for aid, val in enumerate(loopang):
rphti[aid] = rppy.reflectivity.exact_ortho(C1, p1, C2, p2, chi1, chi2, loopang[aid], theta)
rprug[aid] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e2_1, d2_1, y2_1, vp2, vs2, p2, e2_2, d2_2, y2_2, np.radians(theta), np.radians(loopang[aid]))
rpzoe[aid] = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, np.radians(theta))
plt.figure(1)
plt.plot(loopang, rphti, loopang, rprug, loopang, rpzoe)
plt.legend(['hti', 'ruger', 'zoe'])
plt.show()
|
"""
WSGI config for skeleton project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
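# A minimal sketch of the WSGI-middleware wrapping described in the module
# docstring above; the middleware itself is an illustrative assumption, not
# part of this project:
#
#   def health_check_middleware(app):
#       def wrapper(environ, start_response):
#           if environ.get("PATH_INFO") == "/healthz":
#               start_response("200 OK", [("Content-Type", "text/plain")])
#               return [b"ok"]
#           return app(environ, start_response)
#       return wrapper
#
#   application = health_check_middleware(application)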
|
from invoke import task, Collection
@task
def toplevel(ctx):
pass
@task
def subtask(ctx):
pass
ns = Collection(
toplevel,
Collection('a', subtask,
Collection('nother', subtask)
)
)
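# With this nesting, `invoke --list` should expose the tasks as (an assumption
# based on invoke's namespacing rules, not verified here):
#   toplevel, a.subtask, a.nother.subtask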
|
from cStringIO import StringIO
from datetime import datetime
from unidecode import unidecode
from handler import Patobj, PatentHandler
import re
import uuid
import xml.sax
import xml_util
import xml_driver
xml_string = 'ipg050104.xml'
xh = xml_driver.XMLHandler()
parser = xml_driver.make_parser()
parser.setContentHandler(xh)
parser.setFeature(xml_driver.handler.feature_external_ges, False)
l = xml.sax.xmlreader.Locator()
xh.setDocumentLocator(l)
parser.parse(xml_string)
print "parsing done"
print xh.root.claims.contents_of('claim', '', as_string=True, upper=False)
|
"""Translates between LCLS events and Hummingbird ones"""
from __future__ import print_function # Compatibility with python 2 and 3
import os
import logging
from backend.event_translator import EventTranslator
from backend.record import Record, add_record
import psana
import numpy
import datetime
from pytz import timezone
from . import ureg
from backend import Worker
import ipc
from hummingbird import parse_cmdline_args
_argparser = None
def add_cmdline_args():
global _argparser
from utils.cmdline_args import argparser
_argparser = argparser
group = _argparser.add_argument_group('LCLS', 'Options for the LCLS event translator')
group.add_argument('--lcls-run-number', metavar='lcls_run_number', nargs='?',
help="run number",
type=int)
group.add_argument('--lcls-number-of-frames', metavar='lcls_number_of_frames', nargs='?',
help="number of frames to be processed",
type=int)
# ADUthreshold for offline analysis
#group.add_argument('--ADUthreshold', metavar='ADUthreshold', nargs='?',
# help="ADU threshold",
# type=int)
# Hitscore threshold for offline analysis
#group.add_argument('--hitscore-thr', metavar='hitscore_thr', nargs='?',
# help="Hitscore threshold",
# type=int)
# Output directory for offline analysis
#group.add_argument('--out-dir', metavar='out_dir', nargs='?',
# help="Output directory",
# type=str)
# Reduce output from offline analysis
#group.add_argument('--reduced-output',
# help="Write only very few data to output file",
# action='store_true')
PNCCD_IDS = ['pnccdFront', 'pnccdBack']
ACQ_IDS = [('ACQ%i' % i) for i in range(1,4+1)]
class LCLSTranslator(object):
"""Translate between LCLS events and Hummingbird ones"""
def __init__(self, state):
self.timestamps = None
self.library = 'psana'
config_file = None
if('LCLS/PsanaConf' in state):
config_file = os.path.abspath(state['LCLS/PsanaConf'])
elif('LCLS' in state and 'PsanaConf' in state['LCLS']):
config_file = os.path.abspath(state['LCLS']['PsanaConf'])
if(config_file is not None):
if(not os.path.isfile(config_file)):
raise RuntimeError("Could not find [LCLS][PsanaConf]: %s" %
(config_file))
logging.info("Info: Found configuration file %s.", config_file)
psana.setConfigFile(config_file)
if 'LCLS/CalibDir' in state:
calibdir = state['LCLS/CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
elif('LCLS' in state and 'CalibDir' in state['LCLS']):
calibdir = state['LCLS']['CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
if('LCLS/DataSource' in state):
dsrc = state['LCLS/DataSource']
elif('LCLS' in state and 'DataSource' in state['LCLS']):
dsrc = state['LCLS']['DataSource']
else:
raise ValueError("You need to set the '[LCLS][DataSource]'"
" in the configuration")
cmdline_args = _argparser.parse_args()
self.N = cmdline_args.lcls_number_of_frames
if cmdline_args.lcls_run_number is not None:
dsrc += ":run=%i" % cmdline_args.lcls_run_number
# Cache times of events that shall be extracted from XTC (does not work for stream)
self.event_slice = slice(0,None,1)
if 'times' in state or 'fiducials' in state:
if not ('times' in state and 'fiducials' in state):
raise ValueError("Times or fiducials missing in state."
" Extraction of selected events expects both event identifiers")
if dsrc[:len('exp=')] != 'exp=':
raise ValueError("Extraction of events with given times and fiducials"
" only works when reading from XTC with index files")
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
self.times = state['times']
self.fiducials = state['fiducials']
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
elif 'indexing' in state:
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
if 'index_offset' in state:
self.i = state['index_offset'] / ipc.mpi.nr_event_readers()
else:
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
self.timestamps = self.run.times()
if self.N is not None:
self.timestamps = self.timestamps[:self.N]
self.timestamps = self.timestamps[ipc.mpi.event_reader_rank()::ipc.mpi.nr_event_readers()]
else:
self.times = None
self.fiducials = None
self.i = 0
if not dsrc.startswith('shmem='):
self.event_slice = slice(ipc.mpi.event_reader_rank(), None, ipc.mpi.nr_event_readers())
self.data_source = psana.DataSource(dsrc)
self.run = None
# Define how to translate between LCLS types and Hummingbird ones
self._n2c = {}
self._n2c[psana.Bld.BldDataFEEGasDetEnergy] = 'pulseEnergies'
self._n2c[psana.Bld.BldDataFEEGasDetEnergyV1] = 'pulseEnergies'
self._n2c[psana.Lusi.IpmFexV1] = 'pulseEnergies'
self._n2c[psana.Camera.FrameV1] = 'camera'
# Guard against old(er) psana versions
try:
self._n2c[psana.Bld.BldDataEBeamV1] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV2] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV3] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV4] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV5] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV6] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV7] = 'photonEnergies'
except AttributeError:
pass
# CXI (CsPad)
self._n2c[psana.CsPad.DataV2] = 'photonPixelDetectors'
self._n2c[psana.CsPad2x2.ElementV1] = 'photonPixelDetectors'
# CXI (OffAxis Cam)
#self._n2c[psana.Camera.FrameV1] = 'photonPixelDetectors'
# AMO (pnCCD)
self._n2c[psana.PNCCD.FullFrameV1] = 'photonPixelDetectors'
self._n2c[psana.PNCCD.FramesV1] = 'photonPixelDetectors'
# --
self._n2c[psana.Acqiris.DataDescV1] = 'ionTOFs'
self._n2c[psana.EventId] = 'eventID'
# Guard against old(er) psana versions
try:
self._n2c[psana.EvrData.DataV3] = 'eventCodes'
self._n2c[psana.EvrData.DataV4] = 'eventCodes'
except AttributeError:
pass
# Calculate the inverse mapping
self._c2n = {}
for k, v in self._n2c.iteritems():
self._c2n[v] = self._c2n.get(v, [])
self._c2n[v].append(k)
# Define how to translate between LCLS sources and Hummingbird ones
self._s2c = {}
# CXI (OnAxis Cam)
self._s2c['DetInfo(CxiEndstation.0:Opal4000.1)'] = 'Sc2Questar'
# CXI (OffAxis Cam)
self._s2c['DetInfo(CxiEndstation.0.Opal11000.0)'] = 'Sc2Offaxis'
# CXI (CsPad)
self._s2c['DetInfo(CxiDs1.0:Cspad.0)'] = 'CsPad Ds1'
self._s2c['DetInfo(CxiDsd.0:Cspad.0)'] = 'CsPad Dsd'
self._s2c['DetInfo(CxiDs2.0:Cspad.0)'] = 'CsPad Ds2'
self._s2c['DetInfo(CxiDg3.0:Cspad2x2.0)'] = 'CsPad Dg3'
self._s2c['DetInfo(CxiDg2.0:Cspad2x2.0)'] = 'CsPad Dg2'
# AMO (pnCCD)
self._s2c['DetInfo(Camp.0:pnCCD.1)'] = 'pnccdBack'
self._s2c['DetInfo(Camp.0:pnCCD.0)'] = 'pnccdFront'
# ToF detector
self._s2c['DetInfo(AmoEndstation.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(AmoEndstation.0:Acqiris.1)'] = 'Acqiris 1'
self._s2c['DetInfo(AmoEndstation.0:Acqiris.2)'] = 'Acqiris 2'
# AMO (Acqiris)
self._s2c['DetInfo(AmoETOF.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(AmoETOF.0:Acqiris.1)'] = 'Acqiris 1'
self._s2c['DetInfo(AmoITOF.0:Acqiris.0)'] = 'Acqiris 2'
self._s2c['DetInfo(AmoITOF.0:Acqiris.1)'] = 'Acqiris 3'
# MCP Camera
self._s2c['DetInfo(AmoEndstation.0:Opal1000.1)'] = 'OPAL1'
# CXI (Acqiris)
self._s2c['DetInfo(CxiEndstation.0:Acqiris.0)'] = 'Acqiris 0'
self._s2c['DetInfo(CxiEndstation.0:Acqiris.1)'] = 'Acqiris 1'
self.init_detectors(state)
#print("Detectors:" , psana.DetNames())
def init_detectors(self, state):
# New psana call pattern
self._detectors = {}
self._c2id_detectors = {}
if 'detectors' in state:
for detid, det_dict in state['detectors'].items():
if detid in PNCCD_IDS:
self._detectors[detid] = {}
self._detectors[detid]['id'] = det_dict['id']
self._detectors[detid]['type'] = det_dict['type']
self._detectors[detid]['key'] = det_dict['key']
obj = psana.Detector(det_dict['id'])
self._detectors[detid]['obj'] = obj
meth = det_dict['data_method']
if meth == "image":
f = lambda obj, evt: obj.image(evt)
elif meth == "calib":
f = lambda obj, evt: obj.calib(evt)
elif meth == "raw":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
raw = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True)
return raw
elif meth == "calib_pc":
def f(obj, evt):
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
return cdata
elif meth == "calib_cmc":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
rnum = obj.runnum(evt)
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
obj.common_mode_apply(rnum, cdata, cmpars=None)
return cdata
elif meth == "calib_gc":
def f(obj, evt):
#obj = self._detectors[detid]['obj']
rnum = obj.runnum(evt)
cdata = numpy.array(obj.raw(evt), dtype=numpy.float32, copy=True) - obj.pedestals(evt)
obj.common_mode_apply(rnum, cdata, cmpars=None)
gain = obj.gain(evt)
cdata *= gain
return cdata
else:
raise RuntimeError('data_method = %s not supported' % meth)
self._detectors[detid]['data_method'] = f
self._c2id_detectors[det_dict['type']] = detid
print("Set data method for detector id %s to %s." % (det_dict['id'], meth))
elif detid in ACQ_IDS:
self._detectors[detid] = {}
self._detectors[detid]['id'] = det_dict['id']
self._detectors[detid]['type'] = det_dict['type']
self._detectors[detid]['keys'] = det_dict['keys']
obj = psana.Detector(det_dict['id'])
self._detectors[detid]['obj'] = obj
self._c2id_detectors[det_dict['type']] = detid
else:
                    raise RuntimeError('Detector type = %s not implemented for ID %s' % (det_dict['type'], detid))
def next_event(self):
"""Grabs the next event and returns the translated version"""
if self.timestamps:
try:
evt = self.run.event(self.timestamps[self.i])
except (IndexError, StopIteration) as e:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
self.i += 1
elif self.times is not None:
evt = None
while self.i < len(self.times) and evt is None:
time = psana.EventTime(int(self.times[self.i]), self.fiducials[self.i])
self.i += 1
evt = self.run.event(time)
if evt is None:
print("Unable to find event listed in index file")
# We got to the end without a valid event, time to call it a day
if evt is None:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
else:
try:
while (self.i % self.event_slice.step) != self.event_slice.start:
evt = self.data_source.events().next()
self.i += 1
if self.N is not None and self.i >= self.N:
raise StopIteration
evt = self.data_source.events().next()
self.i += 1
except StopIteration:
#if 'end_of_run' in dir(Worker.conf):
# Worker.conf.end_of_run()
#ipc.mpi.slave_done()
return None
return EventTranslator(evt, self)
def event_keys(self, evt):
"""Returns the translated keys available"""
native_keys = evt.keys()
common_keys = set()
for k in native_keys:
for c in self._native_to_common(k):
common_keys.add(c)
# parameters corresponds to the EPICS values, analysis is for values added later on
return list(common_keys)+['parameters']+['analysis']
def _native_to_common(self, key):
"""Translates a native key to a hummingbird one"""
if(key.type() in self._n2c):
return [self._n2c[key.type()]]
else:
return []
def event_native_keys(self, evt):
"""Returns the native keys available"""
return evt.keys()
def translate(self, evt, key):
"""Returns a dict of Records that match a given humminbird key"""
values = {}
if(key in self._c2id_detectors):
return self.translate_object(evt, key)
elif(key in self._c2n):
return self.translate_core(evt, key)
elif(key == 'parameters'):
return self._tr_epics()
elif(key == 'analysis'):
return {}
elif(key == 'stream'):
return {}
else:
# check if the key matches any of the existing keys in the event
event_keys = evt.keys()
values = {}
found = False
for event_key in event_keys:
if(event_key.key() == key):
obj = evt.get(event_key.type(), event_key.src(), event_key.key())
found = True
add_record(values, 'native', '%s[%s]' % (self._s2c[str(event_key.src())], key),
obj, ureg.ADU)
if(found):
return values
else:
print('%s not found in event' % (key))
def translate_object(self, evt, key):
values = {}
detid = self._c2id_detectors[key]
if detid in PNCCD_IDS:
det = self._detectors[detid]
obj = self._detectors[detid]['obj']
data_nda = det['data_method'](obj, evt)
if data_nda is None:
image = None
elif len(data_nda.shape) <= 2:
image = data_nda
elif len(data_nda.shape) == 3:
image = numpy.hstack([numpy.vstack([data_nda[0],data_nda[1][::-1,::-1]]),
numpy.vstack([data_nda[3],data_nda[2][::-1,::-1]])])
add_record(values, det['type'], det['key'], image, ureg.ADU)
elif detid in ACQ_IDS:
det = self._detectors[detid]
# waveforms are in Volts, times are in Seconds
obj = det['obj']
waveforms = obj.waveform(evt)
#print("waveforms", waveforms)
#times = obj.wftime(evt)
for i, wf in enumerate(waveforms):
add_record(values, det['type'], det['keys'][i], wf, ureg.V)
else:
raise RuntimeError('%s not yet supported' % key)
return values
def translate_core(self, evt, key):
"""Returns a dict of Records that matchs a core Hummingbird key.
Core keys include all except: parameters, any psana create key,
any native key."""
values = {}
native_keys = self._c2n[key]
event_keys = evt.keys()
for k in event_keys:
if(k.type() in native_keys):
obj = evt.get(k.type(), k.src(), k.key())
if(isinstance(obj, psana.Bld.BldDataFEEGasDetEnergy) or
isinstance(obj, psana.Bld.BldDataFEEGasDetEnergyV1)):
self._tr_bld_data_fee_gas_det_energy(values, obj)
elif(isinstance(obj, psana.Lusi.IpmFexV1)):
self._tr_lusi_ipm_fex(values, obj, k)
elif(key == 'photonEnergies'):
self._tr_bld_data_ebeam(values, obj)
elif(isinstance(obj, psana.CsPad2x2.ElementV1)):
self._tr_cspad2x2(values, obj)
elif(isinstance(obj, psana.CsPad.DataV2)):
self._tr_cspad(values, obj, k)
# AMO
elif(isinstance(obj, psana.PNCCD.FullFrameV1)):
self._tr_pnccdFullFrame(values, obj, k)
elif(isinstance(obj, psana.PNCCD.FramesV1)):
self._tr_pnccdFrames(values, obj, k)
# --
elif(isinstance(obj, psana.Acqiris.DataDescV1)):
self._tr_acqiris(values, obj, k)
elif(isinstance(obj, psana.Camera.FrameV1)):
self._tr_camera(values, obj)
elif(isinstance(obj, psana.EventId)):
self._tr_event_id(values, obj)
elif(isinstance(obj, psana.EvrData.DataV3) or
isinstance(obj, psana.EvrData.DataV4)):
self._tr_event_codes(values, obj)
else:
print(type(obj))
print(k)
raise RuntimeError('%s not yet supported' % (type(obj)))
return values
def event_id(self, evt):
"""Returns an id which should be unique for each
shot and increase monotonically"""
return self.translate(evt, 'eventID')['Timestamp'].timestamp
def event_id2(self, evt):
"""Returns the LCLS time, a 64-bit integer as an alterative ID"""
return self.translate(evt, 'eventID')['Timestamp'].timestamp2
def _tr_bld_data_ebeam(self, values, obj):
"""Translates BldDataEBeam to hummingbird photon energy and other beam properties"""
try:
photon_energy_ev = obj.ebeamPhotonEnergy()
except AttributeError:
peak_current = obj.ebeamPkCurrBC2()
dl2_energy_gev = 0.001*obj.ebeamL3Energy()
ltu_wake_loss = 0.0016293*peak_current
# Spontaneous radiation loss per segment
sr_loss_per_segment = 0.63*dl2_energy_gev
# wakeloss in an undulator segment
wake_loss_per_segment = 0.0003*peak_current
# energy loss per segment
energy_loss_per_segment = (sr_loss_per_segment +
wake_loss_per_segment)
# energy in first active undulator segment [GeV]
energy_profile = (dl2_energy_gev - 0.001*ltu_wake_loss -
0.0005*energy_loss_per_segment)
# Calculate the resonant photon energy of the first active segment
photon_energy_ev = 44.42*energy_profile*energy_profile
add_record(values, 'photonEnergies', 'photonEnergy', photon_energy_ev, ureg.eV)
try:
ebeam_ang_x = obj.ebeamLTUAngX()
ebeam_ang_y = obj.ebeamLTUAngY()
ebeam_pos_x = obj.ebeamLTUPosX()
ebeam_pos_y = obj.ebeamLTUPosY()
ebeam_charge = obj.ebeamCharge()
add_record(values, 'photonEnergies', 'angX', ebeam_ang_x)
add_record(values, 'photonEnergies', 'angY', ebeam_ang_y)
add_record(values, 'photonEnergies', 'posX', ebeam_pos_x)
add_record(values, 'photonEnergies', 'posY', ebeam_pos_y)
add_record(values, 'photonEnergies', 'charge', ebeam_charge)
except AttributeError:
print("Couldn't translate electron beam properties from BldDataEBeam")
def _tr_bld_data_fee_gas_det_energy(self, values, obj):
"""Translates gas monitor detector to hummingbird pulse energy"""
# convert from mJ to J
add_record(values, 'pulseEnergies', 'f_11_ENRC', obj.f_11_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_12_ENRC', obj.f_12_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_21_ENRC', obj.f_21_ENRC(), ureg.mJ)
add_record(values, 'pulseEnergies', 'f_22_ENRC', obj.f_22_ENRC(), ureg.mJ)
def _tr_lusi_ipm_fex(self, values, obj, evt_key):
"""Translates Ipm relative pulse energy monitor
to hummingbird pulse energy"""
add_record(values, 'pulseEnergies', 'IpmFex - '+str(evt_key.src()), obj.sum(), ureg.ADU)
def _tr_cspad2x2(self, values, obj):
"""Translates CsPad2x2 to hummingbird numpy array"""
try:
add_record(values, 'photonPixelDetectors', 'CsPad2x2S', obj.data(), ureg.ADU)
except AttributeError:
add_record(values, 'photonPixelDetectors', 'CsPad2x2', obj.data16(), ureg.ADU)
def _tr_camera(self, values, obj):
"""Translates Camera frame to hummingbird numpy array"""
#if obj.depth == 16 or obj.depth() == 12:
# data = obj.data16()
# print(data.shape)
#else:
# data = obj.data8()
# print(data.shape)
data = obj.data16()
# off Axis cam at CXI
#if data.shape == (1024,1024):
# add_record(values, 'camera', 'offAxis', data, ureg.ADU)
# MCP (PNCCD replacement) at AMO (June 2016)
if data.shape == (1024,1024):
add_record(values, 'camera', 'mcp', data, ureg.ADU)
if data.shape == (1752,2336):
add_record(values, 'camera', 'onAxis', data, ureg.ADU)
def _tr_cspad(self, values, obj, evt_key):
"""Translates CsPad to hummingbird numpy array, quad by quad"""
n_quads = obj.quads_shape()[0]
for i in range(0, n_quads):
add_record(values, 'photonPixelDetectors', '%sQuad%d' % (self._s2c[str(evt_key.src())], i),
obj.quads(i).data(), ureg.ADU)
def _tr_pnccdFullFrame(self, values, obj, evt_key):
"""Translates full pnCCD frame to hummingbird numpy array"""
add_record(values, 'photonPixelDetectors', '%sfullFrame' % self._s2c[str(evt_key.src())], obj.data(), ureg.ADU)
def _tr_pnccdFrames(self, values, obj, evt_key):
"""Translates pnCCD frames to hummingbird numpy array, frame by frame"""
n_frames = obj.frame_shape()[0]
for i in range(0, n_frames):
add_record(values, 'photonPixelDetectors', '%sFrame%d' % (self._s2c[str(evt_key.src())], i),
obj.frame(i).data(), ureg.ADU)
def _tr_acqiris(self, values, obj, evt_key):
"""Translates Acqiris TOF data to hummingbird numpy array"""
config_store = self.data_source.env().configStore()
acq_config = config_store.get(psana.Acqiris.ConfigV1, evt_key.src())
samp_interval = acq_config.horiz().sampInterval()
n_channels = obj.data_shape()[0]
for i in range(0, n_channels):
vert = acq_config.vert()[i]
elem = obj.data(i)
timestamp = elem.timestamp()[0].value()
raw = elem.waveforms()[0]
if(elem.nbrSamplesInSeg() == 0):
logging.warning("Warning: TOF data for "
"detector %s is missing.", evt_key)
data = raw*vert.slope() - vert.offset()
rec = Record('%s Channel %d' %(self._s2c[str(evt_key.src())], i),
data, ureg.V)
rec.time = (timestamp +
samp_interval * numpy.arange(0, elem.nbrSamplesInSeg()))
values[rec.name] = rec
def _tr_event_id(self, values, obj):
"""Translates LCLS eventID into a hummingbird one"""
timestamp = obj.time()[0]+obj.time()[1]*1e-9
time = datetime.datetime.fromtimestamp(timestamp, tz=timezone('utc'))
time = time.astimezone(tz=timezone('US/Pacific'))
rec = Record('Timestamp', time, ureg.s)
time = datetime.datetime.fromtimestamp(obj.time()[0])
rec.datetime64 = numpy.datetime64(time, 'ns')+obj.time()[1]
rec.fiducials = obj.fiducials()
rec.run = obj.run()
rec.ticks = obj.ticks()
rec.vector = obj.vector()
rec.timestamp = timestamp
rec.timestamp2 = obj.time()[0] << 32 | obj.time()[1]
values[rec.name] = rec
def _tr_event_codes(self, values, obj):
"""Translates LCLS event codes into a hummingbird ones"""
codes = []
for fifo_event in obj.fifoEvents():
codes.append(fifo_event.eventCode())
add_record(values, 'eventCodes', 'EvrEventCodes', codes)
def _tr_epics(self):
"""Returns an EPICSdict that provides access to EPICS parameters.
Check the EPICSdict class for more details.
"""
return EPICSdict(self.data_source.env().epicsStore())
class EPICSdict(object):
"""Provides a dict-like interface to EPICS parameters.
    Translating all the parameters at once is too slow.
    Instead, parameters are only translated as they are needed,
    i.e. when they are accessed, using this class.
"""
def __init__(self, epics):
self.epics = epics
self._cache = {}
self._keys = None
def keys(self):
"""Returns available EPICS names"""
if self._keys is None:
self._keys = self.epics.pvNames() + self.epics.aliases()
return self._keys
def len(self):
"""Returns the length of the dictionary"""
return len(self.keys())
def __getitem__(self, key):
"""Calls psana to retrieve and translate the EPICS item"""
if(key not in self._cache):
pv = self.epics.getPV(key)
if(pv is None):
raise KeyError('%s is not a valid EPICS key' %(key))
rec = Record(key, pv.value(0))
rec.pv = pv
self._cache[key] = rec
return self._cache[key]
|
# Row-wise apply of the externally defined transform_utm_to_wgs (presumably UTM -> WGS84 conversion).
plot_data.apply(transform_utm_to_wgs, axis=1)
|
"""
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
Binary operators align on column name. For columns that don't exist in both
    DataFrames, the column is filled with NaN (for non-comparison operations)
    or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
def test_evaluation():
    # Test to make sure that evaluating a DataFrame once caches the result and
    # doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
|
"""
compressible-specific boundary conditions. Here, in particular, we
implement an HSE BC in the vertical direction.
Note: the pyro BC routines operate on a single variable at a time, so
some work will necessarily be repeated.
Also note: we may come in here with the aux_data (source terms), so
we'll do a special case for them
"""
import compressible.eos as eos
from util import msg
import math
import numpy as np
def user(bc_name, bc_edge, variable, ccdata):
"""
A hydrostatic boundary. This integrates the equation of HSE into
the ghost cells to get the pressure and density under the assumption
that the specific internal energy is constant.
Upon exit, the ghost cells for the input variable will be set
Parameters
----------
bc_name : {'hse'}
The descriptive name for the boundary condition -- this allows
for pyro to have multiple types of user-supplied boundary
conditions. For this module, it needs to be 'hse'.
bc_edge : {'ylb', 'yrb'}
The boundary to update: ylb = lower y boundary; yrb = upper y
boundary.
variable : {'density', 'x-momentum', 'y-momentum', 'energy'}
The variable whose ghost cells we are filling
ccdata : CellCenterData2d object
The data object
"""
myg = ccdata.grid
if bc_name == "hse":
if bc_edge == "ylb":
# lower y boundary
# we will take the density to be constant, the velocity to
# be outflow, and the pressure to be in HSE
if variable in ["density", "x-momentum", "y-momentum", "ymom_src", "E_src", "fuel", "ash"]:
v = ccdata.get_var(variable)
j = myg.jlo-1
while j >= 0:
v[:, j] = v[:, myg.jlo]
j -= 1
elif variable == "energy":
dens = ccdata.get_var("density")
xmom = ccdata.get_var("x-momentum")
ymom = ccdata.get_var("y-momentum")
ener = ccdata.get_var("energy")
grav = ccdata.get_aux("grav")
gamma = ccdata.get_aux("gamma")
dens_base = dens[:, myg.jlo]
ke_base = 0.5*(xmom[:, myg.jlo]**2 + ymom[:, myg.jlo]**2) / \
dens[:, myg.jlo]
eint_base = (ener[:, myg.jlo] - ke_base)/dens[:, myg.jlo]
pres_base = eos.pres(gamma, dens_base, eint_base)
# we are assuming that the density is constant in this
# formulation of HSE, so the pressure comes simply from
# differencing the HSE equation
j = myg.jlo-1
while j >= 0:
pres_below = pres_base - grav*dens_base*myg.dy
rhoe = eos.rhoe(gamma, pres_below)
ener[:, j] = rhoe + ke_base
pres_base = pres_below.copy()
j -= 1
else:
raise NotImplementedError("variable not defined")
elif bc_edge == "yrb":
# upper y boundary
# we will take the density to be constant, the velocity to
# be outflow, and the pressure to be in HSE
if variable in ["density", "x-momentum", "y-momentum", "ymom_src", "E_src", "fuel", "ash"]:
v = ccdata.get_var(variable)
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
v[:, j] = v[:, myg.jhi]
elif variable == "energy":
dens = ccdata.get_var("density")
xmom = ccdata.get_var("x-momentum")
ymom = ccdata.get_var("y-momentum")
ener = ccdata.get_var("energy")
grav = ccdata.get_aux("grav")
gamma = ccdata.get_aux("gamma")
dens_base = dens[:, myg.jhi]
ke_base = 0.5*(xmom[:, myg.jhi]**2 + ymom[:, myg.jhi]**2) / \
dens[:, myg.jhi]
eint_base = (ener[:, myg.jhi] - ke_base)/dens[:, myg.jhi]
pres_base = eos.pres(gamma, dens_base, eint_base)
# we are assuming that the density is constant in this
# formulation of HSE, so the pressure comes simply from
# differencing the HSE equation
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
pres_above = pres_base + grav*dens_base*myg.dy
rhoe = eos.rhoe(gamma, pres_above)
ener[:, j] = rhoe + ke_base
pres_base = pres_above.copy()
else:
raise NotImplementedError("variable not defined")
else:
msg.fail("error: hse BC not supported for xlb or xrb")
elif bc_name == "ramp":
# Boundary conditions for double Mach reflection problem
gamma = ccdata.get_aux("gamma")
if bc_edge == "xlb":
# lower x boundary
# inflow condition with post shock setup
v = ccdata.get_var(variable)
i = myg.ilo - 1
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
val = inflow_post_bc(variable, gamma)
while i >= 0:
v[i, :] = val
i = i - 1
else:
v[:, :] = 0.0 # no source term
elif bc_edge == "ylb":
# lower y boundary
# for x > 1./6., reflective boundary
# for x < 1./6., inflow with post shock setup
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
v = ccdata.get_var(variable)
j = myg.jlo - 1
jj = 0
while j >= 0:
xcen_l = myg.x < 1.0/6.0
xcen_r = myg.x >= 1.0/6.0
v[xcen_l, j] = inflow_post_bc(variable, gamma)
if variable == "y-momentum":
v[xcen_r, j] = -1.0*v[xcen_r, myg.jlo+jj]
else:
v[xcen_r, j] = v[xcen_r, myg.jlo+jj]
j = j - 1
jj = jj + 1
else:
v = ccdata.get_var(variable)
v[:, :] = 0.0 # no source term
elif bc_edge == "yrb":
# upper y boundary
            # time-dependent boundary: the shock front moves at Mach 10, forming a
            # 30-degree clockwise angle with the x-axis.
            # The x coordinate of the grid is used to judge whether a cell belongs to
            # the pure post-shock area, the pure pre-shock area, or the mixed area.
if variable in ["density", "x-momentum", "y-momentum", "energy"]:
v = ccdata.get_var(variable)
for j in range(myg.jhi+1, myg.jhi+myg.ng+1):
shockfront_up = 1.0/6.0 + (myg.y[j] + 0.5*myg.dy*math.sqrt(3))/math.tan(math.pi/3.0) \
+ (10.0/math.sin(math.pi/3.0))*ccdata.t
shockfront_down = 1.0/6.0 + (myg.y[j] - 0.5*myg.dy*math.sqrt(3))/math.tan(math.pi/3.0) \
+ (10.0/math.sin(math.pi/3.0))*ccdata.t
shockfront = np.array([shockfront_down, shockfront_up])
for i in range(myg.ihi+myg.ng+1):
v[i, j] = 0.0
cx_down = myg.x[i] - 0.5*myg.dx*math.sqrt(3)
cx_up = myg.x[i] + 0.5*myg.dx*math.sqrt(3)
cx = np.array([cx_down, cx_up])
for sf in shockfront:
for x in cx:
if x < sf:
v[i, j] = v[i, j] + 0.25*inflow_post_bc(variable, gamma)
else:
v[i, j] = v[i, j] + 0.25*inflow_pre_bc(variable, gamma)
else:
v = ccdata.get_var(variable)
v[:, :] = 0.0 # no source term
else:
msg.fail("error: bc type %s not supported" % (bc_name))
def inflow_post_bc(var, g):
# inflow boundary condition with post shock setup
r_l = 8.0
u_l = 7.1447096
v_l = -4.125
p_l = 116.5
if var == "density":
vl = r_l
elif var == "x-momentum":
vl = r_l*u_l
elif var == "y-momentum":
vl = r_l*v_l
elif var == "energy":
vl = p_l/(g - 1.0) + 0.5*r_l*(u_l*u_l + v_l*v_l)
else:
vl = 0.0
return vl
def inflow_pre_bc(var, g):
# pre shock setup
r_r = 1.4
u_r = 0.0
v_r = 0.0
p_r = 1.0
if var == "density":
vl = r_r
elif var == "x-momentum":
vl = r_r*u_r
elif var == "y-momentum":
vl = r_r*v_r
elif var == "energy":
vl = p_r/(g - 1.0) + 0.5*r_r*(u_r*u_r + v_r*v_r)
else:
vl = 0.0
return vl
|
"""Unit tests for owners_finder.py."""
import os
import sys
import unittest
if sys.version_info.major == 2:
import mock
else:
from unittest import mock
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support import filesystem_mock
import owners_finder
import owners_client
ben = 'ben@example.com'
brett = 'brett@example.com'
darin = 'darin@example.com'
jochen = 'jochen@example.com'
john = 'john@example.com'
ken = 'ken@example.com'
peter = 'peter@example.com'
tom = 'tom@example.com'
nonowner = 'nonowner@example.com'
def owners_file(*email_addresses, **kwargs):
s = ''
if kwargs.get('comment'):
s += '# %s\n' % kwargs.get('comment')
if kwargs.get('noparent'):
s += 'set noparent\n'
return s + '\n'.join(email_addresses) + '\n'
class TestClient(owners_client.OwnersClient):
def __init__(self):
super(TestClient, self).__init__()
self.owners_by_path = {
'DEPS': [ken, peter, tom],
'base/vlog.h': [ken, peter, tom],
'chrome/browser/defaults.h': [brett, ben, ken, peter, tom],
'chrome/gpu/gpu_channel.h': [ken, ben, brett, ken, peter, tom],
'chrome/renderer/gpu/gpu_channel_host.h': [peter, ben, brett, ken, tom],
'chrome/renderer/safe_browsing/scorer.h': [peter, ben, brett, ken, tom],
'content/content.gyp': [john, darin],
'content/bar/foo.cc': [john, darin],
'content/baz/froboz.h': [brett, john, darin],
'content/baz/ugly.cc': [brett, john, darin],
'content/baz/ugly.h': [brett, john, darin],
'content/common/common.cc': [jochen, john, darin],
'content/foo/foo.cc': [jochen, john, darin],
'content/views/pie.h': [ben, john, self.EVERYONE],
}
def ListOwners(self, path):
path = path.replace(os.sep, '/')
return self.owners_by_path[path]
class OutputInterceptedOwnersFinder(owners_finder.OwnersFinder):
def __init__(
self, files, author, reviewers, client, disable_color=False):
super(OutputInterceptedOwnersFinder, self).__init__(
files, author, reviewers, client, disable_color=disable_color)
self.output = []
self.indentation_stack = []
def resetText(self):
self.output = []
self.indentation_stack = []
def indent(self):
self.indentation_stack.append(self.output)
self.output = []
def unindent(self):
block = self.output
self.output = self.indentation_stack.pop()
self.output.append(block)
def writeln(self, text=''):
self.output.append(text)
class _BaseTestCase(unittest.TestCase):
default_files = [
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h',
'content/views/pie.h'
]
def ownersFinder(self, files, author=nonowner, reviewers=None):
reviewers = reviewers or []
return OutputInterceptedOwnersFinder(
files, author, reviewers, TestClient(), disable_color=True)
def defaultFinder(self):
return self.ownersFinder(self.default_files)
class OwnersFinderTests(_BaseTestCase):
def test_constructor(self):
self.assertNotEqual(self.defaultFinder(), None)
def test_skip_files_owned_by_reviewers(self):
files = [
'chrome/browser/defaults.h', # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, reviewers=[brett])
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
def test_skip_files_owned_by_author(self):
files = [
'chrome/browser/defaults.h', # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, author=brett)
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
def test_native_path_sep(self):
# Create a path with backslashes on Windows to make sure these are handled.
# This test is a harmless duplicate on other platforms.
native_slashes_path = 'chrome/browser/defaults.h'.replace('/', os.sep)
files = [
native_slashes_path, # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, reviewers=[brett])
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_reset(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
for _ in range(2):
expected = [brett, darin, john, peter, ken, ben, tom]
self.assertEqual(finder.owners_queue, expected)
self.assertEqual(finder.unreviewed_files, {
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h'
})
self.assertEqual(finder.selected_owners, set())
self.assertEqual(finder.deselected_owners, set())
self.assertEqual(finder.reviewed_by, {})
self.assertEqual(finder.output, [])
finder.select_owner(john)
finder.reset()
finder.resetText()
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_select(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
finder.select_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {john})
self.assertEqual(finder.deselected_owners, {darin})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': john,
'content/baz/ugly.cc': john,
'content/baz/ugly.h': john,
'content/content.gyp': john})
self.assertEqual(finder.output,
['Selected: ' + john, 'Deselected: ' + darin])
finder = self.defaultFinder()
finder.select_owner(darin)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Selected: ' + darin, 'Deselected: ' + john])
finder = self.defaultFinder()
finder.select_owner(brett)
expected = [darin, john, peter, ken, tom]
self.assertEqual(finder.owners_queue, expected)
self.assertEqual(finder.selected_owners, {brett})
self.assertEqual(finder.deselected_owners, {ben})
self.assertEqual(finder.reviewed_by,
{'chrome/browser/defaults.h': brett,
'chrome/gpu/gpu_channel.h': brett,
'chrome/renderer/gpu/gpu_channel_host.h': brett,
'chrome/renderer/safe_browsing/scorer.h': brett,
'content/baz/ugly.cc': brett,
'content/baz/ugly.h': brett})
self.assertEqual(finder.output,
['Selected: ' + brett, 'Deselected: ' + ben])
@mock.patch('owners_client.OwnersClient.ScoreOwners')
def test_deselect(self, mockScoreOwners):
mockScoreOwners.return_value = [brett, darin, john, peter, ken, ben, tom]
finder = self.defaultFinder()
finder.deselect_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Deselected: ' + john, 'Selected: ' + darin])
def test_print_file_info(self):
finder = self.defaultFinder()
finder.print_file_info('chrome/browser/defaults.h')
self.assertEqual(finder.output, ['chrome/browser/defaults.h [5]'])
finder.resetText()
finder.print_file_info('chrome/renderer/gpu/gpu_channel_host.h')
self.assertEqual(finder.output,
['chrome/renderer/gpu/gpu_channel_host.h [5]'])
def test_print_file_info_detailed(self):
finder = self.defaultFinder()
finder.print_file_info_detailed('chrome/browser/defaults.h')
self.assertEqual(finder.output,
['chrome/browser/defaults.h',
[ben, brett, ken, peter, tom]])
finder.resetText()
finder.print_file_info_detailed('chrome/renderer/gpu/gpu_channel_host.h')
self.assertEqual(finder.output,
['chrome/renderer/gpu/gpu_channel_host.h',
[ben, brett, ken, peter, tom]])
if __name__ == '__main__':
unittest.main()
|
from activitystreams import Activity, Object, MediaLink, ActionLink, Link
import re
import datetime
import time
class AtomActivity(Activity):
pass
class ObjectParseMode(object):
def __init__(self, reprstring):
self.reprstring = reprstring
def __repr__(self):
return self.reprstring
ObjectParseMode.ATOM_ENTRY = ObjectParseMode("ObjectParseMode.ATOM_ENTRY")
ObjectParseMode.ATOM_AUTHOR = ObjectParseMode("ObjectParseMode.ATOM_AUTHOR")
ObjectParseMode.ACTIVITY_OBJECT = ObjectParseMode("ObjectParseMode.ACTIVITY_OBJECT")
ATOM_PREFIX = "{http://www.w3.org/2005/Atom}"
ACTIVITY_PREFIX = "{http://activitystrea.ms/spec/1.0/}"
MEDIA_PREFIX = "{http://purl.org/syndication/atommedia}"
ATOM_FEED = ATOM_PREFIX + "feed"
ATOM_ENTRY = ATOM_PREFIX + "entry"
ATOM_ID = ATOM_PREFIX + "id"
ATOM_AUTHOR = ATOM_PREFIX + "author"
ATOM_SOURCE = ATOM_PREFIX + "source"
ATOM_TITLE = ATOM_PREFIX + "title"
ATOM_SUMMARY = ATOM_PREFIX + "summary"
ATOM_CONTENT = ATOM_PREFIX + "content"
ATOM_LINK = ATOM_PREFIX + "link"
ATOM_PUBLISHED = ATOM_PREFIX + "published"
ATOM_NAME = ATOM_PREFIX + "name"
ATOM_URI = ATOM_PREFIX + "uri"
ATOM_GENERATOR = ATOM_PREFIX + "generator"
ATOM_ICON = ATOM_PREFIX + "icon"
ACTIVITY_SUBJECT = ACTIVITY_PREFIX + "subject"
ACTIVITY_OBJECT = ACTIVITY_PREFIX + "object"
ACTIVITY_OBJECT_TYPE = ACTIVITY_PREFIX + "object-type"
ACTIVITY_VERB = ACTIVITY_PREFIX + "verb"
ACTIVITY_TARGET = ACTIVITY_PREFIX + "target"
ACTIVITY_ACTOR = ACTIVITY_PREFIX + "actor"
POST_VERB = "http://activitystrea.ms/schema/1.0/post"
MEDIA_WIDTH = MEDIA_PREFIX + "width"
MEDIA_HEIGHT = MEDIA_PREFIX + "height"
MEDIA_DURATION = MEDIA_PREFIX + "duration"
MEDIA_DESCRIPTION = MEDIA_PREFIX + "description"
def make_activities_from_feed(et):
feed_elem = et.getroot()
entry_elems = feed_elem.findall(ATOM_ENTRY)
activities = []
for entry_elem in entry_elems:
activities.extend(make_activities_from_entry(entry_elem, feed_elem))
return activities
def make_activities_from_entry(entry_elem, feed_elem):
object_elems = entry_elem.findall(ACTIVITY_OBJECT)
activity_is_implied = False
if len(object_elems) == 0:
# Implied activity, so the entry itself represents the object.
activity_is_implied = True
object_elems = [ entry_elem ]
author_elem = entry_elem.find(ATOM_AUTHOR)
if author_elem is None:
source_elem = entry_elem.find(ATOM_SOURCE)
if source_elem is not None:
author_elem = source_elem.find(ATOM_AUTHOR)
if author_elem is None:
author_elem = feed_elem.find(ATOM_AUTHOR)
target_elem = entry_elem.find(ACTIVITY_TARGET)
published_elem = entry_elem.find(ATOM_PUBLISHED)
published_datetime = None
if published_elem is not None:
published_w3cdtf = published_elem.text
published_datetime = _parse_date_w3cdtf(published_w3cdtf)
verb_elem = entry_elem.find(ACTIVITY_VERB)
verb = None
if verb_elem is not None:
verb = verb_elem.text
else:
verb = POST_VERB
generator_elem = entry_elem.find(ATOM_GENERATOR)
icon_url = None
icon_elem = entry_elem.find(ATOM_ICON)
if icon_elem is not None:
icon_url = icon_elem.text
target = None
    if target_elem is not None:
target = make_object_from_elem(target_elem, feed_elem, ObjectParseMode.ACTIVITY_OBJECT)
actor = None
    if author_elem is not None:
actor = make_object_from_elem(author_elem, feed_elem, ObjectParseMode.ATOM_AUTHOR)
activities = []
for object_elem in object_elems:
if activity_is_implied:
object = make_object_from_elem(object_elem, feed_elem, ObjectParseMode.ATOM_ENTRY)
else:
object = make_object_from_elem(object_elem, feed_elem, ObjectParseMode.ACTIVITY_OBJECT)
activity = Activity(object=object, actor=actor, target=target, verb=verb, time=published_datetime, icon_url=icon_url)
activities.append(activity)
return activities
def make_object_from_elem(object_elem, feed_elem, mode):
id = None
id_elem = object_elem.find(ATOM_ID)
if id_elem is not None:
id = id_elem.text
summary = None
summary_elem = object_elem.find(ATOM_SUMMARY)
if summary_elem is not None:
summary = summary_elem.text
name_tag_name = ATOM_TITLE
# The ATOM_AUTHOR parsing mode looks in atom:name instead of atom:title
if mode == ObjectParseMode.ATOM_AUTHOR:
name_tag_name = ATOM_NAME
name = None
name_elem = object_elem.find(name_tag_name)
if name_elem is not None:
name = name_elem.text
url = None
image = None
for link_elem in object_elem.findall(ATOM_LINK):
type = link_elem.get("type")
rel = link_elem.get("rel")
if rel is None or rel == "alternate":
if type is None or type == "text/html":
url = link_elem.get("href")
if rel == "preview":
if type is None or type == "image/jpeg" or type == "image/gif" or type == "image/png":
# FIXME: Should pull out the width/height/duration attributes from AtomMedia too.
image = MediaLink(url=link_elem.get("href"))
# In the atom:author parse mode we fall back on atom:uri if there's no link rel="alternate"
if url is None and mode == ObjectParseMode.ATOM_AUTHOR:
uri_elem = object_elem.find(ATOM_URI)
if uri_elem is not None:
url = uri_elem.text
object_type_elem = object_elem.find(ACTIVITY_OBJECT_TYPE)
object_type = None
if object_type_elem is not None:
object_type = object_type_elem.text
return Object(id=id, name=name, url=url, object_type=object_type, image=image, summary=summary)
def _parse_date_w3cdtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
            month = julian // 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(float(seconds))
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
    __date_re = (r'(?P<year>\d\d\d\d)'
                 r'(?:(?P<dsep>-|)'
                 r'(?:(?P<julian>\d\d\d)'
                 r'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = r'(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
    __time_re = (r'(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 r'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return datetime.datetime.utcfromtimestamp(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\
Johnson%E2%80%93Lindenstrauss_lemma
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.fixes import parse_version
if parse_version(matplotlib.__version__) >= parse_version('2.1'):
density_param = {'density': True}
else:
density_param = {'normed': True}
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
eps_range = np.linspace(0.01, 0.99, 100)
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
if '--use-digits-dataset' in sys.argv:
data = load_digits().data[:500]
else:
data = fetch_20newsgroups_vectorized().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
min_dist = min(projected_dists.min(), dists.min())
max_dist = max(projected_dists.max(), dists.max())
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu,
extent=[min_dist, max_dist, min_dist, max_dist])
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, range=(0., 2.), edgecolor='k', **density_param)
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
import rospy
import sys
from socket import error
from twisted.internet import reactor
from rosbridge_server import RosbridgeUdpSocket,RosbridgeUdpFactory
def shutdown_hook():
reactor.stop()
if __name__ == "__main__":
rospy.init_node("rosbridge_websocket")
rospy.on_shutdown(shutdown_hook) # register shutdown hook to stop the server
##################################################
# Parameter handling #
##################################################
# get RosbridgeProtocol parameters
RosbridgeUdpSocket.fragment_timeout = rospy.get_param('~fragment_timeout',
RosbridgeUdpSocket.fragment_timeout)
RosbridgeUdpSocket.delay_between_messages = rospy.get_param('~delay_between_messages',
RosbridgeUdpSocket.delay_between_messages)
RosbridgeUdpSocket.max_message_size = rospy.get_param('~max_message_size',
RosbridgeUdpSocket.max_message_size)
if RosbridgeUdpSocket.max_message_size == "None":
RosbridgeUdpSocket.max_message_size = None
# if authentication should be used
RosbridgeUdpSocket.authenticate = rospy.get_param('~authenticate', False)
port = rospy.get_param('~port', 9090)
interface = rospy.get_param('~interface', "")
if "--port" in sys.argv:
idx = sys.argv.index("--port")+1
if idx < len(sys.argv):
port = int(sys.argv[idx])
else:
print "--port argument provided without a value."
sys.exit(-1)
if "--interface" in sys.argv:
idx = sys.argv.index("--interface")+1
if idx < len(sys.argv):
            interface = sys.argv[idx]
else:
print "--interface argument provided without a value."
sys.exit(-1)
if "--fragment_timeout" in sys.argv:
idx = sys.argv.index("--fragment_timeout") + 1
if idx < len(sys.argv):
RosbridgeUdpSocket.fragment_timeout = int(sys.argv[idx])
else:
print "--fragment_timeout argument provided without a value."
sys.exit(-1)
if "--delay_between_messages" in sys.argv:
idx = sys.argv.index("--delay_between_messages") + 1
if idx < len(sys.argv):
RosbridgeUdpSocket.delay_between_messages = float(sys.argv[idx])
else:
print "--delay_between_messages argument provided without a value."
sys.exit(-1)
if "--max_message_size" in sys.argv:
idx = sys.argv.index("--max_message_size") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeUdpSocket.max_message_size = None
else:
RosbridgeUdpSocket.max_message_size = int(value)
else:
print "--max_message_size argument provided without a value. (can be None or <Integer>)"
sys.exit(-1)
##################################################
# Done with parameter handling #
##################################################
rospy.loginfo("Rosbridge UDP server started on port %d", port)
reactor.listenUDP(port, RosbridgeUdpFactory(), interface=interface)
reactor.run()
|
from bson import ObjectId
import simplejson as json
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
from eve.tests.utils import DummyEvent
from eve import STATUS_OK, LAST_UPDATED, ID_FIELD, ISSUES, STATUS, ETAG
from eve.methods.patch import patch_internal
class TestPatch(TestBase):
def test_patch_to_resource_endpoint(self):
_, status = self.patch(self.known_resource_url, data={})
self.assert405(status)
def test_readonly_resource(self):
_, status = self.patch(self.readonly_id_url, data={})
self.assert405(status)
def test_unknown_id(self):
_, status = self.patch(self.unknown_item_id_url,
data={"key1": 'value1'})
self.assert404(status)
def test_unknown_id_different_resource(self):
# patching a 'user' with a valid 'contact' id will 404
_, status = self.patch('%s/%s/' % (self.different_resource,
self.item_id),
data={"key1": "value1"})
self.assert404(status)
# of course we can still patch a 'user'
_, status = self.patch('%s/%s/' % (self.different_resource,
self.user_id),
data={'key1': '{"username": "username1"}'},
headers=[('If-Match', self.user_etag)])
self.assert200(status)
def test_by_name(self):
_, status = self.patch(self.item_name_url, data={'key1': 'value1'})
self.assert405(status)
def test_ifmatch_missing(self):
_, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert403(status)
def test_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
r, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert200(status)
self.assertTrue(ETAG not in r)
def test_ifmatch_bad_etag(self):
_, status = self.patch(self.item_id_url,
data={'key1': 'value1'},
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_unique_value(self):
# TODO
# for the time being we are happy with testing only Eve's custom
# validation. We rely on Cerberus' own test suite for other validation
# unit tests. This test also makes sure that response status is
        # syntactically correct in case of validation issues.
# We should probably test every single case as well (seems overkill).
r, status = self.patch(self.item_id_url,
data={"ref": "%s" % self.alt_ref},
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': "value '%s' is not unique" %
self.alt_ref})
def test_patch_string(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_integer(self):
field = "prog"
test_value = 9999
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_list_as_array(self):
field = "role"
test_value = ["vendor", "client"]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertTrue(set(test_value).issubset(db_value))
def test_patch_rows(self):
field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
for test_item in test_value:
self.assertTrue(test_item in db_value)
def test_patch_list(self):
field = "alist"
test_value = ["a_string", 99]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_dict(self):
field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
changes = {field: test_value}
original_city = []
def keep_original_city(resource_name, updates, original):
original_city.append(original['location']['city'])
self.app.on_update += keep_original_city
self.app.on_updated += keep_original_city
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
self.assertEqual(original_city[0], original_city[1])
def test_patch_datetime(self):
field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_objectid(self):
field = "tid"
test_value = "4f71c129c88e2018d4000000"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_null_objectid(self):
# verify that #341 is fixed.
field = "tid"
test_value = None
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_defaults(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title', r)
def test_patch_defaults_with_post_override(self):
field = "ref"
test_value = "1234567890123456789012345"
r = self.perform_patch_with_post_override(field, test_value)
self.assert200(r.status_code)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title',
json.loads(r.get_data()))
def test_patch_multiple_fields(self):
fields = ['ref', 'prog', 'role']
test_values = ["9876543210987654321054321", 123, ["agent"]]
changes = {"ref": test_values[0], "prog": test_values[1],
"role": test_values[2]}
r = self.perform_patch(changes)
db_values = self.compare_patch_with_get(fields, r)
for i in range(len(db_values)):
self.assertEqual(db_values[i], test_values[i])
def test_patch_with_post_override(self):
# a POST request with PATCH override turns into a PATCH request
r = self.perform_patch_with_post_override('prog', 1)
self.assert200(r.status_code)
def test_patch_internal(self):
# test that patch_internal is available and working properly.
test_field = 'ref'
test_value = "9876543210987654321098765"
data = {test_field: test_value}
with self.app.test_request_context(self.item_id_url):
r, _, _, status = patch_internal(
self.known_resource, data, concurrency_check=False,
**{'_id': self.item_id})
db_value = self.compare_patch_with_get(test_field, r)
self.assertEqual(db_value, test_value)
self.assert200(status)
def test_patch_etag_header(self):
        # test that Etag is always included with the response header. See #562.
changes = {"ref": "1234567890123456789012345"}
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
r = self.test_client.patch(self.item_id_url,
data=json.dumps(changes),
headers=headers)
self.assertTrue('Etag' in r.headers)
def test_patch_nested(self):
changes = {'location.city': 'a nested city',
'location.address': 'a nested address'}
r = self.perform_patch(changes)
values = self.compare_patch_with_get('location', r)
self.assertEqual(values['city'], 'a nested city')
self.assertEqual(values['address'], 'a nested address')
def perform_patch(self, changes):
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
return r
def perform_patch_with_post_override(self, field, value):
headers = [('X-HTTP-Method-Override', 'PATCH'),
('If-Match', self.item_etag),
('Content-Type', 'application/json')]
return self.test_client.post(self.item_id_url,
data=json.dumps({field: value}),
headers=headers)
def compare_patch_with_get(self, fields, patch_response):
raw_r = self.test_client.get(self.item_id_url)
r, status = self.parse_response(raw_r)
self.assert200(status)
self.assertEqual(raw_r.headers.get('ETag'),
patch_response[ETAG])
if isinstance(fields, str):
return r[fields]
else:
return [r[field] for field in fields]
def test_patch_allow_unknown(self):
changes = {"unknown": "unknown"}
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'unknown': 'unknown field'})
self.app.config['DOMAIN'][self.known_resource]['allow_unknown'] = True
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
def test_patch_x_www_form_urlencoded(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
headers = [('If-Match', self.item_etag)]
r, status = self.parse_response(self.test_client.patch(
self.item_id_url, data=changes, headers=headers))
self.assert200(status)
self.assertTrue('OK' in r[STATUS])
def test_patch_referential_integrity(self):
data = {"person": self.unknown_item_id}
headers = [('If-Match', self.invoice_etag)]
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.app.config['ID_FIELD']))
self.assertValidationError(r, {'person': expected})
data = {"person": self.item_id}
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(r, self.invoice_id)
def test_patch_write_concern_success(self):
# 0 and 1 are the only valid values for 'w' on our mongod instance (1
# is the default)
self.domain['contacts']['mongo_write_concern'] = {'w': 0}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
def test_patch_write_concern_fail(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_patch_missing_standard_date_fields(self):
"""Documents created outside the API context could be lacking the
LAST_UPDATED and/or DATE_CREATED fields.
"""
        # directly insert a document, without DATE_CREATED and LAST_UPDATED
# values.
contacts = self.random_contacts(1, False)
ref = 'test_update_field'
contacts[0]['ref'] = ref
_db = self.connection[MONGO_DBNAME]
_db.contacts.insert(contacts)
# now retrieve same document via API and get its etag, which is
        # supposed to be computed on default DATE_CREATED and LAST_UPDATED
# values.
response, status = self.get(self.known_resource, item=ref)
etag = response[ETAG]
_id = response['_id']
# attempt a PATCH with the new etag.
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch('%s/%s' % (self.known_resource_url, _id),
data=changes, headers=[('If-Match', etag)])
self.assert200(status)
def test_patch_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
data = {"inv_number": "new_number"}
headers = [('If-Match', etag)]
response, status = self.patch('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(response, self.invoice_id)
def test_patch_bandwidth_saver(self):
changes = {'ref': '1234567890123456789012345'}
# bandwidth_saver is on by default
self.assertTrue(self.app.config['BANDWIDTH_SAVER'])
r = self.perform_patch(changes)
self.assertFalse('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
self.item_etag = r[self.app.config['ETAG']]
# test return all fields (bandwidth_saver off)
self.app.config['BANDWIDTH_SAVER'] = False
r = self.perform_patch(changes)
self.assertTrue('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
def test_patch_readonly_field_with_previous_document(self):
schema = self.domain['contacts']['schema']
del(schema['ref']['required'])
# disable read-only on the field so we can store a value which is
        # also different from its default value.
schema['read_only_field']['readonly'] = False
changes = {'read_only_field': 'value'}
r = self.perform_patch(changes)
# resume read-only status for the field
self.domain['contacts']['schema']['read_only_field']['readonly'] = True
# test that if the read-only field is included with the payload and its
# value is equal to the one stored with the document, validation
# succeeds (#479).
etag = r['_etag']
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
# test that if the read-only field is included with the payload and its
# value is different from the stored document, validation fails.
etag = r['_etag']
changes = {'read_only_field': 'another value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert422(status)
self.assertTrue('is read-only' in r['_issues']['read_only_field'])
def test_patch_nested_document_not_overwritten(self):
""" Test that nested documents are not overwritten on PATCH and #519
is fixed.
"""
schema = {
'sensor': {
"type": "dict",
"schema": {
"name": {"type": "string"},
"lon": {"type": "float"},
"lat": {"type": "float"},
"value": {"type": "float", "default": 10.3},
"dict": {
'type': 'dict',
'schema': {
'string': {'type': 'string'},
'int': {'type': 'integer'},
}
}
}
},
'test': {
'type': 'string',
'readonly': True,
'default': 'default'
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {
'sensor': {
'name': 'device_name',
'lon': 43.4,
'lat': 1.31,
'dict': {'int': 99}
}
}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag, value, test, int = (
r[ID_FIELD],
r[ETAG],
r['sensor']['value'],
r['test'],
r['sensor']['dict']['int']
)
changes = {
'sensor': {
'lon': 10.0,
'dict': {'string': 'hi'}
}
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
etag, value, int = (
r[ETAG],
r['sensor']['value'],
r['sensor']['dict']['int']
)
self.assertEqual(value, 10.3)
self.assertEqual(test, 'default')
self.assertEqual(int, 99)
def test_patch_nested_document_nullable_missing(self):
schema = {
'sensor': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
'default': None,
},
'other': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag = r[ID_FIELD], r[ETAG]
self.assertTrue('sensor' in r)
self.assertEqual(r['sensor'], None)
self.assertFalse('other' in r)
changes = {
'sensor': {'name': 'device_name'},
'other': {'name': 'other_name'},
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
self.assertEqual(r['sensor'], {'name': 'device_name'})
self.assertEqual(r['other'], {'name': 'other_name'})
def test_patch_dependent_field_on_origin_document(self):
""" Test that when patching a field which is dependent on another and
this other field is not provided with the patch but is still present
on the target document, the patch will be accepted. See #363.
"""
# this will fail as dependent field is missing even in the
# document we are trying to update.
del(self.domain['contacts']['schema']['dependency_field1']['default'])
del(self.domain['contacts']['defaults']['dependency_field1'])
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert422(status)
# update the stored document by adding dependency field.
changes = {'dependency_field1': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
# now the field2 update will be accepted as the dependency field is
# present in the stored document already.
etag = r['_etag']
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
def assertPatchResponse(self, response, item_id):
self.assertTrue(STATUS in response)
self.assertTrue(STATUS_OK in response[STATUS])
self.assertFalse(ISSUES in response)
self.assertTrue(ID_FIELD in response)
self.assertEqual(response[ID_FIELD], item_id)
self.assertTrue(LAST_UPDATED in response)
self.assertTrue(ETAG in response)
self.assertTrue('_links' in response)
self.assertItemLink(response['_links'], item_id)
    def patch(self, url, data, headers=None):
        # Copy into a fresh list so the mutable default argument is never
        # mutated across calls.
        headers = list(headers) if headers else []
        headers.append(('Content-Type', 'application/json'))
r = self.test_client.patch(url,
data=json.dumps(data),
headers=headers)
return self.parse_response(r)
class TestEvents(TestBase):
new_ref = "0123456789012345678901234"
def test_on_pre_PATCH(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_pre_PATCH_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_PATCH_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_PATCH += filter_this
# Would normally patch the known document; will return 404 instead.
r, s = self.parse_response(self.patch())
self.assert404(s)
def test_on_post_PATCH(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(200, devent.called[2].status_code)
self.assertEqual(3, len(devent.called))
def test_on_post_PATCH_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH_contacts += devent
self.patch()
self.assertEqual(200, devent.called[1].status_code)
self.assertEqual(2, len(devent.called))
def test_on_update(self):
devent = DummyEvent(self.before_update)
self.app.on_update += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_update_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_update_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_updated(self):
devent = DummyEvent(self.after_update)
self.app.on_updated += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_updated_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_updated_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def before_update(self):
db = self.connection[MONGO_DBNAME]
contact = db.contacts.find_one(ObjectId(self.item_id))
return contact['ref'] == self.item_name
def after_update(self):
return not self.before_update()
def patch(self):
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
data = json.dumps({"ref": self.new_ref})
return self.test_client.patch(
self.item_id_url, data=data, headers=headers)
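# Hook-signature sketch (inferred from the assertions above, hedged): a handler
# attached via `app.on_pre_PATCH += func` is called with (resource, request,
# lookup) -- see filter_this() -- while the resource-specific form
# `app.on_pre_PATCH_contacts += func` omits the resource argument, which is why
# the tests expect 3 and 2 recorded arguments respectively.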
|
import argparse
import collections
import datetime
import email.mime.text
import getpass
import os
import re
import smtplib
import subprocess
import sys
import tempfile
import urllib2
BUILD_DIR = os.path.dirname(__file__)
NACL_DIR = os.path.dirname(BUILD_DIR)
TOOLCHAIN_REV_DIR = os.path.join(NACL_DIR, 'toolchain_revisions')
PKG_VER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
PKGS = ['pnacl_newlib', 'pnacl_translator']
REV_FILES = [os.path.join(TOOLCHAIN_REV_DIR, '%s.json' % package)
for package in PKGS]
def ParseArgs(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Update pnacl_newlib.json PNaCl version.
LLVM and other projects are checked-in to the NaCl repository, but their
head isn't necessarily the one that we currently use in PNaCl. The
pnacl_newlib.json and pnacl_translator.json files point at subversion
revisions to use for tools such as LLVM. Our build process then
downloads pre-built tool tarballs from the toolchain build waterfall.
git repository before running this script:
______________________
| |
v |
...----A------B------C------D------ NaCl HEAD
^ ^ ^ ^
| | | |__ Latest pnacl_{newlib,translator}.json update.
| | |
| | |__ A newer LLVM change (LLVM repository HEAD).
| |
| |__ Oldest LLVM change since this PNaCl version.
|
|__ pnacl_{newlib,translator}.json points at an older LLVM change.
git repository after running this script:
_______________
| |
v |
...----A------B------C------D------E------ NaCl HEAD
Note that there could be any number of non-PNaCl changes between each of
these changelists, and that the user can also decide to update the
pointer to B instead of C.
There is further complication when toolchain builds are merged.
""")
parser.add_argument('--email', metavar='ADDRESS', type=str,
default=getpass.getuser()+'@chromium.org',
help="Email address to send errors to.")
parser.add_argument('--svn-id', metavar='SVN_ID', type=int, default=0,
help="Update to a specific SVN ID instead of the most "
"recent SVN ID with a PNaCl change. This value must "
"be more recent than the one in the current "
"pnacl_newlib.json. This option is useful when multiple "
"changelists' toolchain builds were merged, or when "
"too many PNaCl changes would be pulled in at the "
"same time.")
parser.add_argument('--dry-run', default=False, action='store_true',
help="Print the changelist that would be sent, but "
"don't actually send anything to review.")
# TODO(jfb) The following options come from download_toolchain.py and
# should be shared in some way.
parser.add_argument('--filter_out_predicates', default=[],
help="Toolchains to filter out.")
return parser.parse_args()
def ExecCommand(command):
try:
return subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
sys.stderr.write('\nRunning `%s` returned %i, got:\n%s\n' %
(' '.join(e.cmd), e.returncode, e.output))
raise
def GetCurrentRevision():
return [ExecCommand([sys.executable, PKG_VER,
'getrevision',
'--revision-package', package]).strip()
for package in PKGS]
def SetCurrentRevision(revision_num):
for package in PKGS:
ExecCommand([sys.executable, PKG_VER] +
# TODO(dschuff) pnacl_newlib shouldn't use cloud-bucket
# once we switch fully to toolchain_build.
(['--cloud-bucket', 'nativeclient-archive2/pnacl_buildsh'] if
package == 'pnacl_newlib' else []) +
['setrevision',
'--revision-package', package,
'--revision', str(revision_num)])
def GitCurrentBranch():
return ExecCommand(['git', 'symbolic-ref', 'HEAD', '--short']).strip()
def GitStatus():
"""List of statuses, one per path, of paths in the current git branch.
Ignores untracked paths."""
out = ExecCommand(['git', 'status', '--porcelain']).strip().split('\n')
return [f.strip() for f in out if not re.match('^\?\? (.*)$', f.strip())]
def SyncSources():
"""Assumes a git-svn checkout of NaCl. See:
www.chromium.org/nativeclient/how-tos/how-to-use-git-svn-with-native-client
"""
ExecCommand(['gclient', 'sync'])
def GitCommitInfo(info='', obj=None, num=None, extra=[]):
"""Commit information, where info is one of the shorthands in git_formats.
obj can be a path or a hash.
num is the number of results to return.
extra is a list of optional extra arguments."""
# Shorthands for git's pretty formats.
# See PRETTY FORMATS format:<string> in `git help log`.
git_formats = {
'': '',
'hash': '%H',
'date': '%ci',
'author': '%aN',
'subject': '%s',
'body': '%b',
}
cmd = ['git', 'log', '--format=format:%s' % git_formats[info]] + extra
if num: cmd += ['-n'+str(num)]
if obj: cmd += [obj]
return ExecCommand(cmd).strip()
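# For example (sketch, assuming a git checkout is present): asking for the
# subject of the current HEAD commit,
#   GitCommitInfo(info='subject', obj='HEAD', num=1)
# runs `git log --format=format:%s -n1 HEAD` and returns its stripped output.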
def GitCommitsSince(date):
"""List of commit hashes since a particular date,
in reverse chronological order."""
return GitCommitInfo(info='hash',
extra=['--since="%s"' % date]).split('\n')
def GitFilesChanged(commit_hash):
"""List of files changed in a commit."""
return GitCommitInfo(obj=commit_hash, num=1,
extra=['--name-only']).split('\n')
def GitChangesPath(commit_hash, path):
"""Returns True if the commit changes a file under the given path."""
return any([
re.search('^' + path, f.strip()) for f in
GitFilesChanged(commit_hash)])
def GitBranchExists(name):
return len(ExecCommand(['git', 'branch', '--list', name]).strip()) != 0
def GitCheckout(branch, force=False):
"""Checkout an existing branch.
force throws away local changes."""
ExecCommand(['git', 'checkout'] +
(['--force'] if force else []) +
[branch])
def GitCheckoutNewBranch(branch):
"""Create and checkout a new git branch."""
ExecCommand(['git', 'checkout', '-b', branch])
def GitDeleteBranch(branch, force=False):
"""Force-delete a branch."""
ExecCommand(['git', 'branch', '-D' if force else '-d', branch])
def GitAdd(file):
ExecCommand(['git', 'add', file])
def GitCommit(message):
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(message)
tmp.flush()
ExecCommand(['git', 'commit', '--file=%s' % tmp.name])
def UploadChanges():
"""Upload changes, don't prompt."""
# TODO(jfb) Using the commit queue and avoiding git try + manual commit
# would be much nicer. See '--use-commit-queue'
return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f'])
def GitTry():
return ExecCommand(['git', 'try'])
def FindCommitWithGitSvnId(git_svn_id):
while True:
    # This command needs to retry because git-svn partially rebuilds its
# revision map for every commit. Asking it a second time fixes the
# issue.
out = ExecCommand(['git', 'svn', 'find-rev', 'r' + git_svn_id]).strip()
if not re.match('^Partial-rebuilding ', out):
break
return out
def CommitMessageToCleanDict(commit_message):
"""Extract and clean commit message fields that follow the NaCl commit
message convention. Don't repeat them as-is, to avoid confusing our
infrastructure."""
res = {}
fields = [
['git svn id', ('\s*git-svn-id: '
'svn://[^@]+@([0-9]+) [a-f0-9\-]+'), '<none>'],
['reviewers tbr', '\s*TBR=([^\n]+)', ''],
['reviewers', '\s*R=([^\n]+)', ''],
['review url', '\s*Review URL: *([^\n]+)', '<none>'],
['bug', '\s*BUG=([^\n]+)', '<none>'],
['test', '\s*TEST=([^\n]+)', '<none>'],
]
for key, regex, none in fields:
found = re.search(regex, commit_message)
if found:
commit_message = commit_message.replace(found.group(0), '')
res[key] = found.group(1).strip()
else:
res[key] = none
res['body'] = commit_message.strip()
return res
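# Minimal usage sketch of CommitMessageToCleanDict; the commit message below is
# an assumed example (not taken from a real log), shown only to illustrate
# which fields the regexes above extract.
def ExampleCommitMessageToCleanDict():
  message = ('Fix a crash in the translator.\n\n'
             'BUG=1234\n'
             'R=dev@chromium.org\n'
             'Review URL: https://codereview.chromium.org/12345\n'
             'git-svn-id: svn://svn.chromium.org/native_client@4321 abc1-234d\n')
  fields = CommitMessageToCleanDict(message)
  # Expected: fields['bug'] == '1234', fields['reviewers'] == 'dev@chromium.org',
  # fields['git svn id'] == '4321', and fields['body'] no longer repeats those
  # lines.
  return fields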
def SendEmail(user_email, out):
if user_email:
sys.stderr.write('\nSending email to %s.\n' % user_email)
msg = email.mime.text.MIMEText(out)
msg['Subject'] = '[PNaCl revision updater] failure!'
msg['From'] = 'tool_revisions-bot@chromium.org'
msg['To'] = user_email
s = smtplib.SMTP('localhost')
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
else:
sys.stderr.write('\nNo email address specified.')
def DryRun(out):
sys.stdout.write("DRY RUN: " + out + "\n")
def Done(out):
sys.stdout.write(out)
sys.exit(0)
class CLInfo:
"""Changelist information: sorted dictionary of NaCl-standard fields."""
def __init__(self, desc):
self._desc = desc
self._vals = collections.OrderedDict([
('git svn id', None),
('hash', None),
('author', None),
('date', None),
('subject', None),
('commits since', None),
('bug', None),
('test', None),
('review url', None),
('reviewers tbr', None),
('reviewers', None),
('body', None),
])
def __getitem__(self, key):
return self._vals[key]
def __setitem__(self, key, val):
assert key in self._vals.keys()
self._vals[key] = str(val)
def __str__(self):
"""Changelist to string.
A short description of the change, e.g.:
r12345: (tom@example.com) Subject of the change.
If the change is itself pulling in other changes from
sub-repositories then take its relevant description and append it to
the string. These sub-directory updates are also script-generated
and therefore have a predictable format. e.g.:
r12345: (tom@example.com) Subject of the change.
| dead123: (dick@example.com) Other change in another repository.
| beef456: (harry@example.com) Yet another cross-repository change.
"""
desc = (' r' + self._vals['git svn id'] + ': (' +
self._vals['author'] + ') ' +
self._vals['subject'])
if GitChangesPath(self._vals['hash'], 'pnacl/COMPONENT_REVISIONS'):
git_hash_abbrev = '[0-9a-fA-F]{7}'
email = '[^@)]+@[^)]+\.[^)]+'
desc = '\n'.join([desc] + [
' | ' + line for line in self._vals['body'].split('\n') if
re.match('^ *%s: \(%s\) .*$' % (git_hash_abbrev, email), line)])
return desc
def FmtOut(tr_points_at, pnacl_changes, err=[], msg=[]):
assert isinstance(err, list)
assert isinstance(msg, list)
old_svn_id = tr_points_at['git svn id']
new_svn_id = pnacl_changes[-1]['git svn id'] if pnacl_changes else '?'
changes = '\n'.join([str(cl) for cl in pnacl_changes])
bugs = '\n'.join(list(set(
['BUG= ' + cl['bug'].strip() if cl['bug'] else '' for
cl in pnacl_changes]) - set([''])))
reviewers = ', '.join(list(set(
[r.strip() for r in
(','.join([
cl['author'] + ',' + cl['reviewers tbr'] + ',' + cl['reviewers']
for cl in pnacl_changes])).split(',')]) - set([''])))
return (('*** ERROR ***\n' if err else '') +
'\n\n'.join(err) +
'\n\n'.join(msg) +
('\n\n' if err or msg else '') +
('Update revision for PNaCl r%s->r%s\n\n'
'Pull the following PNaCl changes into NaCl:\n%s\n\n'
'%s\n'
'R= %s\n'
'TEST=git try\n'
'NOTRY=true\n'
'(Please LGTM this change and tick the "commit" box)\n' %
(old_svn_id, new_svn_id, changes, bugs, reviewers)))
def Main():
args = ParseArgs(sys.argv[1:])
tr_points_at = CLInfo('revision update points at PNaCl version')
pnacl_changes = []
msg = []
branch = GitCurrentBranch()
assert branch == 'master', ('Must be on branch master, currently on %s' %
branch)
try:
status = GitStatus()
assert len(status) == 0, ("Repository isn't clean:\n %s" %
'\n '.join(status))
SyncSources()
# The current revision file points at a specific PNaCl LLVM
# version. LLVM is checked-in to the NaCl repository, but its head
# isn't necessarily the one that we currently use in PNaCl.
(pnacl_revision, translator_revision) = GetCurrentRevision()
tr_points_at['git svn id'] = pnacl_revision
tr_points_at['hash'] = FindCommitWithGitSvnId(tr_points_at['git svn id'])
tr_points_at['date'] = GitCommitInfo(
info='date', obj=tr_points_at['hash'], num=1)
tr_points_at['subject'] = GitCommitInfo(
info='subject', obj=tr_points_at['hash'], num=1)
recent_commits = GitCommitsSince(tr_points_at['date'])
tr_points_at['commits since'] = len(recent_commits)
assert len(recent_commits) > 1
if args.svn_id and args.svn_id <= int(tr_points_at['git svn id']):
Done(FmtOut(tr_points_at, pnacl_changes,
err=["Can't update to SVN ID r%s, the current "
"PNaCl revision's SVN ID (r%s) is more recent." %
(args.svn_id, tr_points_at['git svn id'])]))
# Find the commits changing PNaCl files that follow the previous
# PNaCl revision pointer.
pnacl_pathes = ['pnacl/', 'toolchain_build/']
pnacl_hashes = list(set(reduce(
lambda acc, lst: acc + lst,
[[cl for cl in recent_commits[:-1] if
GitChangesPath(cl, path)] for
path in pnacl_pathes])))
for hash in pnacl_hashes:
cl = CLInfo('PNaCl change ' + hash)
cl['hash'] = hash
for i in ['author', 'date', 'subject']:
cl[i] = GitCommitInfo(info=i, obj=hash, num=1)
for k,v in CommitMessageToCleanDict(
GitCommitInfo(info='body', obj=hash, num=1)).iteritems():
cl[k] = v
pnacl_changes.append(cl)
# The PNaCl hashes weren't ordered chronologically, make sure the
# changes are.
pnacl_changes.sort(key=lambda x: int(x['git svn id']))
if args.svn_id:
pnacl_changes = [cl for cl in pnacl_changes if
int(cl['git svn id']) <= args.svn_id]
if len(pnacl_changes) == 0:
Done(FmtOut(tr_points_at, pnacl_changes,
msg=['No PNaCl change since r%s.' %
tr_points_at['git svn id']]))
new_pnacl_revision = pnacl_changes[-1]['git svn id']
new_branch_name = ('pnacl-revision-update-to-%s' %
new_pnacl_revision)
if GitBranchExists(new_branch_name):
# TODO(jfb) Figure out if git-try succeeded, checkout the branch
# and dcommit.
raise Exception("Branch %s already exists, the change hasn't "
"landed yet.\nPlease check trybots and dcommit it "
"manually." % new_branch_name)
if args.dry_run:
DryRun("Would check out branch: " + new_branch_name)
else:
GitCheckoutNewBranch(new_branch_name)
if args.dry_run:
DryRun("Would update PNaCl revision to: %s" % new_pnacl_revision)
else:
SetCurrentRevision(new_pnacl_revision)
for f in REV_FILES:
GitAdd(f)
GitCommit(FmtOut(tr_points_at, pnacl_changes))
upload_res = UploadChanges()
msg += ['Upload result:\n%s' % upload_res]
try_res = GitTry()
msg += ['Try result:\n%s' % try_res]
GitCheckout('master', force=False)
Done(FmtOut(tr_points_at, pnacl_changes, msg=msg))
except SystemExit as e:
# Normal exit.
raise
except (BaseException, Exception) as e:
# Leave the branch around, if any was created: it'll prevent next
# runs of the cronjob from succeeding until the failure is fixed.
out = FmtOut(tr_points_at, pnacl_changes, msg=msg,
err=['Failed at %s: %s' % (datetime.datetime.now(), e)])
sys.stderr.write(out)
if not args.dry_run:
SendEmail(args.email, out)
GitCheckout('master', force=True)
raise
if __name__ == '__main__':
Main()
|
"""
Copyright (c) 2011, The MITRE Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
test
Description: Test module
Initial Version: Feb 23, 2011
@author: Michael Joseph Walsh
"""
def helloworld():
"""
Returns "hello world" annd prints "returning 'hello world'" to the
sys.stdout
"""
print "returning 'hello world'"
return "hello world"
def greaterThanTen(n):
"""
Returns True if 'n' is greater than 10
"""
return n>10
class MyClass(object):
def __init__(self):
self._globals = {}
@property
def globals(self):
return self._globals
@globals.setter
def globals(self, value):
self._globals = value
a = MyClass()
locals = {}
exec("a = 1" ,a.globals, locals)
print "globals = {0}".format([g for g in a.globals if not g.startswith("__")])
print "locals = {0}".format(locals)
exec("a += 1", a.globals, locals)
print "globals = {0}".format([g for g in a.globals if not g.startswith("__")])
print "locals = {0}".format(locals)
a.globals["b"] = 5
print "globals = {0}".format([g for g in a.globals if not g.startswith("__")])
print "locals = {0}".format(locals)
exec("global b;b += 1", a.globals, locals)
|
from django.views.generic import *
|
"""News Tests"""
|
from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from math import pi, sin, cos, log, exp, atan
DTOR = pi / 180.
RTOD = 180. / pi
def get_width_height(envelope):
# Getting the lower-left, upper-left, and upper-right
# coordinates of the envelope.
ll = Point(envelope[0][0])
ul = Point(envelope[0][1])
ur = Point(envelope[0][2])
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
# Google's tilesize is 256x256, square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each
# one of the zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the
# parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in xrange(num_zoom):
            # Getting the degrees and radians per pixel, and half the number
            # of pixels, for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
            self._radpp.append(z / (2 * pi)) # radians per pixel
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
# Calculating the pixel x coordinate by multiplying the longitude
        # value with the number of degrees/pixel at the given
# zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
# Creating the factor, and ensuring that 1 or -1 is not passed in as the
# base to the logarithm. Here's why:
# if fac = -1, we'll get log(0) which is undefined;
# if fac = 1, our logarithm base will be divided by 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
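        # Worked sketch of the formulas above (assumed inputs): at zoom 0 with
        # the default 256px tile, npix is 128 and degpp is 256/360. For
        # lonlat = (0, 0), px_x = round(128 + 0) = 128 and, since
        # fac = sin(0) = 0 makes log((1 + fac)/(1 - fac)) = 0, px_y = 128 too,
        # i.e. (0, 0) maps to the centre of the single zoom-0 tile.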
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
# Getting the pixel coordinates corresponding to the
        # longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = get_width_height(env)
center = env.centroid
for z in xrange(self._nzoom):
# Getting the tile at the zoom level.
tile = self.tile(center, z)
tile_w, tile_h = get_width_height(tile)
# When we span more than one tile, this is an approximately good
# zoom level.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z-1
# Otherwise, we've zoomed in to the max.
return self._nzoom-1
|
import calendar
import json
from datetime import datetime
from time import gmtime, time
from urlparse import parse_qsl, urlparse
from wsgiref.handlers import format_date_time
import jwt
from browserid.errors import ExpiredSignatureError
from django_statsd.clients import statsd
from receipts import certs
from lib.cef_loggers import receipt_cef
from lib.crypto.receipt import sign
from lib.utils import static_url
from services.utils import settings
from utils import (CONTRIB_CHARGEBACK, CONTRIB_NO_CHARGE, CONTRIB_PURCHASE,
CONTRIB_REFUND, log_configure, log_exception, log_info,
mypool)
log_configure()
import receipts # NOQA, used for patching in the tests
status_codes = {
200: '200 OK',
405: '405 Method Not Allowed',
500: '500 Internal Server Error',
}
class VerificationError(Exception):
pass
class InvalidReceipt(Exception):
"""
InvalidReceipt takes a message, which is then displayed back to the app so
they can understand the failure.
"""
pass
class RefundedReceipt(Exception):
pass
class Verify:
def __init__(self, receipt, environ):
self.receipt = receipt
self.environ = environ
# This is so the unit tests can override the connection.
self.conn, self.cursor = None, None
def check_full(self):
"""
        This is the default that verify will use; it will
        do the entire stack of checks.
"""
receipt_domain = urlparse(static_url('WEBAPPS_RECEIPT_URL')).netloc
try:
self.decoded = self.decode()
self.check_type('purchase-receipt')
self.check_url(receipt_domain)
self.check_purchase()
except InvalidReceipt, err:
return self.invalid(str(err))
except RefundedReceipt:
return self.refund()
return self.ok_or_expired()
def check_without_purchase(self):
"""
This is what the developer and reviewer receipts do, we aren't
expecting a purchase, but require a specific type and install.
"""
try:
self.decoded = self.decode()
self.check_type('developer-receipt', 'reviewer-receipt')
self.check_url(settings.DOMAIN)
except InvalidReceipt, err:
return self.invalid(str(err))
return self.ok_or_expired()
def check_without_db(self, status):
"""
This is what test receipts do, no purchase or install check.
In this case the return is custom to the caller.
"""
assert status in ['ok', 'expired', 'invalid', 'refunded']
try:
self.decoded = self.decode()
self.check_type('test-receipt')
self.check_url(settings.DOMAIN)
except InvalidReceipt, err:
return self.invalid(str(err))
return getattr(self, status)()
def decode(self):
"""
Verifies that the receipt can be decoded and that the initial
contents of the receipt are correct.
        If it's invalid, then just return invalid rather than give out any
information.
"""
try:
receipt = decode_receipt(self.receipt)
except:
log_exception({'receipt': '%s...' % self.receipt[:10],
'app': self.get_app_id(raise_exception=False)})
log_info('Error decoding receipt')
raise InvalidReceipt('ERROR_DECODING')
try:
assert receipt['user']['type'] == 'directed-identifier'
except (AssertionError, KeyError):
log_info('No directed-identifier supplied')
raise InvalidReceipt('NO_DIRECTED_IDENTIFIER')
return receipt
def check_type(self, *types):
"""
Verifies that the type of receipt is what we expect.
"""
if self.decoded.get('typ', '') not in types:
log_info('Receipt type not in %s' % ','.join(types))
raise InvalidReceipt('WRONG_TYPE')
def check_url(self, domain):
"""
Verifies that the URL of the verification is what we expect.
:param domain: the domain you expect the receipt to be verified at,
note that "real" receipts are verified at a different domain
from the main marketplace domain.
"""
path = self.environ['PATH_INFO']
parsed = urlparse(self.decoded.get('verify', ''))
if parsed.netloc != domain:
log_info('Receipt had invalid domain')
raise InvalidReceipt('WRONG_DOMAIN')
if parsed.path != path:
log_info('Receipt had the wrong path')
raise InvalidReceipt('WRONG_PATH')
def get_user(self):
"""
Attempt to retrieve the user information from the receipt.
"""
try:
return self.decoded['user']['value']
except KeyError:
# If somehow we got a valid receipt without a uuid
# that's a problem. Log here.
log_info('No user in receipt')
raise InvalidReceipt('NO_USER')
def get_storedata(self):
"""
Attempt to retrieve the storedata information from the receipt.
"""
try:
storedata = self.decoded['product']['storedata']
return dict(parse_qsl(storedata))
except Exception, e:
log_info('Invalid store data: {err}'.format(err=e))
raise InvalidReceipt('WRONG_STOREDATA')
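        # Illustrative sketch (assumed storedata string): a receipt whose
        # product storedata is 'id=337141&contrib=588' decodes here to
        # {'id': '337141', 'contrib': '588'}, which get_app_id() and
        # get_contribution_id() below then read.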
def get_app_id(self, raise_exception=True):
"""
Attempt to retrieve the app id from the storedata in the receipt.
"""
try:
return int(self.get_storedata()['id'])
except Exception, e:
if raise_exception:
# There was some value for storedata but it was invalid.
log_info('Invalid store data for app id: {err}'.format(
err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_contribution_id(self):
"""
Attempt to retrieve the contribution id
from the storedata in the receipt.
"""
try:
return int(self.get_storedata()['contrib'])
except Exception, e:
# There was some value for storedata but it was invalid.
log_info('Invalid store data for contrib id: {err}'.format(
err=e))
raise InvalidReceipt('WRONG_STOREDATA')
def get_inapp_id(self):
"""
Attempt to retrieve the inapp id
from the storedata in the receipt.
"""
return self.get_storedata()['inapp_id']
def setup_db(self):
"""
Establish a connection to the database.
All database calls are done at a low level and avoid the
Django ORM.
"""
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
def check_purchase(self):
"""
Verifies that the app or inapp has been purchased.
"""
storedata = self.get_storedata()
if 'contrib' in storedata:
self.check_purchase_inapp()
else:
self.check_purchase_app()
def check_purchase_inapp(self):
"""
Verifies that the inapp has been purchased.
"""
self.setup_db()
sql = """SELECT i.guid, c.type FROM stats_contributions c
JOIN inapp_products i ON i.id=c.inapp_product_id
WHERE c.id = %(contribution_id)s LIMIT 1;"""
self.cursor.execute(
sql,
{'contribution_id': self.get_contribution_id()}
)
result = self.cursor.fetchone()
if not result:
log_info('Invalid in-app receipt, no purchase')
raise InvalidReceipt('NO_PURCHASE')
contribution_inapp_id, purchase_type = result
self.check_purchase_type(purchase_type)
self.check_inapp_product(contribution_inapp_id)
def check_inapp_product(self, contribution_inapp_id):
if contribution_inapp_id != self.get_inapp_id():
log_info('Invalid receipt, inapp_id does not match')
raise InvalidReceipt('NO_PURCHASE')
def check_purchase_app(self):
"""
Verifies that the app has been purchased by the user.
"""
self.setup_db()
sql = """SELECT type FROM addon_purchase
WHERE addon_id = %(app_id)s
AND uuid = %(uuid)s LIMIT 1;"""
self.cursor.execute(sql, {'app_id': self.get_app_id(),
'uuid': self.get_user()})
result = self.cursor.fetchone()
if not result:
log_info('Invalid app receipt, no purchase')
raise InvalidReceipt('NO_PURCHASE')
self.check_purchase_type(result[0])
def check_purchase_type(self, purchase_type):
"""
Verifies that the purchase type is of a valid type.
"""
if purchase_type in (CONTRIB_REFUND, CONTRIB_CHARGEBACK):
log_info('Valid receipt, but refunded')
raise RefundedReceipt
elif purchase_type in (CONTRIB_PURCHASE, CONTRIB_NO_CHARGE):
log_info('Valid receipt')
return
else:
log_info('Valid receipt, but invalid contribution')
raise InvalidReceipt('WRONG_PURCHASE')
def invalid(self, reason=''):
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'verify',
'Invalid receipt'
)
return {'status': 'invalid', 'reason': reason}
def ok_or_expired(self):
        # This receipt is ok, now let's check its expiry.
# If it's expired, we'll have to return a new receipt
try:
expire = int(self.decoded.get('exp', 0))
except ValueError:
log_info('Error with expiry in the receipt')
return self.expired()
now = calendar.timegm(gmtime()) + 10 # For any clock skew.
if now > expire:
log_info('This receipt has expired: %s UTC < %s UTC'
% (datetime.utcfromtimestamp(expire),
datetime.utcfromtimestamp(now)))
return self.expired()
return self.ok()
def ok(self):
return {'status': 'ok'}
def refund(self):
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'verify',
'Refunded receipt'
)
return {'status': 'refunded'}
def expired(self):
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'verify',
'Expired receipt'
)
if settings.WEBAPPS_RECEIPT_EXPIRED_SEND:
self.decoded['exp'] = (calendar.timegm(gmtime()) +
settings.WEBAPPS_RECEIPT_EXPIRY_SECONDS)
# Log that we are signing a new receipt as well.
receipt_cef.log(
self.environ,
self.get_app_id(raise_exception=False),
'sign',
'Expired signing request'
)
return {'status': 'expired',
'receipt': sign(self.decoded)}
return {'status': 'expired'}
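# Minimal usage sketch (assumed WSGI environ and receipt blob): callers build a
# Verify and pick one of the check_* entry points, getting back a status dict,
# e.g.
#   verify = Verify(receipt_data, environ)
#   result = verify.check_full()  # {'status': 'ok'}, {'status': 'expired'}, ...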
def get_headers(length):
return [('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Methods', 'POST'),
('Content-Type', 'application/json'),
('Content-Length', str(length)),
('Cache-Control', 'no-cache'),
('Last-Modified', format_date_time(time()))]
def decode_receipt(receipt):
"""
Cracks the receipt using the private key. This will probably change
to using the cert at some point, especially when we get the HSM.
"""
with statsd.timer('services.decode'):
if settings.SIGNING_SERVER_ACTIVE:
verifier = certs.ReceiptVerifier(valid_issuers=
settings.SIGNING_VALID_ISSUERS)
try:
result = verifier.verify(receipt)
except ExpiredSignatureError:
# Until we can do something meaningful with this, just ignore.
return jwt.decode(receipt.split('~')[1], verify=False)
if not result:
raise VerificationError()
return jwt.decode(receipt.split('~')[1], verify=False)
else:
key = jwt.rsa_load(settings.WEBAPPS_RECEIPT_KEY)
raw = jwt.decode(receipt, key)
return raw
def status_check(environ):
output = ''
# Check we can read from the users_install table, should be nice and
# fast. Anything that fails here, connecting to db, accessing table
# will be an error we need to know about.
if not settings.SIGNING_SERVER_ACTIVE:
return 500, 'SIGNING_SERVER_ACTIVE is not set'
try:
conn = mypool.connect()
cursor = conn.cursor()
cursor.execute('SELECT id FROM users_install ORDER BY id DESC LIMIT 1')
except Exception, err:
return 500, str(err)
return 200, output
def receipt_check(environ):
output = ''
with statsd.timer('services.verify'):
data = environ['wsgi.input'].read()
try:
verify = Verify(data, environ)
return 200, json.dumps(verify.check_full())
except:
log_exception('<none>')
return 500, ''
return output
def application(environ, start_response):
body = ''
path = environ.get('PATH_INFO', '')
if path == '/services/status/':
status, body = status_check(environ)
else:
# Only allow POST through as per spec.
if environ.get('REQUEST_METHOD') != 'POST':
status = 405
else:
status, body = receipt_check(environ)
start_response(status_codes[status], get_headers(len(body)))
return [body]
|
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
from ._common import ConfidenceInterval
class BinomTestResult:
"""
Result of `scipy.stats.binomtest`.
Attributes
----------
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
pvalue : float
The p-value of the hypothesis test.
proportion_estimate : float
The estimate of the proportion of successes.
Methods
-------
proportion_ci :
Compute the confidence interval for the estimate of the proportion.
"""
def __init__(self, k, n, alternative, pvalue, proportion_estimate):
self.k = k
self.n = n
self.alternative = alternative
self.proportion_estimate = proportion_estimate
self.pvalue = pvalue
def __repr__(self):
s = ("BinomTestResult("
f"k={self.k}, "
f"n={self.n}, "
f"alternative={self.alternative!r}, "
f"proportion_estimate={self.proportion_estimate}, "
f"pvalue={self.pvalue})")
return s
def proportion_ci(self, confidence_level=0.95, method='exact'):
"""
Compute the confidence interval for the estimated proportion.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval
of the estimated proportion. Default is 0.95.
method : {'exact', 'wilson', 'wilsoncc'}, optional
Selects the method used to compute the confidence interval
for the estimate of the proportion:
'exact' :
Use the Clopper-Pearson exact method [1]_.
'wilson' :
Wilson's method, without continuity correction ([2]_, [3]_).
'wilsoncc' :
Wilson's method, with continuity correction ([2]_, [3]_).
Default is ``'exact'``.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
References
----------
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
fiducial limits illustrated in the case of the binomial,
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
.. [2] E. B. Wilson, Probable inference, the law of succession, and
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
(1927).
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
single proportion: comparison of seven methods, Statistics
in Medicine, 17, pp 857-872 (1998).
Examples
--------
>>> from scipy.stats import binomtest
>>> result = binomtest(k=7, n=50, p=0.1)
>>> result.proportion_estimate
0.14
>>> result.proportion_ci()
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
"""
if method not in ('exact', 'wilson', 'wilsoncc'):
raise ValueError("method must be one of 'exact', 'wilson' or "
"'wilsoncc'.")
if not (0 <= confidence_level <= 1):
raise ValueError('confidence_level must be in the interval '
'[0, 1].')
if method == 'exact':
low, high = _binom_exact_conf_int(self.k, self.n,
confidence_level,
self.alternative)
else:
# method is 'wilson' or 'wilsoncc'
low, high = _binom_wilson_conf_int(self.k, self.n,
confidence_level,
self.alternative,
correction=method == 'wilsoncc')
return ConfidenceInterval(low=low, high=high)
def _findp(func):
try:
p = brentq(func, 0, 1)
except RuntimeError:
raise RuntimeError('numerical solver failed to converge when '
'computing the confidence limits') from None
except ValueError as exc:
raise ValueError('brentq raised a ValueError; report this to the '
'SciPy developers') from exc
return p
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
    Compute the exact confidence interval for the binomial test.
    Returns prop_low, prop_high.
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
phigh = 1.0
return plow, phigh
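# Rough sketch of what the exact (Clopper-Pearson) interval above does for the
# documented example (k=7, n=50, two-sided, confidence_level=0.95): with
# alpha = 0.025, brentq solves binom.sf(6, 50, p) = alpha for the lower limit
# and binom.cdf(7, 50, p) = alpha for the upper limit, giving approximately
# (0.058, 0.267) as shown in the proportion_ci docstring.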
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
# For reference, the formulas implemented here are from
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
denom = 2*(n + z**2)
center = (2*n*p + z**2)/denom
q = 1 - p
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
lo = center - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
hi = center + dhi
else:
delta = z/denom * sqrt(4*n*p*q + z**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = center - delta
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = center + delta
return lo, hi
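# Worked sketch of the uncorrected Wilson interval above (assumed inputs
# k=7, n=50, two-sided, confidence_level=0.95): p = 0.14, z = ndtri(0.975)
# ~= 1.96, denom = 2*(50 + z**2) ~= 107.7, center = (2*50*0.14 + z**2)/denom
# ~= 0.166, delta = z/denom * sqrt(4*50*0.14*0.86 + z**2) ~= 0.096, so the
# interval is roughly (0.070, 0.262).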
def binomtest(k, n, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
The binomial test [1]_ is a test of the null hypothesis that the
probability of success in a Bernoulli experiment is `p`.
Details of the test can be found in many texts on statistics, such
as section 24.5 of [2]_.
Parameters
----------
k : int
The number of successes.
n : int
The number of trials.
p : float, optional
The hypothesized probability of success, i.e. the expected
proportion of successes. The value must be in the interval
``0 <= p <= 1``. The default value is ``p = 0.5``.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
result : `~scipy.stats._result_classes.BinomTestResult` instance
The return value is an object with the following attributes:
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
pvalue : float
The p-value of the hypothesis test.
proportion_estimate : float
The estimate of the proportion of successes.
The object has the following methods:
proportion_ci(confidence_level=0.95, method='exact') :
Compute the confidence interval for ``proportion_estimate``.
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
Examples
--------
>>> from scipy.stats import binomtest
A car manufacturer claims that no more than 10% of their cars are unsafe.
15 cars are inspected for safety, 3 were found to be unsafe. Test the
manufacturer's claim:
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
>>> result.pvalue
0.18406106910639114
The null hypothesis cannot be rejected at the 5% level of significance
because the returned p-value is greater than the critical value of 5%.
The estimated proportion is simply ``3/15``:
>>> result.proportion_estimate
0.2
We can use the `proportion_ci()` method of the result to compute the
confidence interval of the estimate:
>>> result.proportion_ci(confidence_level=0.95)
ConfidenceInterval(low=0.05684686759024681, high=1.0)
"""
k = _validate_int(k, 'k', minimum=0)
n = _validate_int(n, 'n', minimum=1)
if k > n:
raise ValueError('k must not be greater than n.')
if not (0 <= p <= 1):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized; \n"
"must be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = binom.cdf(k, n, p)
elif alternative == 'greater':
pval = binom.sf(k-1, n, p)
else:
# alternative is 'two-sided'
d = binom.pmf(k, n, p)
rerr = 1 + 1e-7
if k == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif k < p * n:
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
-d*rerr, np.ceil(p * n), n)
# y is the number of terms between mode and n that are <= d*rerr.
# ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
# if the first equality doesn't hold, y=n-ix. Otherwise, we
# need to include ix as well as the equality holds. Note that
# the equality will hold in very very rare situations due to rerr.
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
else:
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
d*rerr, 0, np.floor(p * n))
# y is the number of terms between 0 and mode that are <= d*rerr.
# we need to add a 1 to account for the 0 index.
# For comparing this with old behavior, see
# tst_binary_srch_for_binom_tst method in test_morestats.
y = ix + 1
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
pval = min(1.0, pval)
result = BinomTestResult(k=k, n=n, alternative=alternative,
proportion_estimate=k/n, pvalue=pval)
return result
def _binary_search_for_binom_tst(a, d, lo, hi):
"""
Conducts an implicit binary search on a function specified by `a`.
Meant to be used on the binomial PMF for the case of two-sided tests
to obtain the value on the other side of the mode where the tail
probability should be computed. The values on either side of
the mode are always in order, meaning binary search is applicable.
Parameters
----------
a : callable
The function over which to perform binary search. Its values
for inputs lo and hi should be in ascending order.
d : float
The value to search.
lo : int
The lower end of range to search.
hi : int
The higher end of the range to search.
Returns
    -------
int
The index, i between lo and hi
such that a(i)<=d<a(i+1)
"""
while lo < hi:
mid = lo + (hi-lo)//2
midval = a(mid)
if midval < d:
lo = mid+1
elif midval > d:
hi = mid-1
else:
return mid
if a(lo) <= d:
return lo
else:
return lo-1
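# Small illustrative trace (assumed toy inputs): with a = lambda x: x, d = 3.5,
# lo = 0 and hi = 10, the search narrows to lo == hi == 4 and, since a(4) > d,
# returns 3 -- the largest index i with a(i) <= d < a(i+1), as promised by the
# docstring.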
|
import re
from collections import namedtuple
import sqlparse
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',))
field_size_re = _lazy_re_compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
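# For instance, get_field_size('varchar(11)') returns 11, while a type name
# without an explicit size, such as 'text', returns None.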
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'bigint unsigned': 'PositiveBigIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'varchar': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower().split('(', 1)[0].strip()
return self.base_data_types_reverse[key]
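        # Lookup sketch: the key is lowercased and truncated at the first '(',
        # so both 'varchar(255)' and 'VARCHAR' resolve to 'CharField' here.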
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {'BigIntegerField', 'IntegerField', 'SmallIntegerField'}:
# No support for BigAutoField or SmallAutoField as SQLite treats
# all integer primary keys as signed 64-bit integers.
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name))
return [
FieldInfo(
name, data_type, None, get_field_size(data_type), None, None,
not notnull, default, pk == 1,
)
for cid, name, data_type, notnull, default, pk in cursor.fetchall()
]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{'table': table_name, 'column': pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
create_sql, table_type = cursor.fetchone()
if table_type == 'view':
            # It's a view; there are no relations to return.
return relations
results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
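        # Illustrative sketch (assumed schema): for a table created with
        #   CREATE TABLE "entry" ("id" integer PRIMARY KEY,
        #                         "author_id" integer REFERENCES "author" ("id"))
        # the REFERENCES clause is picked up above and this method returns
        # {'author_id': ('id', 'author')}, provided the "author" table really
        # defines an "id" column.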
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
create_sql, table_type = row
if table_type == 'view':
# Views don't have a primary key.
return None
fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
for field_desc in fields_sql.split(','):
field_desc = field_desc.strip()
m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc)
if m:
return m.group(1) if m.group(1) else m.group(2)
return None
def _get_foreign_key_constraints(self, cursor, table_name):
constraints = {}
cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Remaining on_update/on_delete/match values are of no interest.
id_, _, table, from_, to = row[:5]
constraints['fk_%d' % id_] = {
'columns': [from_],
'primary_key': False,
'unique': False,
'foreign_key': (table, to),
'check': False,
'index': False,
}
return constraints
def _parse_column_or_constraint_definition(self, tokens, columns):
token = None
is_constraint_definition = None
field_name = None
constraint_name = None
unique = False
unique_columns = []
check = False
check_columns = []
braces_deep = 0
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, '('):
braces_deep += 1
elif token.match(sqlparse.tokens.Punctuation, ')'):
braces_deep -= 1
if braces_deep < 0:
# End of columns and constraints for table definition.
break
elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ','):
# End of current column or constraint definition.
break
# Detect column or constraint definition by first token.
if is_constraint_definition is None:
is_constraint_definition = token.match(sqlparse.tokens.Keyword, 'CONSTRAINT')
if is_constraint_definition:
continue
if is_constraint_definition:
# Detect constraint name by second token.
if constraint_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
constraint_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
constraint_name = token.value[1:-1]
# Start constraint columns parsing after UNIQUE keyword.
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
unique = True
unique_braces_deep = braces_deep
elif unique:
if unique_braces_deep == braces_deep:
if unique_columns:
# Stop constraint parsing.
unique = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
unique_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
unique_columns.append(token.value[1:-1])
else:
# Detect field name by first token.
if field_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
field_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
field_name = token.value[1:-1]
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
unique_columns = [field_name]
# Start constraint columns parsing after CHECK keyword.
if token.match(sqlparse.tokens.Keyword, 'CHECK'):
check = True
check_braces_deep = braces_deep
elif check:
if check_braces_deep == braces_deep:
if check_columns:
# Stop constraint parsing.
check = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
if token.value in columns:
check_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
if token.value[1:-1] in columns:
check_columns.append(token.value[1:-1])
unique_constraint = {
'unique': True,
'columns': unique_columns,
'primary_key': False,
'foreign_key': None,
'check': False,
'index': False,
} if unique_columns else None
check_constraint = {
'check': True,
'columns': check_columns,
'primary_key': False,
'unique': False,
'foreign_key': None,
'index': False,
} if check_columns else None
return constraint_name, unique_constraint, check_constraint, token
def _parse_table_constraints(self, sql, columns):
        # Check constraint parsing is based on the SQLite syntax diagram.
# https://www.sqlite.org/syntaxdiagrams.html#table-constraint
statement = sqlparse.parse(sql)[0]
constraints = {}
        unnamed_constraints_index = 0
tokens = (token for token in statement.flatten() if not token.is_whitespace)
# Go to columns and constraint definition
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, '('):
break
# Parse columns and constraint definition
while True:
constraint_name, unique, check, end_token = self._parse_column_or_constraint_definition(tokens, columns)
if unique:
if constraint_name:
constraints[constraint_name] = unique
else:
                    unnamed_constraints_index += 1
                    constraints['__unnamed_constraint_%s__' % unnamed_constraints_index] = unique
if check:
if constraint_name:
constraints[constraint_name] = check
else:
                    unnamed_constraints_index += 1
                    constraints['__unnamed_constraint_%s__' % unnamed_constraints_index] = check
if end_token.match(sqlparse.tokens.Punctuation, ')'):
break
return constraints
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Find inline check constraints.
try:
table_schema = cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % (
self.connection.ops.quote_name(table_name),
)
).fetchone()[0]
except TypeError:
# table_name is a view.
pass
else:
columns = {info.name for info in self.get_table_description(cursor, table_name)}
constraints.update(self._parse_table_constraints(table_schema, columns))
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# SQLite 3.8.9+ has 5 columns, however older versions only give 3
        # columns. Discard the last two columns if present.
number, index, unique = row[:3]
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
# There's at most one row.
sql, = cursor.fetchone() or (None,)
# Inline constraints are already detected in
# _parse_table_constraints(). The reasons to avoid fetching inline
# constraints from `PRAGMA index_list` are:
# - Inline constraints can have a different name and information
# than what `PRAGMA index_list` gives.
# - Not all inline constraints may appear in `PRAGMA index_list`.
if not sql:
# An inline constraint
continue
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": None,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = Index.suffix
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": None,
"check": False,
"index": False,
}
constraints.update(self._get_foreign_key_constraints(cursor, table_name))
return constraints
|
try:
from django.utils.encoding import force_text # noqa
except ImportError:
from django.utils.encoding import force_unicode as force_text # noqa
try:
from urllib2 import urlopen # noqa
except ImportError:
from urllib.request import urlopen # noqa
|
r"""
Modeling and inversion of temperature residuals measured in wells due to
temperature perturbations at the surface.
Perturbations can be of two kinds: **abrupt** or **linear**.
Forward modeling of these types of changes is done with functions:
* :func:`~fatiando.geothermal.climsig.abrupt`
* :func:`~fatiando.geothermal.climsig.linear`
Assuming that the temperature perturbation was abrupt, the residual
temperature at a depth :math:`z_i` in the well at a time :math:`t` after the
perturbation is given by
.. math::
T_i(z_i) = A \left[1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)\right]
where :math:`A` is the amplitude of the perturbation, :math:`\lambda` is the
thermal diffusivity of the medium, and :math:`\mathrm{erf}` is the error
function.
For the case of a linear change, the temperature is
.. math::
T_i(z_i) = A \left[
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
\right]
Given the temperature measured at different depths, we can **invert** for the
amplitude and age of the change. The available inversion solvers are:
* :class:`~fatiando.geothermal.climsig.SingleChange`: inverts for the
parameters of a single temperature change. Can use both abrupt and linear
models.
----
"""
from __future__ import division
import numpy
import scipy.special
from ..inversion.base import Misfit
from ..constants import THERMAL_DIFFUSIVITY_YEAR
def linear(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to a linear
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
        Time since the perturbation occurred (in years)
* zp : array
The depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
    Returns:
* temp : array
The residual temperatures measured along the well
"""
tmp = zp / numpy.sqrt(4. * diffus * age)
res = amp * ((1. + 2 * tmp ** 2) * scipy.special.erfc(tmp)
- 2. / numpy.sqrt(numpy.pi) * tmp * numpy.exp(-tmp ** 2))
return res
def abrupt(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to an abrupt
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
        Time since the perturbation occurred (in years)
* zp : array
        Array with the depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
    Returns:
* temp : array
The residual temperatures measured along the well
"""
return amp * (1. - scipy.special.erf(zp / numpy.sqrt(4. * diffus * age)))
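# Quick sanity check (illustrative only): at the surface zp = 0 and erf(0) = 0,
# so abrupt(amp, age, 0) returns the full amplitude amp; the residual then
# decays toward zero with increasing depth.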
class SingleChange(Misfit):
r"""
Invert the well temperature data for a single change in temperature.
The parameters of the change are its amplitude and age.
See the docstring of :mod:`fatiando.geothermal.climsig` for more
information and examples.
Parameters:
* temp : array
The temperature profile
* zp : array
Depths along the profile
* mode : string
The type of change: ``'abrupt'`` for an abrupt change, ``'linear'`` for
a linear change.
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
.. note::
The recommended solver for this inverse problem is the
        Levenberg-Marquardt method. Since this is a non-linear problem, set the
desired method and initial solution using the
:meth:`~fatiando.inversion.base.FitMixin.config` method.
        See the example below.
Example with synthetic data:
>>> import numpy
>>> zp = numpy.arange(0, 100, 1)
>>> # For an ABRUPT change
>>> amp = 2
>>> age = 100 # Uses years to avoid overflows
>>> temp = abrupt(amp, age, zp)
>>> # Run the inversion for the amplitude and time
    >>> # This is a non-linear problem, so use the Levenberg-Marquardt
>>> # algorithm with an initial estimate
>>> solver = SingleChange(temp, zp, mode='abrupt').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
>>> print "amp: %.2f age: %.2f" % (amp_, age_)
amp: 2.00 age: 100.00
>>> # For a LINEAR change
>>> amp = 3.45
>>> age = 52.5
>>> temp = linear(amp, age, zp)
>>> solver = SingleChange(temp, zp, mode='linear').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
>>> print "amp: %.2f age: %.2f" % (amp_, age_)
amp: 3.45 age: 52.50
Notes:
For **abrupt** changes, derivatives with respect to the amplitude and age
are calculated using the formula
.. math::
\frac{\partial T_i}{\partial A} = 1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)
and
.. math::
\frac{\partial T_i}{\partial t} = \frac{A}{t\sqrt{\pi}}
\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\exp\left[-\left(\frac{z_i}{\sqrt{4\lambda t}}\right)^2\right]
respectively.
For **linear** changes, derivatives with respect to the age are calculated
using a 2-point finite difference approximation. Derivatives with respect
    to amplitude are calculated using the formula
.. math::
\frac{\partial T_i}{\partial A} =
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
"""
def __init__(self, temp, zp, mode, diffus=THERMAL_DIFFUSIVITY_YEAR):
if len(temp) != len(zp):
raise ValueError("temp and zp must be of same length")
if mode not in ['abrupt', 'linear']:
raise ValueError("Invalid mode: %s. Must be 'abrupt' or 'linear'"
% (mode))
super(SingleChange, self).__init__(
data=temp,
positional=dict(zp=zp),
model=dict(diffus=float(diffus), mode=mode),
nparams=2, islinear=False)
def _get_predicted(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
if self.model['mode'] == 'abrupt':
return abrupt(amp, age, zp, diffus)
if self.model['mode'] == 'linear':
return linear(amp, age, zp, diffus)
def _get_jacobian(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
mode = self.model['mode']
if mode == 'abrupt':
tmp = zp / numpy.sqrt(4. * diffus * age)
jac = numpy.transpose([
abrupt(1., age, zp, diffus),
(amp * tmp * numpy.exp(-(tmp ** 2)) /
(numpy.sqrt(numpy.pi) * age))])
if mode == 'linear':
delta = 0.5
at_p = linear(amp, age, zp, diffus)
jac = numpy.transpose([
linear(1., age, zp, diffus),
(linear(amp, age + delta, zp, diffus) -
linear(amp, age - delta, zp, diffus)) / (2 * delta)])
return jac
|
from paths import rpath,mpath,opath
from make_apex_cubes import all_apexfiles,get_source_tel_line,_is_sci, hdr_to_freq
from pyspeckit.spectrum.readers import read_class
from astropy.table import Table
from astropy import log
from astropy.utils.console import ProgressBar
import numpy as np
import os
import pylab as pl
def tsys_data(plot=False):
if plot:
fig1 = pl.figure(1)
fig2 = pl.figure(2)
fig1.clf()
fig2.clf()
ax1 = fig1.gca()
ax2 = fig2.gca()
datadict = {}
tbldict = {}
for apex_filename in all_apexfiles:
log.info(apex_filename)
cl = read_class.ClassObject(apex_filename)
sourcereg,line,telescopes = get_source_tel_line(apex_filename)
sci_sources = [source for source in cl.sources
if _is_sci(source, sourcereg)]
datadict[apex_filename] = {t:[] for t in telescopes}
for telescope in telescopes:
log.info('{0}: {1}'.format(apex_filename, telescope))
selection = [x
for source in sci_sources
for x in cl.select_spectra(telescope=telescope,
line=line,
source=source)]
spdheader = cl.read_observations(selection, progressbar=True)
datadict[apex_filename][telescope] = zip(*[(sp.std(), h['TSYS'])
for sp,h in ProgressBar(spdheader)])
tbl = Table([datadict[apex_filename][t][ii]
for t in telescopes
for ii in (0,1)],
names=[t+"_"+s
for t in telescopes
for s in ('STDDEV','TSYS',)],
dtype=['float'
for t in telescopes
for s in ('STDDEV','TSYS',)
])
log.info(os.path.basename(apex_filename)+"_tsys.fits")
tbl.write(os.path.basename(apex_filename)+"_tsys.fits", overwrite=True)
tbldict[apex_filename] = tbl
if plot:
ax1.plot(tbl['{0}_TSYS'.format(telescopes[0])],
tbl['{0}_STDDEV'.format(telescopes[0])],
',', alpha=0.8)
ax1.set_xlabel("TSYS")
ax1.set_ylabel("Std Dev")
fig1.savefig("StdDev_vs_TSYS_{0}.png".format(telescopes[0]))
ax2.plot(tbl['{0}_TSYS'.format(telescopes[1])],
tbl['{0}_STDDEV'.format(telescopes[1])],
',', alpha=0.8)
ax2.set_xlabel("TSYS")
ax2.set_ylabel("Std Dev")
pl.draw()
pl.show()
fig2.savefig("StdDev_vs_TSYS_{0}.png".format(telescopes[1]))
return datadict,tbldict
|
from .pandas_vb_common import *
class SetOperations(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=10000, freq='T')
self.rng2 = self.rng[:(-1)]
# object index with datetime values
if (self.rng.dtype == object):
self.idx_rng = self.rng.view(Index)
else:
self.idx_rng = self.rng.asobject
self.idx_rng2 = self.idx_rng[:(-1)]
# other datetime
N = 100000
A = N - 20000
B = N + 20000
self.dtidx1 = DatetimeIndex(range(N))
self.dtidx2 = DatetimeIndex(range(A, B))
self.dtidx3 = DatetimeIndex(range(N, B))
# integer
self.N = 1000000
self.options = np.arange(self.N)
self.left = Index(
self.options.take(np.random.permutation(self.N)[:(self.N // 2)]))
self.right = Index(
self.options.take(np.random.permutation(self.N)[:(self.N // 2)]))
# strings
N = 10000
strs = tm.rands_array(10, N)
self.leftstr = Index(strs[:N * 2 // 3])
self.rightstr = Index(strs[N // 3:])
def time_datetime_intersection(self):
self.rng.intersection(self.rng2)
def time_datetime_union(self):
self.rng.union(self.rng2)
def time_datetime_difference(self):
self.dtidx1.difference(self.dtidx2)
def time_datetime_difference_disjoint(self):
self.dtidx1.difference(self.dtidx3)
def time_datetime_symmetric_difference(self):
self.dtidx1.symmetric_difference(self.dtidx2)
def time_index_datetime_intersection(self):
self.idx_rng.intersection(self.idx_rng2)
def time_index_datetime_union(self):
self.idx_rng.union(self.idx_rng2)
def time_int64_intersection(self):
self.left.intersection(self.right)
def time_int64_union(self):
self.left.union(self.right)
def time_int64_difference(self):
self.left.difference(self.right)
def time_int64_symmetric_difference(self):
self.left.symmetric_difference(self.right)
def time_str_difference(self):
self.leftstr.difference(self.rightstr)
def time_str_symmetric_difference(self):
self.leftstr.symmetric_difference(self.rightstr)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = pd.date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Float64(object):
goal_time = 0.2
def setup(self):
self.idx = tm.makeFloatIndex(1000000)
self.mask = ((np.arange(self.idx.size) % 3) == 0)
self.series_mask = Series(self.mask)
self.baseidx = np.arange(1000000.0)
def time_boolean_indexer(self):
self.idx[self.mask]
def time_boolean_series_indexer(self):
self.idx[self.series_mask]
def time_construct(self):
Index(self.baseidx)
def time_div(self):
(self.idx / 2)
def time_get(self):
self.idx[1]
def time_mul(self):
(self.idx * 2)
def time_slice_indexer_basic(self):
self.idx[:(-1)]
def time_slice_indexer_even(self):
self.idx[::2]
class StringIndex(object):
goal_time = 0.2
def setup(self):
self.idx = tm.makeStringIndex(1000000)
self.mask = ((np.arange(1000000) % 3) == 0)
self.series_mask = Series(self.mask)
def time_boolean_indexer(self):
self.idx[self.mask]
def time_boolean_series_indexer(self):
self.idx[self.series_mask]
def time_slice_indexer_basic(self):
self.idx[:(-1)]
def time_slice_indexer_even(self):
self.idx[::2]
class Multi1(object):
goal_time = 0.2
def setup(self):
(n, k) = (200, 5000)
self.levels = [np.arange(n), tm.makeStringIndex(n).values, (1000 + np.arange(n))]
self.labels = [np.random.choice(n, (k * n)) for lev in self.levels]
self.mi = MultiIndex(levels=self.levels, labels=self.labels)
self.iterables = [tm.makeStringIndex(10000), range(20)]
def time_duplicated(self):
self.mi.duplicated()
def time_from_product(self):
MultiIndex.from_product(self.iterables)
class Multi2(object):
goal_time = 0.2
def setup(self):
self.n = ((((3 * 5) * 7) * 11) * (1 << 10))
(low, high) = (((-1) << 12), (1 << 12))
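        # f(k) draws n // k random integers and repeats each one k times, so every
        # level built below has n values with a different degree of duplication.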
self.f = (lambda k: np.repeat(np.random.randint(low, high, (self.n // k)), k))
self.i = np.random.permutation(self.n)
self.mi = MultiIndex.from_arrays([self.f(11), self.f(7), self.f(5), self.f(3), self.f(1)])[self.i]
self.a = np.repeat(np.arange(100), 1000)
self.b = np.tile(np.arange(1000), 100)
self.midx2 = MultiIndex.from_arrays([self.a, self.b])
self.midx2 = self.midx2.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi.sortlevel()
def time_sortlevel_zero(self):
self.midx2.sortlevel(0)
def time_sortlevel_one(self):
self.midx2.sortlevel(1)
class Multi3(object):
goal_time = 0.2
def setup(self):
self.level1 = range(1000)
self.level2 = date_range(start='1/1/2012', periods=100)
self.mi = MultiIndex.from_product([self.level1, self.level2])
def time_datetime_level_values_full(self):
self.mi.copy().values
def time_datetime_level_values_sliced(self):
self.mi[:10].values
class Range(object):
goal_time = 0.2
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
|
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.hashing import hash_pandas_object
from dask.dataframe.utils import assert_eq
@pytest.mark.parametrize('obj', [
pd.Series([1, 2, 3]),
pd.Series([1.0, 1.5, 3.2]),
pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
pd.Series(['a', 'b', 'c']),
pd.Series([True, False, True]),
pd.Index([1, 2, 3]),
pd.Index([True, False, True]),
pd.DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),
pd.util.testing.makeMissingDataframe(),
pd.util.testing.makeMixedDataFrame(),
pd.util.testing.makeTimeDataFrame(),
pd.util.testing.makeTimeSeries(),
pd.util.testing.makeTimedeltaIndex()])
def test_hash_pandas_object(obj):
a = hash_pandas_object(obj)
b = hash_pandas_object(obj)
if isinstance(a, np.ndarray):
np.testing.assert_equal(a, b)
else:
assert_eq(a, b)
|
from datetime import date
import json
import sys
out_file = 'Overlay_autogen.cpp'
in_file = 'overlay_widgets.json'
template_out_file = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Autogenerated overlay widget declarations.
namespace gl
{{
using namespace overlay;
namespace
{{
int GetFontSize(int fontSize, bool largeFont)
{{
if (largeFont && fontSize > 0)
{{
return fontSize - 1;
}}
return fontSize;
}}
}} // anonymous namespace
void Overlay::initOverlayWidgets()
{{
const bool kLargeFont = rx::IsAndroid();
{init_widgets}
}}
}} // namespace gl
"""
template_init_widget = u"""{{
const int32_t fontSize = GetFontSize({font_size}, kLargeFont);
const int32_t offsetX = {offset_x};
const int32_t offsetY = {offset_y};
const int32_t width = {width};
const int32_t height = {height};
widget->{subwidget}type = WidgetType::{type};
widget->{subwidget}fontSize = fontSize;
widget->{subwidget}coords[0] = {coord0};
widget->{subwidget}coords[1] = {coord1};
widget->{subwidget}coords[2] = {coord2};
widget->{subwidget}coords[3] = {coord3};
widget->{subwidget}color[0] = {color_r};
widget->{subwidget}color[1] = {color_g};
widget->{subwidget}color[2] = {color_b};
widget->{subwidget}color[3] = {color_a};
}}
"""
def extract_type_and_constructor(properties):
constructor = properties['type']
args_separated = constructor.split('(', 1)
if len(args_separated) == 1:
return constructor, constructor
type_no_constructor = args_separated[0]
return type_no_constructor, constructor
def get_font_size_constant(properties):
return 'kFontLayer' + properties['font'].capitalize()
def is_graph_type(type):
return type == 'RunningGraph' or type == 'RunningHistogram'
def is_text_type(type):
return not is_graph_type(type)
class OverlayWidget:
def __init__(self, properties, is_graph_description=False):
if not is_graph_description:
self.name = properties['name']
self.type, self.constructor = extract_type_and_constructor(properties)
self.extract_common(properties)
if is_graph_type(self.type):
description_properties = properties['description']
description_properties['type'] = 'Text'
self.description = OverlayWidget(description_properties, True)
def extract_common(self, properties):
self.color = properties['color']
self.coords = properties['coords']
if is_graph_type(self.type):
self.bar_width = properties['bar_width']
self.height = properties['height']
else:
self.font = get_font_size_constant(properties)
self.length = properties['length']
self.negative_alignment = [False, False]
def is_negative_coord(coords, axis, widgets_so_far):
if isinstance(coords[axis], unicode):
coord_split = coords[axis].split('.')
# The coordinate is in the form other_widget.edge.mode
# We simply need to know if other_widget's coordinate is negative or not.
return widgets_so_far[coord_split[0]].negative_alignment[axis]
return coords[axis] < 0
def set_alignment_flags(overlay_widget, widgets_so_far):
overlay_widget.negative_alignment[0] = is_negative_coord(overlay_widget.coords, 0,
widgets_so_far)
overlay_widget.negative_alignment[1] = is_negative_coord(overlay_widget.coords, 1,
widgets_so_far)
if is_graph_type(overlay_widget.type):
set_alignment_flags(overlay_widget.description, widgets_so_far)
def get_offset_helper(widget, axis, smaller_coord_side):
# Assume axis is X. This function returns two values:
# - An offset where the bounding box is placed at,
# - Whether this offset is for the left or right edge.
#
# The input coordinate (widget.coord[axis]) is either:
#
# - a number: in this case, the offset is that number, and its sign determines whether this refers to the left or right edge of the bounding box.
# - other_widget.edge.mode: this has multiple possibilities:
# * edge=left, mode=align: the offset is other_widget.left, the edge is left.
# * edge=left, mode=adjacent: the offset is other_widget.left, the edge is right.
# * edge=right, mode=align: the offset is other_widget.right, the edge is right.
# * edge=right, mode=adjacent: the offset is other_widget.right, the edge is left.
#
# The case for the Y axis is similar, with the edge values being top or bottom.
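    # For example (hypothetical widget name): for the X axis, a coordinate of
    # 'FPS.left.adjacent' returns ('mState.mOverlayWidgets[WidgetId::FPS]->coords[0]', False),
    # i.e. the new widget is placed to the left of FPS, so the returned offset
    # refers to the right edge of its bounding box.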
coord = widget.coords[axis]
if not isinstance(coord, unicode):
is_left = coord >= 0
return coord, is_left
coord_split = coord.split('.')
is_left = coord_split[1] == smaller_coord_side
is_align = coord_split[2] == 'align'
other_widget_coords = 'mState.mOverlayWidgets[WidgetId::' + coord_split[0] + ']->coords'
other_widget_coord_index = axis + (0 if is_left else 2)
offset = other_widget_coords + '[' + str(other_widget_coord_index) + ']'
return offset, is_left == is_align
def get_offset_x(widget):
return get_offset_helper(widget, 0, 'left')
def get_offset_y(widget):
return get_offset_helper(widget, 1, 'top')
def get_bounding_box_coords(offset, width, offset_is_left, is_left_aligned):
# See comment in generate_widget_init_helper. This function is implementing the following:
#
# - offset_is_left && is_left_aligned: [offset, offset + width]
# - offset_is_left && !is_left_aligned: [offset, std::min(offset + width, -1)]
# - !offset_is_left && is_left_aligned: [std::max(1, offset - width), offset]
# - !offset_is_left && !is_left_aligned: [offset - width, offset]
coord_left = offset if offset_is_left else (offset + ' - ' + width)
coord_right = (offset + ' + ' + width) if offset_is_left else offset
if offset_is_left and not is_left_aligned:
coord_right = 'std::min(' + coord_right + ', -1)'
if not offset_is_left and is_left_aligned:
coord_left = 'std::max(' + coord_left + ', 1)'
return coord_left, coord_right
def generate_widget_init_helper(widget, is_graph_description=False):
font_size = '0'
# Common attributes
color = [channel / 255.0 for channel in widget.color]
offset_x, offset_x_is_left = get_offset_x(widget)
offset_y, offset_y_is_top = get_offset_y(widget)
if is_text_type(widget.type):
        # Attributes derived from text properties
font_size = widget.font
width = str(widget.length) + ' * kFontGlyphWidths[fontSize]'
height = 'kFontGlyphHeights[fontSize]'
else:
        # Attributes derived from graph properties
width = str(widget.bar_width) + ' * static_cast<uint32_t>(widget->runningValues.size())'
height = widget.height
is_left_aligned = not widget.negative_alignment[0]
is_top_aligned = not widget.negative_alignment[1]
# We have offset_x, offset_y, width and height which together determine the bounding box. If
# offset_x_is_left, the bounding box X would be in [offset_x, offset_x + width], otherwise it
# would be in [offset_x - width, offset_x]. Similarly for y. Since we use negative values to
# mean aligned to the right side of the screen, we need to make sure that:
#
# - if left aligned: offset_x - width is at minimum 1
# - if right aligned: offset_x + width is at maximum -1
#
# We therefore have the following combinations for the X axis:
#
# - offset_x_is_left && is_left_aligned: [offset_x, offset_x + width]
# - offset_x_is_left && !is_left_aligned: [offset_x, std::min(offset_x + width, -1)]
# - !offset_x_is_left && is_left_aligned: [std::max(1, offset_x - width), offset_x]
# - !offset_x_is_left && !is_left_aligned: [offset_x - width, offset_x]
#
# Similarly for y.
coord0, coord2 = get_bounding_box_coords('offsetX', 'width', offset_x_is_left, is_left_aligned)
coord1, coord3 = get_bounding_box_coords('offsetY', 'height', offset_y_is_top, is_top_aligned)
return template_init_widget.format(
subwidget='description.' if is_graph_description else '',
offset_x=offset_x,
offset_y=offset_y,
width=width,
height=height,
type=widget.type,
font_size=font_size,
coord0=coord0,
coord1=coord1,
coord2=coord2,
coord3=coord3,
color_r=color[0],
color_g=color[1],
color_b=color[2],
color_a=color[3])
def generate_widget_init(widget):
widget_init = '{\n' + widget.type + ' *widget = new ' + widget.constructor + ';\n'
widget_init += generate_widget_init_helper(widget)
widget_init += 'mState.mOverlayWidgets[WidgetId::' + widget.name + '].reset(widget);\n'
if is_graph_type(widget.type):
widget_init += generate_widget_init_helper(widget.description, True)
widget_init += '}\n'
return widget_init
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'inputs':
print(in_file)
return
if len(sys.argv) == 2 and sys.argv[1] == 'outputs':
print(out_file)
return
with open(in_file) as fin:
layout = json.loads(fin.read())
# Read the layouts from the json file and determine alignment of widgets (as they can refer to
    # other widgets).
overlay_widgets = {}
for widget_properties in layout['widgets']:
widget = OverlayWidget(widget_properties)
overlay_widgets[widget.name] = widget
set_alignment_flags(widget, overlay_widgets)
# Go over the widgets again and generate initialization code. Note that we need to iterate over
# the widgets in order, so we can't use the overlay_widgets dictionary for iteration.
init_widgets = []
for widget_properties in layout['widgets']:
init_widgets.append(generate_widget_init(overlay_widgets[widget_properties['name']]))
with open(out_file, 'w') as outfile:
outfile.write(
template_out_file.format(
script_name=__file__,
copyright_year=date.today().year,
input_file_name=in_file,
out_file_name=out_file,
init_widgets='\n'.join(init_widgets)))
outfile.close()
if __name__ == '__main__':
sys.exit(main())
|
import json
from unittest.mock import patch
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, int_or_none,
parse_mastodon_document, parse_matrix_document)
from federation.tests.fixtures.hostmeta import (
NODEINFO2_10_DOC, NODEINFO_10_DOC, NODEINFO_20_DOC, STATISTICS_JSON_DOC, MASTODON_DOC, MASTODON_ACTIVITY_DOC,
MASTODON_RC_DOC, MASTODON_DOC_NULL_CONTACT, MATRIX_SYNAPSE_DOC, PLEROMA_MASTODON_API_DOC,
NODEINFO_21_DOC_INVALID_USAGE_COUNTS, MASTODON_DOC_3)
class TestIntOrNone:
def test_returns_negative_values_as_none(self):
assert int_or_none(-1) is None
class TestParseMastodonDocument:
@patch('federation.hostmeta.fetchers.fetch_nodeinfo_document', autospec=True)
def test_calls_nodeinfo_fetcher_if_pleroma(self, mock_fetch):
parse_mastodon_document(json.loads(PLEROMA_MASTODON_API_DOC), 'example.com')
mock_fetch.assert_called_once_with('example.com')
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__null_contact_account(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC_NULL_CONTACT), 'example.com')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__rc_version(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_RC_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.1rc1',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__protocols(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC_3), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '3.0.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
class TestParseMatrixDocument:
@patch('federation.hostmeta.parsers.send_document', autospec=True, return_value=(403, None))
def test_parse_matrix_document__signups_closed(self, mock_send):
result = parse_matrix_document(json.loads(MATRIX_SYNAPSE_DOC), 'feneas.org')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'feneas.org',
'name': 'feneas.org',
'open_signups': False,
'protocols': ['matrix'],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'matrix|synapse',
'version': '0.33.8',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.send_document', autospec=True, return_value=(401, None))
def test_parse_matrix_document__signups_open(self, mock_send):
result = parse_matrix_document(json.loads(MATRIX_SYNAPSE_DOC), 'feneas.org')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'feneas.org',
'name': 'feneas.org',
'open_signups': True,
'protocols': ['matrix'],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'matrix|synapse',
'version': '0.33.8',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
class TestParseNodeInfoDocument:
def test_parse_nodeinfo_10_document(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_10_DOC), 'iliketoast.net')
assert result == {
'organization': {
'account': 'podmin@iliketoast.net',
'contact': '',
'name': '',
},
'host': 'iliketoast.net',
'name': 'I Like Toast',
'open_signups': True,
'protocols': ["diaspora"],
'relay': '',
'server_meta': {},
'services': ["tumblr", "twitter"],
'platform': 'diaspora',
'version': '0.7.4.0-pd0313756',
'features': {
"nodeName": "I Like Toast",
"xmppChat": False,
"camo": {
"markdown": False,
"opengraph": False,
"remotePods": False
},
"adminAccount": "podmin",
},
'activity': {
'users': {
'total': 348,
'half_year': 123,
'monthly': 62,
'weekly': 19,
},
'local_posts': 8522,
'local_comments': 17671,
},
}
def test_parse_nodeinfo_20_document(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_20_DOC), 'iliketoast.net')
assert result == {
'organization': {
'account': 'podmin@iliketoast.net',
'contact': '',
'name': '',
},
'host': 'iliketoast.net',
'name': 'I Like Toast',
'open_signups': True,
'protocols': ["diaspora"],
'relay': '',
'server_meta': {},
'services': ["tumblr", "twitter"],
'platform': 'diaspora',
'version': '0.7.4.0-pd0313756',
'features': {
"nodeName": "I Like Toast",
"xmppChat": False,
"camo": {
"markdown": False,
"opengraph": False,
"remotePods": False
},
"adminAccount": "podmin",
},
'activity': {
'users': {
'total': 348,
'half_year': 123,
'monthly': 62,
'weekly': 19,
},
'local_posts': 8522,
'local_comments': 17671,
},
}
def test_parse_nodeinfo_21_document__invalid_usage_counts(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_21_DOC_INVALID_USAGE_COUNTS), 'pleroma.local')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'pleroma.local',
'name': 'pleroma.local',
'open_signups': True,
'protocols': ["activitypub"],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'pleroma',
'version': '0.7.4.0-pd0313756',
'features': {},
'activity': {
'users': {
'total': 348,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
class TestParseNodeInfo2Document:
def test_parse_nodeinfo2_10_document(self):
result = parse_nodeinfo2_document(json.loads(NODEINFO2_10_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://example.com/u/admin',
'contact': 'foobar@example.com',
'name': 'Example organization',
},
'host': 'example.com',
'name': 'Example server',
'open_signups': True,
'protocols': ["diaspora", "zot"],
'relay': "tags",
'server_meta': {},
'services': ["facebook", "gnusocial", "twitter"],
'platform': 'example',
'version': '0.5.0',
'features': {},
'activity': {
'users': {
'total': 123,
'half_year': 42,
'monthly': 23,
'weekly': 10,
},
'local_posts': 500,
'local_comments': 1000,
},
}
def test_parse_nodeinfo2_10_document__cleans_port_from_host(self):
response = json.loads(NODEINFO2_10_DOC)
response["server"]["baseUrl"] = "https://example.com:5221"
result = parse_nodeinfo2_document(response, 'example.com')
assert result == {
'organization': {
'account': 'https://example.com/u/admin',
'contact': 'foobar@example.com',
'name': 'Example organization',
},
'host': 'example.com',
'name': 'Example server',
'open_signups': True,
'protocols': ["diaspora", "zot"],
'relay': "tags",
'server_meta': {},
'services': ["facebook", "gnusocial", "twitter"],
'platform': 'example',
'version': '0.5.0',
'features': {},
'activity': {
'users': {
'total': 123,
'half_year': 42,
'monthly': 23,
'weekly': 10,
},
'local_posts': 500,
'local_comments': 1000,
},
}
class TestParseStatisticsJSONDocument:
def test_parse_statisticsjson_document(self):
result = parse_statisticsjson_document(json.loads(STATISTICS_JSON_DOC), 'example.com')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'example.com',
'name': 'diaspora*',
'open_signups': True,
'protocols': ["diaspora"],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'diaspora',
'version': '0.5.7.0-p56ebcc76',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
|
try:
import urlparse
except ImportError:
#py3k
from urllib import parse as urlparse
import json
from .firebase_token_generator import FirebaseTokenGenerator
from .decorators import http_connection
from .multiprocess_pool import process_pool
from .jsonutil import JSONEncoder
__all__ = ['FirebaseAuthentication', 'FirebaseApplication']
@http_connection(60)
def make_get_request(url, params, headers, connection):
"""
Helper function that makes an HTTP GET request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_get_request('http://firebase.localhost/users', {'print': 'silent'},
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'John Doe', '2': 'Jane Doe'}
"""
timeout = getattr(connection, 'timeout')
response = connection.get(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_put_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PUT request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users',
'{"1": "Ozgur Vatansever"}',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.put(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_post_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP POST request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_post_request('http://firebase.localhost/users/',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {u'name': u'-Inw6zol_2f5ThHwVcSe'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.post(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_patch_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PATCH request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_patch_request('http://firebase.localhost/users/1',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.patch(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_delete_request(url, params, headers, connection):
"""
Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is None. However, if the status code is not 2xx or 403,
    a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_delete_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
    response => None or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.delete(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
class FirebaseUser(object):
"""
Class that wraps the credentials of the authenticated user. Think of
this as a container that holds authentication related data.
"""
def __init__(self, email, firebase_auth_token, provider, id=None):
self.email = email
self.firebase_auth_token = firebase_auth_token
self.provider = provider
self.id = id
class FirebaseAuthentication(object):
"""
    Class that wraps the Firebase SimpleLogin mechanism. This class does not
    actually trigger a connection; it simply fakes the auth action. In
    addition, the provided email and password information is effectively
    unused and never appears in the ``auth`` variable at the server.
"""
def __init__(self, secret, email, debug=False, admin=False, extra=None):
self.authenticator = FirebaseTokenGenerator(secret, debug, admin)
self.email = email
self.provider = 'password'
self.extra = (extra or {}).copy()
self.extra.update({'debug': debug, 'admin': admin,
'email': self.email, 'provider': self.provider})
def get_user(self):
"""
Method that gets the authenticated user. The returning user has
the token, email and the provider data.
"""
token = self.authenticator.create_token(self.extra)
user_id = self.extra.get('id')
return FirebaseUser(self.email, token, self.provider, user_id)
class FirebaseApplication(object):
"""
Class that actually connects with the Firebase backend via HTTP calls.
It fully implements the RESTful specifications defined by Firebase. Data
    is transmitted in JSON format in both directions. This class needs a DSN value
that defines the base URL of the backend, and if needed, authentication
credentials are accepted and then are taken into consideration while
constructing HTTP requests.
There are also the corresponding asynchronous versions of each HTTP method.
The async calls make use of the on-demand process pool defined under the
module `async`.
    auth = FirebaseAuthentication(FIREBASE_SECRET, 'firebase@firebase.com')
firebase = FirebaseApplication('https://firebase.localhost', auth)
That's all there is. Then you start connecting with the backend:
json_dict = firebase.get('/users', '1', {'print': 'pretty'})
print json_dict
{'1': 'John Doe', '2': 'Jane Doe', ...}
Async version is:
firebase.get('/users', '1', {'print': 'pretty'}, callback=log_json_dict)
The callback method is fed with the returning response.
"""
NAME_EXTENSION = '.json'
URL_SEPERATOR = '/'
def __init__(self, dsn, authentication=None):
assert dsn.startswith('https://'), 'DSN must be a secure URL'
self.dsn = dsn
self.authentication = authentication
def _build_endpoint_url(self, url, name=None):
"""
Method that constructs a full url with the given url and the
snapshot name.
Example:
full_url = _build_endpoint_url('/users', '1')
            full_url => 'https://firebase.localhost/users/1.json'
"""
if not url.endswith(self.URL_SEPERATOR):
url = url + self.URL_SEPERATOR
if name is None:
name = ''
return '%s%s%s' % (urlparse.urljoin(self.dsn, url), name,
self.NAME_EXTENSION)
def _authenticate(self, params, headers):
"""
Method that simply adjusts authentication credentials for the
request.
`params` is the querystring of the request.
`headers` is the header of the request.
If auth instance is not provided to this class, this method simply
returns without doing anything.
"""
if self.authentication:
user = self.authentication.get_user()
params.update({'auth': user.firebase_auth_token})
headers.update(self.authentication.authenticator.HEADERS)
@http_connection(60)
def get(self, url, name, connection, params=None, headers=None):
"""
Synchronous GET request.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
return make_get_request(endpoint, params, headers, connection=connection)
def get_async(self, url, name, callback=None, params=None, headers=None):
"""
Asynchronous GET request with the process pool.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
process_pool.apply_async(make_get_request,
args=(endpoint, params, headers), callback=callback)
@http_connection(60)
def put(self, url, name, data, connection, params=None, headers=None):
"""
Synchronous PUT request. There will be no returning output from
the server, because the request will be made with ``silent``
parameter. ``data`` must be a JSONable value.
"""
assert name, 'Snapshot name must be specified'
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
return make_put_request(endpoint, data, params, headers,
connection=connection)
def put_async(self, url, name, data, callback=None, params=None, headers=None):
"""
Asynchronous PUT request with the process pool.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_put_request,
args=(endpoint, data, params, headers),
callback=callback)
@http_connection(60)
def post(self, url, data, connection, params=None, headers=None):
"""
Synchronous POST request. ``data`` must be a JSONable value.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
return make_post_request(endpoint, data, params, headers,
connection=connection)
def post_async(self, url, data, callback=None, params=None, headers=None):
"""
Asynchronous POST request with the process pool.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_post_request,
args=(endpoint, data, params, headers),
callback=callback)
@http_connection(60)
def patch(self, url, data, connection, params=None, headers=None):
"""
        Synchronous PATCH request. ``data`` must be a JSONable value.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
return make_patch_request(endpoint, data, params, headers,
connection=connection)
def patch_async(self, url, data, callback=None, params=None, headers=None):
"""
Asynchronous PATCH request with the process pool.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_patch_request,
args=(endpoint, data, params, headers),
callback=callback)
@http_connection(60)
def delete(self, url, name, connection, params=None, headers=None):
"""
        Synchronous DELETE request.
"""
if not name: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
return make_delete_request(endpoint, params, headers, connection=connection)
def delete_async(self, url, name, callback=None, params=None, headers=None):
"""
Asynchronous DELETE request with the process pool.
"""
if not name: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
process_pool.apply_async(make_delete_request,
args=(endpoint, params, headers), callback=callback)
|
from django.forms import fields
from django.forms import widgets
from djng.forms import field_mixins
from . import widgets as bs3widgets
class BooleanFieldMixin(field_mixins.BooleanFieldMixin):
def get_converted_widget(self):
assert(isinstance(self, fields.BooleanField))
if isinstance(self.widget, widgets.CheckboxInput):
self.widget_css_classes = None
if not isinstance(self.widget, bs3widgets.CheckboxInput):
new_widget = bs3widgets.CheckboxInput(self.label)
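            # The tuple assignment below evaluates its right-hand side first, so the
            # choice_label set by the CheckboxInput constructor survives the __dict__ swap.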
new_widget.__dict__, new_widget.choice_label = self.widget.__dict__, new_widget.choice_label
self.label = '' # label is rendered by the widget and not by BoundField.label_tag()
return new_widget
class ChoiceFieldMixin(field_mixins.ChoiceFieldMixin):
def get_converted_widget(self):
assert(isinstance(self, fields.ChoiceField))
if isinstance(self.widget, widgets.RadioSelect):
self.widget_css_classes = None
if not isinstance(self.widget, bs3widgets.RadioSelect):
new_widget = bs3widgets.RadioSelect()
new_widget.__dict__ = self.widget.__dict__
return new_widget
class MultipleChoiceFieldMixin(field_mixins.MultipleChoiceFieldMixin):
def get_converted_widget(self):
assert(isinstance(self, fields.MultipleChoiceField))
if isinstance(self.widget, widgets.CheckboxSelectMultiple):
self.widget_css_classes = None
if not isinstance(self.widget, bs3widgets.CheckboxSelectMultiple):
new_widget = bs3widgets.CheckboxSelectMultiple()
new_widget.__dict__ = self.widget.__dict__
return new_widget
|
from democracy.enums import InitialSectionType
INITIAL_SECTION_TYPE_DATA = [
{
'identifier': InitialSectionType.MAIN,
'name_singular': 'pääosio',
'name_plural': 'pääosiot',
},
{
'identifier': InitialSectionType.CLOSURE_INFO,
'name_singular': 'sulkeutumistiedote',
'name_plural': 'sulkeutumistiedotteet',
},
{
'identifier': InitialSectionType.SCENARIO,
'name_singular': 'vaihtoehto',
'name_plural': 'vaihtoehdot',
},
{
'identifier': InitialSectionType.PART,
'name_singular': 'osa-alue',
'name_plural': 'osa-alueet',
},
]
def create_initial_section_types(section_type_model):
for section in INITIAL_SECTION_TYPE_DATA:
section_type_model.objects.update_or_create(identifier=section['identifier'], defaults=section)
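# Typically called from a data migration, for example (sketch; the app label and
# model name are assumed):
#   create_initial_section_types(apps.get_model('democracy', 'SectionType'))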
|
import sys, os, os.path, signal
import jsshellhelper
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
class Packer(object):
toolsdir = os.path.dirname(os.path.abspath(__file__))
def run(self, jsshell, filename):
tmpFile = jsshellhelper.createEscapedFile(filename)
cmd = [jsshell,
'-f', os.path.join(self.toolsdir, 'packer.js'),
'-f', os.path.join(self.toolsdir, 'cleaner.js'),
'-f', tmpFile,
'-e', "var input = __unescape_string(); print(pack(input, 62, 1, 0));"]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if stdout:
print stdout
else:
print stderr
tmpFile = jsshellhelper.cleanUp(tmpFile)
def main():
parser = OptionParser()
options, args = parser.parse_args()
if len(args) < 2:
print >>sys.stderr, """Usage: %s <path to jsshell> <js file>""" % sys.argv[0]
sys.exit(1)
packer = Packer()
packer.run(args[0], args[1])
if __name__ == '__main__':
main()
|
from test_support import *
print '6. Built-in types'
print '6.1 Truth value testing'
if None: raise TestFailed, 'None is true instead of false'
if 0: raise TestFailed, '0 is true instead of false'
if 0L: raise TestFailed, '0L is true instead of false'
if 0.0: raise TestFailed, '0.0 is true instead of false'
if '': raise TestFailed, '\'\' is true instead of false'
if (): raise TestFailed, '() is true instead of false'
if []: raise TestFailed, '[] is true instead of false'
if {}: raise TestFailed, '{} is true instead of false'
if not 1: raise TestFailed, '1 is false instead of true'
if not 1L: raise TestFailed, '1L is false instead of true'
if not 1.0: raise TestFailed, '1.0 is false instead of true'
if not 'x': raise TestFailed, '\'x\' is false instead of true'
if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
if not [1]: raise TestFailed, '[1] is false instead of true'
if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
def f(): pass
class C: pass
import sys
x = C()
if not f: raise TestFailed, 'f is false instead of true'
if not C: raise TestFailed, 'C is false instead of true'
if not sys: raise TestFailed, 'sys is false instead of true'
if not x: raise TestFailed, 'x is false instead of true'
print '6.2 Boolean operations'
if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
if 1 and 1: pass
else: raise TestFailed, '1 and 1 is false instead of true'
if not 1: raise TestFailed, 'not 1 is true instead of false'
print '6.3 Comparisons'
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: raise TestFailed, 'int comparisons failed'
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: raise TestFailed, 'long int comparisons failed'
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: raise TestFailed, 'float comparisons failed'
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: raise TestFailed, 'string comparisons failed'
if 0 in [0] and 0 not in [1]: pass
else: raise TestFailed, 'membership test failed'
if None is None and [] is not []: pass
else: raise TestFailed, 'identity test failed'
print '6.4 Numeric types (mostly conversions)'
if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons'
if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons'
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
raise TestFailed, 'int/long/float value not equal'
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: raise TestFailed, 'int() does not round properly'
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: raise TestFailed, 'long() does not round properly'
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: raise TestFailed, 'float() does not work properly'
print '6.4.1 32-bit integers'
if 12 + 24 != 36: raise TestFailed, 'int op'
if 12 + (-24) != -12: raise TestFailed, 'int op'
if (-12) + 24 != 12: raise TestFailed, 'int op'
if (-12) + (-24) != -36: raise TestFailed, 'int op'
if not 12 < 24: raise TestFailed, 'int op'
if not -24 < -12: raise TestFailed, 'int op'
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
raise TestFailed, 'int mul commutativity'
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m)
if type(prod) is not int:
raise TestFailed, ("expected type(prod) to be int, not %r" %
type(prod))
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
print '6.4.2 Long integers'
if 12L + 24L != 36L: raise TestFailed, 'long op'
if 12L + (-24L) != -12L: raise TestFailed, 'long op'
if (-12L) + 24L != 12L: raise TestFailed, 'long op'
if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
if not 12L < 24L: raise TestFailed, 'long op'
if not -24L < -12L: raise TestFailed, 'long op'
x = sys.maxint
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)+1L)
except OverflowError: pass
else: raise TestFailed, 'long op'
x = -x
if int(long(x)) != x: raise TestFailed, 'long op'
x = x-1
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)-1L)
except OverflowError: pass
else: raise TestFailed, 'long op'
print '6.4.3 Floating point numbers'
if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
if not 12.0 < 24.0: raise TestFailed, 'float op'
if not -24.0 < -12.0: raise TestFailed, 'float op'
print '6.5 Sequence types'
print '6.5.1 Strings'
if len('') != 0: raise TestFailed, 'len(\'\')'
if len('a') != 1: raise TestFailed, 'len(\'a\')'
if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: raise TestFailed, 'in/not in string'
x = 'x'*103
if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
print '6.5.2 Tuples'
if len(()) != 0: raise TestFailed, 'len(())'
if len((1,)) != 1: raise TestFailed, 'len((1,))'
if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))'
if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation'
if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*'
if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple'
if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
else: raise TestFailed, 'in/not in tuple'
print '6.5.3 Lists'
if len([]) != 0: raise TestFailed, 'len([])'
if len([1,]) != 1: raise TestFailed, 'len([1,])'
if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
else: raise TestFailed, 'in/not in list'
a = [1, 2, 3, 4, 5]
a[:-1] = a
if a != [1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (head)"
a = [1, 2, 3, 4, 5]
a[1:] = a
if a != [1, 1, 2, 3, 4, 5]:
raise TestFailed, "list self-slice-assign (tail)"
a = [1, 2, 3, 4, 5]
a[1:-1] = a
if a != [1, 1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (center)"
print '6.5.3a Additional list operations'
a = [0,1,2,3,4]
a[0L] = 1
a[1L] = 2
a[2L] = 3
if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
a[0] = 5
a[1] = 6
a[2] = 7
if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
a[-2L] = 88
a[-1L] = 99
if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
a[-2] = 8
a[-1] = 9
if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
a[:2] = [0,4]
a[-3:] = []
a[1:1] = [1,2,3]
if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
a[ 1L : 4L] = [7,8,9]
if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
del a[1:4]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1]
if a != []: raise TestFailed, 'list item deletion [-1]'
a=range(0,5)
del a[1L:4L]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0L]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1L]
if a != []: raise TestFailed, 'list item deletion [-1]'
a.append(0)
a.append(1)
a.append(2)
if a != [0,1,2]: raise TestFailed, 'list append'
a.insert(0, -2)
a.insert(1, -1)
a.insert(2,0)
if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
if a.count(0) != 2: raise TestFailed, ' list count'
if a.index(0) != 2: raise TestFailed, 'list index'
a.remove(0)
if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove'
a.reverse()
if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
a.sort()
if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort'
def revcmp(a, b): return cmp(b, a)
a.sort(revcmp)
if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
def myComparison(x,y):
return cmp(x%3, y%7)
z = range(12)
z.sort(myComparison)
a = [0,1,2,3,4]
if a[ -pow(2,128L): 3 ] != [0,1,2]:
raise TestFailed, "list slicing with too-small long integer"
if a[ 3: pow(2,145L) ] != [3,4]:
raise TestFailed, "list slicing with too-large long integer"
print '6.6 Mappings == Dictionaries'
d = {}
if d.keys() != []: raise TestFailed, '{}.keys()'
if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')'
if ('a' in d) != 0: raise TestFailed, "'a' in {}"
if ('a' not in d) != 1: raise TestFailed, "'a' not in {}"
if len(d) != 0: raise TestFailed, 'len({})'
d = {'a': 1, 'b': 2}
if len(d) != 2: raise TestFailed, 'len(dict)'
k = d.keys()
k.sort()
if k != ['a', 'b']: raise TestFailed, 'dict keys()'
if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
else: raise TestFailed, 'dict keys()'
if 'a' in d and 'b' in d and 'c' not in d: pass
else: raise TestFailed, 'dict keys() # in/not in version'
if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item'
d['c'] = 3
d['a'] = 4
if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment'
del d['b']
if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
d = {1:1, 2:2, 3:3}
d.clear()
if d != {}: raise TestFailed, 'dict clear'
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update'
d.clear()
try: d.update(None)
except AttributeError: pass
else: raise TestFailed, 'dict.update(None), AttributeError expected'
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.update(SimpleUserDict())
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict.update(instance)'
d.clear()
class FailingUserDict:
def keys(self):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'dict.keys() expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __iter__(self):
raise ValueError
return BogonIter()
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'iter(dict.keys()) expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise ValueError
return BogonIter()
def __getitem__(self, key):
return key
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'iter(dict.keys()).next() expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'dict.update(), __getitem__ expected ValueError'
d = {1:1, 2:2, 3:3}
if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed, 'dict copy'
if {}.copy() != {}: raise TestFailed, 'empty dict copy'
d = {}
if d.get('c') is not None: raise TestFailed, 'missing {} get, no 2nd arg'
if d.get('c', 3) != 3: raise TestFailed, 'missing {} get, w/ 2nd arg'
d = {'a' : 1, 'b' : 2}
if d.get('c') is not None: raise TestFailed, 'missing dict get, no 2nd arg'
if d.get('c', 3) != 3: raise TestFailed, 'missing dict get, w/ 2nd arg'
if d.get('a') != 1: raise TestFailed, 'present dict get, no 2nd arg'
if d.get('a', 3) != 1: raise TestFailed, 'present dict get, w/ 2nd arg'
d = {}
if d.setdefault('key0') is not None:
raise TestFailed, 'missing {} setdefault, no 2nd arg'
if d.setdefault('key0') is not None:
raise TestFailed, 'present {} setdefault, no 2nd arg'
d.setdefault('key', []).append(3)
if d['key'][0] != 3:
raise TestFailed, 'missing {} setdefault, w/ 2nd arg'
d.setdefault('key', []).append(4)
if len(d['key']) != 2:
raise TestFailed, 'present {} setdefault, w/ 2nd arg'
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[`i`] = i
if copymode < 0:
b[`i`] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
if va != int(ka): raise TestFailed, "a.popitem: %s" % str(ta)
kb, vb = tb = b.popitem()
if vb != int(kb): raise TestFailed, "b.popitem: %s" % str(tb)
if copymode < 0 and ta != tb:
raise TestFailed, "a.popitem != b.popitem: %s, %s" % (
str(ta), str(tb))
if a: raise TestFailed, 'a not empty after popitems: %s' % str(a)
if b: raise TestFailed, 'b not empty after popitems: %s' % str(b)
try: type(1, 2)
except TypeError: pass
else: raise TestFailed, 'type(), w/2 args expected TypeError'
try: type(1, 2, 3, 4)
except TypeError: pass
else: raise TestFailed, 'type(), w/4 args expected TypeError'
|
import pytest
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy.addons import save
from mitmproxy.addons import view
def test_configure(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(sa, save_stream_file=str(tmpdir))
with pytest.raises(Exception, match="Invalid filter"):
tctx.configure(
sa, save_stream_file=str(tmpdir.join("foo")), save_stream_filter="~~"
)
tctx.configure(sa, save_stream_filter="foo")
assert sa.filt
tctx.configure(sa, save_stream_filter=None)
assert not sa.filt
def rd(p):
with open(p, "rb") as f:
x = io.FlowReader(f)
return list(x.stream())
def test_tcp(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, save_stream_file=p)
tt = tflow.ttcpflow()
sa.tcp_start(tt)
sa.tcp_end(tt)
tt = tflow.ttcpflow()
sa.tcp_start(tt)
sa.tcp_error(tt)
tctx.configure(sa, save_stream_file=None)
assert len(rd(p)) == 2
def test_websocket(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, save_stream_file=p)
f = tflow.twebsocketflow()
sa.request(f)
sa.websocket_end(f)
f = tflow.twebsocketflow()
sa.request(f)
sa.websocket_end(f)
tctx.configure(sa, save_stream_file=None)
assert len(rd(p)) == 2
def test_save_command(tmpdir):
sa = save.Save()
with taddons.context() as tctx:
p = str(tmpdir.join("foo"))
sa.save([tflow.tflow(resp=True)], p)
assert len(rd(p)) == 1
sa.save([tflow.tflow(resp=True)], p)
assert len(rd(p)) == 1
sa.save([tflow.tflow(resp=True)], "+" + p)
assert len(rd(p)) == 2
with pytest.raises(exceptions.CommandError):
sa.save([tflow.tflow(resp=True)], str(tmpdir))
v = view.View()
tctx.master.addons.add(v)
tctx.master.addons.add(sa)
tctx.master.commands.execute("save.file @shown %s" % p)
def test_simple(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, save_stream_file=p)
f = tflow.tflow(resp=True)
sa.request(f)
sa.response(f)
tctx.configure(sa, save_stream_file=None)
assert rd(p)[0].response
tctx.configure(sa, save_stream_file="+" + p)
f = tflow.tflow(err=True)
sa.request(f)
sa.error(f)
tctx.configure(sa, save_stream_file=None)
assert rd(p)[1].error
tctx.configure(sa, save_stream_file="+" + p)
f = tflow.tflow()
sa.request(f)
tctx.configure(sa, save_stream_file=None)
assert not rd(p)[2].response
|
from __future__ import absolute_import
from __future__ import print_function
import sys, os, yaml, glob
import subprocess
import pandas as pd
import re
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nougat import common, align
from itertools import groupby
from collections import OrderedDict
def run(global_config, sample_config):
sorted_libraries_by_insert = \
common._sort_libraries_by_insert(sample_config)
_check_libraries(sorted_libraries_by_insert)
computeAssemblyStats(sample_config)
# filter out short contigs
sample_config = _build_new_reference(sample_config)
if "tools" in sample_config:
"""If so, execute them one after the other in the specified order \
(might not work)"""
for command in sample_config["tools"]:
"""with this I pick up at run time the correct function in the \
current module"""
command_fn = getattr(sys.modules[__name__],
"_run_{}".format(command))
"""Update sample config, each command return sample_config and \
if necessary it modifies it"""
sample_config = command_fn(global_config, sample_config,
sorted_libraries_by_insert)
else:
#run default pipeline for QC
sample_config = _run_align(global_config, sample_config,
sorted_libraries_by_insert)
sample_config = _run_qaTools(global_config, sample_config,
sorted_libraries_by_insert)
sample_config = _run_FRC(global_config, sample_config,
sorted_libraries_by_insert)
def _run_align(global_config, sample_config,sorted_libraries_by_insert):
if "reference" not in sample_config:
print("reference sequence not provided, skypping alignment step.",
"Please provide a reference if you are intrested in aligning",
"the reads against a reference")
return sample_config
if not os.path.exists("alignments"):
os.makedirs("alignments")
os.chdir("alignments")
sorted_libraries_by_insert = align._align_reads(global_config,
sample_config, sorted_libraries_by_insert) # align reads
sorted_alignments_by_insert = align._merge_bam_files(global_config,
sample_config, sorted_libraries_by_insert) # merge alignments
sorted_alignments_by_insert = align.picard_CGbias(global_config,
sample_config,sorted_alignments_by_insert) # compute picard stats
sorted_alignments_by_insert = align.picard_collectInsertSizeMetrics(
global_config, sample_config,sorted_alignments_by_insert)
sorted_alignments_by_insert = align.picard_markDuplicates(global_config,
sample_config,sorted_alignments_by_insert)
os.chdir("..")
sample_config["alignments"] = sorted_alignments_by_insert
return sample_config
def _check_libraries(sorted_libraries_by_insert):
different_inserts = 0
current_insert = -1
orientation = ""
for library, libraryInfo in sorted_libraries_by_insert:
if current_insert == -1:
current_insert = libraryInfo["insert"]
different_inserts = 1
else:
if current_insert != libraryInfo["insert"]:
current_insert = libraryInfo["insert"]
different_inserts += 1
if different_inserts > 2:
sys.exit("error: in valiadation only two libraries are admitted "
"usually a PE and a MP, sometimes 2 PE)")
return
def _build_new_reference(sample_config):
minCtgLength = 500
if "minCtgLength" in sample_config:
minCtgLength = sample_config["minCtgLength"]
if minCtgLength < 500:
sys.exit("min contig length must be higher than 500bp, lower "
"values will complicate the job of valiadation tools and "
"make results difficult to interpret. For mammalian "
"genomes minCtgLength > 1Kbp is strongly suggested")
reference = sample_config["reference"]
reference_dir = os.path.abspath("reference")
if not os.path.exists(reference_dir):
os.makedirs(reference_dir)
os.chdir(reference_dir)
new_reference_name = os.path.abspath(os.path.basename(reference))
if os.path.exists(new_reference_name):
sample_config["reference"] = new_reference_name
os.chdir("..")
return sample_config # already created the new reference
with open(new_reference_name, "w") as new_ref_fd:
with open(reference, "r") as ref_fd:
fasta_header = ref_fd.readline()
sequence = ""
for line in ref_fd:
line = line
if line.startswith(">"):
if len(sequence) >= minCtgLength:
new_ref_fd.write(fasta_header)
new_ref_fd.write(sequence)
sequence = ""
fasta_header = line
else:
sequence+=line
if len(sequence) >= minCtgLength:
new_ref_fd.write(fasta_header)
new_ref_fd.write(sequence)
sample_config["reference"] = new_reference_name
os.chdir("..")
return sample_config
def _run_BUSCO(global_config, sample_config, sorted_alignments_by_insert):
program = global_config["Tools"]["BUSCO"]["bin"]
options = global_config["Tools"]["BUSCO"]["options"]
main_dir = os.getcwd()
BUSCOfolder = os.path.join(main_dir, "BUSCO")
if not os.path.exists(BUSCOfolder):
os.makedirs(BUSCOfolder)
os.chdir(BUSCOfolder)
BUSCO_data_path = os.path.expandvars(sample_config["BUSCODataPath"])
if not os.path.exists(BUSCO_data_path):
raise IOError("Path to the BUSCO data set does not exist!")
reference = sample_config["reference"]
output = sample_config["output"]
threads = sample_config.get("threads", 16)
command = [program, "-l", BUSCO_data_path, "-i", "{}".format(reference), "-o", "{}".format(output),
"-c", "{}".format(threads)]
command.extend(options)
common.print_command(command)
outfile = os.path.join(BUSCOfolder, "run_{}".format(output),
"short_summary_{}".format(output))
if not common.check_dryrun(sample_config) and not os.path.exists(outfile):
stdOut = open("BUSCO.stdOut", "a")
stdErr = open("BUSCO.stdErr", "a")
return_value = subprocess.call(command, stdout=stdOut, stderr=stdErr)
if not return_value == 0:
sys.exit("Error running BUSCO")
os.chdir("..")
def _run_FRC(global_config, sample_config, sorted_libraries_by_insert):
mainDir = os.getcwd()
FRCurveFolder = os.path.join(os.getcwd(), "FRCurve")
if not os.path.exists(FRCurveFolder):
os.makedirs(FRCurveFolder)
os.chdir("FRCurve")
program=global_config["Tools"]["FRC"]["bin"]
genomeSize = sample_config["genomeSize"]
reference = sample_config["reference"]
output = sample_config["output"]
alignments = sample_config["alignments"]
peBam = alignments[0][1]
peInsert = alignments[0][0]
peMinInsert = int(peInsert - peInsert*0.60)
peMaxInsert = int(peInsert + peInsert*0.60)
command = [program, "--pe-sam", peBam, "--pe-max-insert", "5000"]
if len(alignments) > 1:
mpBam = alignments[1][1]
mpInsert = alignments[1][0]
mpMinInsert = int(mpInsert - mpInsert*0.50)
mpMaxInsert = int(mpInsert + mpInsert*0.50)
command += ["--mp-sam", mpBam, "--mp-max-insert", "25000"]
command += [ "--genome-size", "{}".format(genomeSize), "--output", output]
common.print_command(command)
if not common.check_dryrun(sample_config) and not os.path.exists(
"{}_FRC.png".format(output)):
stdOut = open("FRC.stdOut", "a")
stdErr = open("FRC.stdErr", "a")
returnValue = subprocess.call(command , stdout=stdOut , stderr=stdErr)
if not returnValue == 0:
sys.exit("error, while running FRCurve: {}".format(command))
plotFRCurve(output)
os.chdir("..")
return sample_config
def plotFRCurve(output):
names = ["_FRC", "COMPR_MP_FRC", "COMPR_PE_FRC", "HIGH_COV_PE_FRC",
"HIGH_NORM_COV_PE_FRC", "HIGH_OUTIE_MP_FRC", "HIGH_OUTIE_PE_FRC",
"HIGH_SINGLE_MP_FRC", "HIGH_SINGLE_PE_FRC", "HIGH_SPAN_MP_FRC",
"HIGH_SPAN_PE_FRC", "LOW_COV_PE_FRC", "LOW_NORM_COV_PE_FRC",
"STRECH_MP_FRC", "STRECH_PE_FRC"]
for name in names:
FRC_data = pd.io.parsers.read_csv("{}{}.txt".format(output, name),
sep=' ', header=None)
FRC_features = FRC_data[FRC_data.columns[0]].tolist()
FRC_coverage = FRC_data[FRC_data.columns[1]].tolist()
plt.plot(FRC_features, FRC_coverage)
if name == "_FRC":
plt.title('Feature Response Curve -- All Features')
else:
plt.title('Feature Response Curve -- {}'.format(name))
plt.plot(FRC_features, FRC_coverage)
plt.savefig("{}{}.png".format(output, name))
plt.clf()
return 0
def _run_qaTools(global_config, sample_config, sorted_libraries_by_insert):
mainDir = os.getcwd()
qaToolsFolder = os.path.join(os.getcwd(), "QAstats")
if not os.path.exists(qaToolsFolder):
os.makedirs(qaToolsFolder)
os.chdir("QAstats")
program=global_config["Tools"]["qaTools"]["bin"]
genomeSize = sample_config["genomeSize"]
reference = sample_config["reference"]
output = sample_config["output"]
alignments = sample_config["alignments"][0]
BAMfile = alignments[1]
command = ["{}".format(program), "-m", "-q", "0", "-i", BAMfile,
"{}.cov".format(os.path.basename(BAMfile))]
common.print_command(command)
if not common.check_dryrun(sample_config) and not os.path.exists(
"{}.cov".format(os.path.basename(BAMfile))):
stdOut = open("QAtools.stdOut", "a")
stdErr = open("QAtools.stdErr", "a")
returnValue = subprocess.call(command , stdout=stdOut , stderr=stdErr)
if not returnValue == 0:
sys.exit("error, while running QAtools: {}".format(command))
#now add GC content
QAtools_dict = {}
header = ""
with open( "{}.cov".format(os.path.basename(BAMfile)), "r") as QA_csv:
header = QA_csv.readline().rstrip()
for line in QA_csv:
line = line.strip().split("\t")
QAtools_dict[line[0]] = [line[1],line[2],line[3]]
QA_GC_file = "{}.cov.gc".format(os.path.basename(BAMfile))
with open(QA_GC_file, "w") as QA_GC_fd:
QA_GC_fd.write("{}\tGCperc\n".format(header))
with open(reference, "r") as ref_fd:
fasta_raw_header = ref_fd.readline().strip()
fasta_raw_header = fasta_raw_header.split(" ")[0]
fasta_raw_header = fasta_raw_header.split("\t")[0]
fasta_header = fasta_raw_header.split(">")[1]
sequence = ""
for line in ref_fd:
line = line.strip()
if line.startswith(">"):
GC = computeGC(sequence)
if fasta_header not in QAtools_dict:
sys.exit("error while parsing QAcompute output: "
"probably some wired contig name is "
"present in your assmebly file")
QA_GC_fd.write("{}\t{}\t{}\t{}\t{}\n".format(
fasta_header, QAtools_dict[fasta_header][0],
QAtools_dict[fasta_header][1],
QAtools_dict[fasta_header][2], GC))
sequence = ""
fasta_raw_header = line.split(" ")[0]
fasta_raw_header = fasta_raw_header.split("\t")[0]
fasta_header = fasta_raw_header.split(">")[1]
else:
sequence+=line
GC = computeGC(sequence)
if fasta_header not in QAtools_dict:
sys.exit("error while parsing QAcompute output: probably "
"some wired contig name is present in your "
"assmebly file")
QA_GC_fd.write("{}\t{}\t{}\t{}\t{}\n".format(fasta_header,
QAtools_dict[fasta_header][0],
QAtools_dict[fasta_header][1],
QAtools_dict[fasta_header][2], GC))
plotQA(QA_GC_file)
os.chdir("..")
return sample_config
def plotQA(QA_GC_file):
#QA_GC_file="lib_500.bam.cov.gc"
shutil.copy(QA_GC_file, "Contigs_Cov_SeqLen_GC.csv")
QA_data = pd.io.parsers.read_csv("Contigs_Cov_SeqLen_GC.csv",
sep='\t', header=0)
GCperc = QA_data['GCperc'].tolist()
MedianCov = QA_data['Median_Cov'].tolist()
SeqLen = QA_data['Seq_len'].tolist()
Mean_MedianCov = sum(MedianCov) / float(len(MedianCov))
Max_MedianCov = max(MedianCov)
if Max_MedianCov > 2.5* Mean_MedianCov:
Max_MedianCov = Mean_MedianCov*2
#GC_vs_Median Coverage
plt.plot(GCperc, MedianCov, 'or')
plt.title('GC content vs Median Coverage')
plt.xlabel('%GC')
plt.ylabel('Coverage')
plotname = "GC_vs_Coverage.png"
plt.savefig(plotname)
plt.clf()
# GC_vs_median eliminate outliers
plt.plot(GCperc, MedianCov, 'or')
plt.ylim((10, Max_MedianCov))
plt.title('GC content vs Median Coverage')
plt.xlabel('%GC')
plt.ylabel('Coverage')
plotname = "GC_vs_Coverage_noOutliers.png"
plt.savefig(plotname)
plt.clf()
#Coverage Distribution Histogram
try:
n, bins, patches = plt.hist(MedianCov, 100, facecolor='g')
except ValueError:
n, bins, patches = plt.hist(MedianCov, bins=range(0, max(MedianCov)+100, 10), facecolor='g')
plt.xlabel('Coverage')
plt.ylabel('Frequency')
plt.title('Coverage Distribution')
plotname = "Coverage_distribution.png"
plt.savefig(plotname)
plt.clf()
#Coverage Distribution Histogram eliminate outliers
n, bins, patches = plt.hist(MedianCov, 100, facecolor='g',
range=(4,Max_MedianCov))
plt.xlabel('Coverage')
plt.ylabel('Frequency')
plt.title('Coverage Distribution')
plotname = "Coverage_distribution_noOutliers.png"
plt.savefig(plotname)
plt.clf()
#Median Cov vs Sequence Length
plt.plot(MedianCov, [x / 1000 for x in SeqLen], 'ro')
plt.title('Median Coverage vs Contig Length')
plt.xlabel('Median Coverage')
plt.ylabel('Contig Length (Kbp)')
plotname = "MedianCov_vs_CtgLength.png"
plt.savefig(plotname)
plt.clf()
#Median Cov vs Sequence Length eliminate outliers
plt.plot(MedianCov, [x / 1000 for x in SeqLen], 'ro')
plt.xlim((10, Max_MedianCov))
plt.title('Median Coverage vs Contig Length')
plt.xlabel('Median Coverage')
plt.ylabel('Contig Length (Kbp)')
plotname = "MedianCov_vs_CtgLength_noOutliers.png"
plt.savefig(plotname)
plt.clf()
#GC content vs Contig length
plt.plot(GCperc, [x / 1000 for x in SeqLen], 'ro')
plt.title('%GC vs Contig Length')
plt.xlabel('%GC')
plt.ylabel('Contig Length (Kbp)')
plotname = "GC_vs_CtgLength.png"
plt.savefig(plotname)
plt.clf()
return 0
def computeGC(sequence):
gcCount = len(re.findall("[GC]", sequence)) + len(
re.findall("[gc]", sequence))
totalBaseCount = len(re.findall("[GCTA]", sequence)) + len(
re.findall("[gcta]", sequence))
gcFraction = float(gcCount) / totalBaseCount
return gcFraction
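# Example: computeGC("ACGT") returns 0.5; ambiguous bases such as N are excluded
# from both the GC count and the total base count.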
def computeAssemblyStats(sample_config):
outfile = os.path.join("contig_stats", "contiguity.out")
if not os.path.exists("contig_stats"):
os.makedirs("contig_stats")
minlength = sample_config.get("minCtgLength", 1000)
sequence = sample_config["reference"]
genomesize = sample_config["genomeSize"]
ctg = re.sub("scf.fasta$", "ctg.fasta", sequence)
scf = re.sub("ctg.fasta$", "scf.fasta", sequence)
def asm_stats(sequence):
stats = OrderedDict()
stats["assembly type"] = ""
stats["# sequences"] = 0
stats["assembly length"] = 0
stats["trim shorter than(bp)"] = minlength
stats["# trimmed sequences"] = 0
stats["trimmed assembly length"] = 0
stats["N50"] = 0
stats["N80"] = 0
stats["NG50"] = 0
stats["NG80"] = 0
stats["longest seq"] = 0
sequence_lengths = []
with open(sequence, "r") as seq_file:
# Groupby iterator. Should work for fasta of any column width
fai = groupby(seq_file, lambda x: x.startswith(">"))
while True:
try:
_, header = next(fai)
_, sequence = next(fai)
except StopIteration:
break
# Collect fasta sequence stats
seq_len = sum([len(i.strip()) for i in sequence])
sequence_lengths.append(seq_len)
stats["# sequences"] += 1
stats["assembly length"] += seq_len
if seq_len > minlength:
stats["# trimmed sequences"] += 1
stats["trimmed assembly length"] += seq_len
if seq_len > stats["longest seq"]:
stats["longest seq"] = seq_len
sequence_lengths = sorted(sequence_lengths, reverse=True)
test_sum = 0
for sequence in sequence_lengths:
test_sum += sequence
if stats["assembly length"] * 0.5 < test_sum and stats["N50"] is 0:
stats["N50"] = sequence
if stats["assembly length"] * 0.8 < test_sum and stats["N80"] is 0:
stats["N80"] = sequence
if genomesize * 0.5 < test_sum and stats["NG50"] is 0:
stats["NG50"] = sequence
if genomesize * 0.8 < test_sum and stats["NG80"] is 0:
stats["NG80"] = sequence
return stats
ctg_stats = asm_stats(ctg)
ctg_stats["assembly type"] = "contigs"
scf_stats = asm_stats(scf)
scf_stats["assembly type"] = "scaffolds"
with open(outfile, "w") as out:
out.write('\t'.join(ctg_stats.keys()))
out.write('\n')
for asm in [ctg_stats, scf_stats]:
out.write('\t'.join(map(str, asm.values())))
out.write('\n')
|
"""
Django settings for huts project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '1&1@xh%(guq+b#1&jv$e6pa9n6sm_w#9cia1)(+idj1)omok(*'
DEBUG = True
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hut',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MIDDLEWARE_CLASSES = []
ROOT_URLCONF = 'huts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'huts.wsgi.application'
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': dj_database_url.config(default='postgres://nxhxalrlqkckbd:1f8179624d9a773c8de38b1303b149283dfd58238fb10d0509cb85be49edcc2a@ec2-54-247-99-159.eu-west-1.compute.amazonaws.com:5432/d9tipol4jem759')
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../static_cdn')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../media_cdn')
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 2,
}
|
"""
.. module:: decorators
:platform: Unix, Windows
:synopsis: Decorators for SublimePython plugin
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import os
import functools
def debug(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
import traceback
with open(os.path.expanduser("~/trace"), "w") as fl:
traceback.print_exc(file=fl)
return wrapped
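# Illustrative usage (handler name assumed): any callback wrapped with @debug
# will have uncaught exceptions written to ~/trace instead of propagating.
#
# @debug
# def on_post_save(view):
#     ...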
|
import unittest
from programy.processors.post.denormalize import DenormalizePostProcessor
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain import BrainConfiguration
from programy.config.bot import BotConfiguration
class DenormalizeTests(unittest.TestCase):
def setUp(self):
self.bot = Bot(Brain(BrainConfiguration()), config=BotConfiguration())
self.bot.brain.denormals.process_splits([" dot com ",".com"])
def test_denormalize(self):
processor = DenormalizePostProcessor ()
result = processor.process(self.bot, "testid", "Hello")
self.assertIsNotNone(result)
self.assertEqual("Hello", result)
result = processor.process(self.bot, "testid", "hello dot com")
self.assertIsNotNone(result)
self.assertEqual("hello.com", result)
|
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("assignments", "0015_assignmentvote_delegated_user"),
]
operations = [
migrations.AddField(
model_name="assignmentpoll",
name="db_amount_global_yes",
field=models.DecimalField(
blank=True,
decimal_places=6,
default=Decimal("0"),
max_digits=15,
null=True,
validators=[django.core.validators.MinValueValidator(Decimal("-2"))],
),
),
migrations.AddField(
model_name="assignmentpoll",
name="global_yes",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="assignmentpoll",
name="pollmethod",
field=models.CharField(
choices=[
("votes", "Yes per candidate"),
("N", "No per candidate"),
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
],
max_length=5,
),
),
migrations.AlterField(
model_name="assignmentpoll",
name="onehundred_percent_base",
field=models.CharField(
choices=[
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
("Y", "Sum of votes including general No/Abstain"),
("valid", "All valid ballots"),
("cast", "All casted ballots"),
("disabled", "Disabled (no percents)"),
],
max_length=8,
),
),
migrations.AlterField(
model_name="assignmentpoll",
name="pollmethod",
field=models.CharField(
choices=[
("Y", "Yes per candidate"),
("N", "No per candidate"),
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
],
max_length=5,
),
),
]
|
import frappe
from frappe.utils import cstr
def execute():
# Update Social Logins in User
run_patch()
# Create Social Login Key(s) from Social Login Keys
frappe.reload_doc("integrations", "doctype", "social_login_key", force=True)
if not frappe.db.exists('DocType', 'Social Login Keys'):
return
social_login_keys = frappe.get_doc("Social Login Keys", "Social Login Keys")
if social_login_keys.get("facebook_client_id") or social_login_keys.get("facebook_client_secret"):
facebook_login_key = frappe.new_doc("Social Login Key")
facebook_login_key.get_social_login_provider("Facebook", initialize=True)
facebook_login_key.social_login_provider = "Facebook"
facebook_login_key.client_id = social_login_keys.get("facebook_client_id")
facebook_login_key.client_secret = social_login_keys.get("facebook_client_secret")
if not (facebook_login_key.client_secret and facebook_login_key.client_id):
facebook_login_key.enable_social_login = 0
facebook_login_key.save()
if social_login_keys.get("frappe_server_url"):
frappe_login_key = frappe.new_doc("Social Login Key")
frappe_login_key.get_social_login_provider("Frappe", initialize=True)
frappe_login_key.social_login_provider = "Frappe"
frappe_login_key.base_url = social_login_keys.get("frappe_server_url")
frappe_login_key.client_id = social_login_keys.get("frappe_client_id")
frappe_login_key.client_secret = social_login_keys.get("frappe_client_secret")
if not (frappe_login_key.client_secret and frappe_login_key.client_id and frappe_login_key.base_url):
frappe_login_key.enable_social_login = 0
frappe_login_key.save()
if social_login_keys.get("github_client_id") or social_login_keys.get("github_client_secret"):
github_login_key = frappe.new_doc("Social Login Key")
github_login_key.get_social_login_provider("GitHub", initialize=True)
github_login_key.social_login_provider = "GitHub"
github_login_key.client_id = social_login_keys.get("github_client_id")
github_login_key.client_secret = social_login_keys.get("github_client_secret")
if not (github_login_key.client_secret and github_login_key.client_id):
github_login_key.enable_social_login = 0
github_login_key.save()
if social_login_keys.get("google_client_id") or social_login_keys.get("google_client_secret"):
google_login_key = frappe.new_doc("Social Login Key")
google_login_key.get_social_login_provider("Google", initialize=True)
google_login_key.social_login_provider = "Google"
google_login_key.client_id = social_login_keys.get("google_client_id")
google_login_key.client_secret = social_login_keys.get("google_client_secret")
if not (google_login_key.client_secret and google_login_key.client_id):
google_login_key.enable_social_login = 0
google_login_key.save()
frappe.delete_doc("DocType", "Social Login Keys")
def run_patch():
frappe.reload_doc("core", "doctype", "user", force=True)
frappe.reload_doc("core", "doctype", "user_social_login", force=True)
users = frappe.get_all("User", fields=["*"], filters={"name":("not in", ["Administrator", "Guest"])})
for user in users:
idx = 0
if user.frappe_userid:
insert_user_social_login(user.name, user.modified_by, 'frappe', idx, userid=user.frappe_userid)
idx += 1
if user.fb_userid or user.fb_username:
insert_user_social_login(user.name, user.modified_by, 'facebook', idx, userid=user.fb_userid, username=user.fb_username)
idx += 1
if user.github_userid or user.github_username:
insert_user_social_login(user.name, user.modified_by, 'github', idx, userid=user.github_userid, username=user.github_username)
idx += 1
if user.google_userid:
insert_user_social_login(user.name, user.modified_by, 'google', idx, userid=user.google_userid)
idx += 1
def insert_user_social_login(user, modified_by, provider, idx, userid=None, username=None):
source_cols = get_standard_cols()
creation_time = frappe.utils.get_datetime_str(frappe.utils.get_datetime())
values = [
frappe.generate_hash(length=10),
creation_time,
creation_time,
user,
modified_by,
user,
"User",
"social_logins",
cstr(idx),
provider
]
if userid:
source_cols.append("userid")
values.append(userid)
if username:
source_cols.append("username")
values.append(username)
query = """INSERT INTO `tabUser Social Login` (`{source_cols}`)
VALUES ({values})
""".format(
source_cols = "`, `".join(source_cols),
values= ", ".join([frappe.db.escape(d) for d in values])
)
frappe.db.sql(query)
def get_provider_field_map():
return frappe._dict({
"frappe": ["frappe_userid"],
"facebook": ["fb_userid", "fb_username"],
"github": ["github_userid", "github_username"],
"google": ["google_userid"],
})
def get_provider_fields(provider):
return get_provider_field_map().get(provider)
def get_standard_cols():
return ["name", "creation", "modified", "owner", "modified_by", "parent", "parenttype", "parentfield", "idx", "provider"]
|
from .extensions import db, resizer
class Upload(db.Model):
__tablename__ = 'upload'
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
name = db.Column(db.Unicode(255), nullable=False)
url = db.Column(db.Unicode(255), nullable=False)
if resizer:
for size in resizer.sizes.iterkeys():
setattr(Upload, size + '_name', db.Column(db.Unicode(255)))
setattr(Upload, size + '_url', db.Column(db.Unicode(255)))
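# Illustrative example (size names assumed): with resizer.sizes == {'thumb': ..., 'large': ...}
# the loop above adds thumb_name, thumb_url, large_name and large_url columns to Upload.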
|
import math
import numpy as np
import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[0,1.2,
1849,1.2,
1850,1.2,
1851,1.2,
1852,1.2,
1853,1.2,
1854,1.2,
1855,1.2,
1856,1.2,
1857,1.2,
1858,1.2,
1859,1.2,
1860,1.2,
1861,1.2,
1862,1.2,
1863,1.2,
1864,1.2,
1865,1.2,
1866,1.2,
1867,1.2,
1868,1.2,
1869,1.2,
1870,1.2,
1871,1.2,
1872,1.2,
1873,1.2,
1874,1.2,
1875,1.2,
1876,1.2,
1877,1.2,
1878,1.2,
1879,1.2,
1880,1.2,
1881,1.2,
1882,1.2,
1883,1.2,
1884,1.2,
1885,1.2,
1886,1.2,
1887,1.2,
1888,1.2,
1889,1.2,
1890,1.2,
1891,1.2,
1892,1.2,
1893,1.2,
1894,1.2,
1895,1.2,
1896,1.2,
1897,1.2,
1898,1.2,
1899,1.2,
1900,1.2,
1901,1.2,
1902,1.2,
1903,1.2,
1904,1.2,
1905,1.2,
1906,1.2,
1907,1.2,
1908,1.2,
1909,1.2,
1910,1.2,
1911,1.2,
1912,1.2,
1913,1.2,
1914,1.2,
1915,1.2,
1916,1.2,
1917,1.2,
1918,1.2,
1919,1.2,
1920,1.2,
1921,1.2,
1922,1.2,
1923,1.2,
1924,1.2,
1925,1.2,
1926,1.2,
1927,1.2,
1928,1.2,
1929,1.2,
1930,1.2,
1931,1.2,
1932,1.2,
1933,1.2,
1934,1.2,
1935,1.2,
1936,1.2,
1937,1.2,
1938,1.2,
1939,1.2,
1940,1.2,
1941,1.2,
1942,1.2,
1943,1.2,
1944,1.2,
1945,1.2,
1946,1.2,
1947,1.2,
1948,1.2,
1949,1.2,
1950,1.2,
1951,1.2,
1952,1.2,
1953,1.2,
1954,1.2,
1955,1.2,
1956,1.2,
1957,1.2,
1958,1.2,
1959,1.2,
1960,1.2,
1961,1.2,
1962,1.2,
1963,1.2,
1964,1.2,
1965,1.2,
1966,1.2,
1967,1.2,
1968,1.2,
1969,1.2,
1970,1.2,
1971,1.2,
1972,1.2,
1973,1.2,
1974,1.2,
1975,1.2,
1976,1.2,
1977,1.2,
1978,1.2,
1979,0.85,
1980,0.85,
1981,0.85,
1982,0.85,
1983,0.85,
1984,0.4,
1985,0.4,
1986,0.4,
1987,0.4,
1988,0.4,
1989,0.4,
1990,0.4,
1991,0.4,
1992,0.4,
1993,0.4,
1994,0.4,
1995,0.4,
1996,0.4,
1997,0.4,
1998,0.4,
1999,0.4,
2000,0.4,
2001,0.4,
2002,0.4,
2003,0.4,
2004,0.4,
2005,0.4,
2006,0.4,
2007,0.4,
2008,0.4,
2009,0.4,
2010,0.4,
2011,0.4,
2012,0.4,
2013,0.4,
2014,0.4,
2015,0.4,
2016,0.4,
2017,0.4,
2018,0.4,
2019,0.4,
2020,0.4,
2021,0.15,
2022,0.15,
2023,0.15,
2024,0.15,
2025,0.15,
2026,0.15,
2027,0.15,
2028,0.15,
2029,0.15,
2030,0.15,
2031,0.15,
2032,0.15,
2033,0.15,
2034,0.15,
2035,0.15,
2036,0.15,
2037,0.15,
2038,0.15,
2039,0.15,
2040,0.15,
2041,0.15,
2042,0.15,
2043,0.15,
2044,0.15,
2045,0.15,
2046,0.15,
2047,0.15,
2048,0.15,
2049,0.15,
2050,0.1,
2051,0.1])
return(l_lookup.lookup(xin))
|
"""
***************************************************************************
FileSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QFileDialog
from qgis.PyQt.QtCore import QSettings
from processing.tools.system import isWindows
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class FileSelectionPanel(BASE, WIDGET):
def __init__(self, isFolder, ext=None):
super(FileSelectionPanel, self).__init__(None)
self.setupUi(self)
self.ext = ext or '*'
self.isFolder = isFolder
self.btnSelect.clicked.connect(self.showSelectionDialog)
def showSelectionDialog(self):
# Find the file dialog's working directory
settings = QSettings()
text = self.leText.text()
if os.path.isdir(text):
path = text
elif os.path.isdir(os.path.dirname(text)):
path = os.path.dirname(text)
elif settings.contains('/Processing/LastInputPath'):
path = settings.value('/Processing/LastInputPath')
else:
path = ''
if self.isFolder:
folder = QFileDialog.getExistingDirectory(self,
self.tr('Select folder'), path)
if folder:
self.leText.setText(folder)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(folder))
else:
filenames = QFileDialog.getOpenFileNames(self,
self.tr('Select file'), path, '*.' + self.ext)
if filenames:
self.leText.setText(u';'.join(filenames))
settings.setValue('/Processing/LastInputPath',
os.path.dirname(filenames[0]))
def getValue(self):
s = self.leText.text()
if isWindows():
s = s.replace('\\', '/')
return s
def setText(self, text):
self.leText.setText(text)
|
"""
N Classic Base renderer Ext renderer
20 0.22 0.14 0.14
100 0.16 0.14 0.13
1000 0.45 0.26 0.17
10000 3.30 1.31 0.53
50000 19.30 6.53 1.98
"""
from pylab import *
import time
for N in (20,100,1000,10000,50000):
tstart = time.time()
x = 0.9*rand(N)
y = 0.9*rand(N)
s = 20*rand(N)
scatter(x,y,s)
print '%d symbols in %1.2f s' % (N, time.time()-tstart)
|
""" Showing last hour history of FTS transfers. """
import sys
import DIRAC
from DIRAC import gLogger, gConfig, S_OK
from DIRAC.Core.Base import Script
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client import PathFinder
__RCSID__ = "$Id$"
colors = { "yellow" : "\033[93m%s\033[0m",
"red" : "\033[91m%s\033[0m" }
gProblematic = False
def showChannels():
""" print info about the last hour performance of FTS system """
global gProblematic
taSection = PathFinder.getAgentSection("DataManagement/TransferAgent")
timeScale = gConfig.getOption( taSection + "/ThroughputTimescale", 3600 )
if not timeScale["OK"]:
gLogger.error( timeScale["Message"] )
DIRAC.exit(1)
timeScale = int( timeScale["Value"] )
accFailureRate = gConfig.getOption( taSection + "/StrategyHandler/AcceptableFailureRate", 75 )
if not accFailureRate["OK"]:
gLogger.error( accFailureRate["Message"] )
accFailureRate = int( accFailureRate["Value"] )
accFailedFiles = gConfig.getOption( taSection + "/StrategyHandler/AcceptableFailedFiles", 5 )
if not accFailedFiles["OK"]:
gLogger.error( accFailedFiles["Message"] )
accFailedFiles = int( accFailedFiles["Value"] )
scInfo = "timescale = %s s\nacc failure rate = %s %%\nacc distinct failed files = %s" % ( timeScale,
accFailureRate,
accFailedFiles )
## db monitor
transferDB = RPCClient( "DataManagement/TransferDBMonitoring" )
## get channels
channels = transferDB.getChannelQueues()
if not channels["OK"]:
gLogger.error( channels["Message"] )
DIRAC.exit(1)
channels = channels["Value"]
## get bandwidths
bands = transferDB.getChannelObservedThroughput( timeScale )
if not bands["OK"]:
gLogger.error( bands["Message"] )
DIRAC.exit(1)
bands = bands["Value"]
## get failed files
badFiles = transferDB.getCountFileToFTS( timeScale, "Failed" )
if not badFiles["OK"]:
gLogger.error( badFiles["Message"] )
DIRAC.exit(1)
badFiles = badFiles["Value"] if badFiles["Value"] else {}
colorize = sys.stdout.isatty()
header = " %2s | %-15s | %8s | %8s | %8s | %8s | %8s | %12s | %8s | %8s" % ( "Id", "Name", "Status",
"Waiting", "Success", "Failed",
"FilePut", "ThroughPut", "FailRate",
"FailedFiles" )
dashLine = "-"*len(header)
lineTemplate = " %2s | %-15s | %8s | %8d | %8d | %8d | %8.2f | %12.2f | %8.2f | %8d"
printOut = []
for chId, channel in channels.items():
name = channel["ChannelName"]
color = None
status = channel["Status"]
if status == "Active":
status = "OK"
waitingFiles = channel["Files"]
waitingSize = channel["Size"]
failedFiles = successFiles = filePut = throughPut = fRate = 0
fFiles = 0
if chId in badFiles:
fFiles = int(badFiles[chId])
if chId in bands:
band = bands[chId]
failedFiles = int(band["FailedFiles"])
successFiles = int(band["SuccessfulFiles"])
filePut = band["Fileput"]
throughPut = band["Throughput"]
if failedFiles or successFiles:
fRate = 100.0 * float( failedFiles ) / ( float(failedFiles) + float( successFiles) )
if fRate > 0 and colorize:
color = "yellow"
status = "Poor"
if fRate > accFailureRate and fFiles > accFailedFiles:
status = "Closed"
if colorize:
color = "red"
if gProblematic and not fRate:
continue
if colorize and color:
line = colors[color] % lineTemplate
else:
line = lineTemplate
printOut.append( line % ( chId, name, status,
waitingFiles if waitingFiles else 0,
successFiles if successFiles else 0,
failedFiles if failedFiles else 0,
filePut if filePut else 0,
throughPut if throughPut else 0,
fRate if fRate else 0,
fFiles if fFiles else 0 ) )
if printOut:
printOut = [ scInfo, header, dashLine ] + printOut
for line in printOut:
gLogger.always( line )
else:
gLogger.always("Noting to display...")
def setProblematic( problematic=False ):
""" callback for showing only problematic channels """
global gProblematic
gProblematic = True
return S_OK()
if __name__ == "__main__":
Script.registerSwitch( "p", "problematic", "show only problematic channels", setProblematic )
Script.parseCommandLine()
showChannels()
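# Illustrative invocation (the installed script name is not shown here): when run
# through the DIRAC Script machinery, passing -p / --problematic limits the output
# to channels that currently show failures.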
|
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.request import jsonified, getParam
from couchpotato.core.helpers.variable import md5, getTitle, splitString, \
possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError
import datetime
import random
import re
import time
import traceback
log = CPLog(__name__)
class Searcher(Plugin):
in_progress = False
def __init__(self):
addEvent('searcher.all', self.allMovies)
addEvent('searcher.single', self.single)
addEvent('searcher.correct_movie', self.correctMovie)
addEvent('searcher.download', self.download)
addEvent('searcher.try_next_release', self.tryNextRelease)
addEvent('searcher.could_be_released', self.couldBeReleased)
addApiView('searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
'params': {
'id': {'desc': 'The id of the movie'},
},
})
addApiView('searcher.full_search', self.allMoviesView, docs = {
'desc': 'Starts a full search for all wanted movies',
})
addApiView('searcher.progress', self.getProgress, docs = {
'desc': 'Get the progress of current full search',
'return': {'type': 'object', 'example': """{
'progress': False || object, total & to_go,
}"""},
})
addEvent('app.load', self.setCrons)
addEvent('setting.save.searcher.cron_day.after', self.setCrons)
addEvent('setting.save.searcher.cron_hour.after', self.setCrons)
addEvent('setting.save.searcher.cron_minute.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.cron', 'searcher.all', self.allMovies, day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
def allMoviesView(self):
in_progress = self.in_progress
if not in_progress:
fireEventAsync('searcher.all')
fireEvent('notify.frontend', type = 'searcher.started', data = True, message = 'Full search started')
else:
fireEvent('notify.frontend', type = 'searcher.already_started', data = True, message = 'Full search already in progress')
return jsonified({
'success': not in_progress
})
def getProgress(self):
return jsonified({
'progress': self.in_progress
})
def allMovies(self):
if self.in_progress:
log.info('Search already in progress')
return
self.in_progress = True
db = get_session()
movies = db.query(Movie).filter(
Movie.status.has(identifier = 'active')
).all()
random.shuffle(movies)
self.in_progress = {
'total': len(movies),
'to_go': len(movies),
}
try:
search_types = self.getSearchTypes()
for movie in movies:
movie_dict = movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
})
try:
self.single(movie_dict, search_types)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
fireEvent('library.update', movie_dict['library']['identifier'], force = True)
except:
log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
self.in_progress['to_go'] -= 1
# Break if CP wants to shut down
if self.shuttingDown():
break
except SearchSetupError:
pass
self.in_progress = False
def single(self, movie, search_types = None):
# Find out search type
try:
if not search_types:
search_types = self.getSearchTypes()
except SearchSetupError:
return
done_status = fireEvent('status.get', 'done', single = True)
if not movie['profile'] or movie['status_id'] == done_status.get('id'):
log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
return
db = get_session()
pre_releases = fireEvent('quality.pre_releases', single = True)
release_dates = fireEvent('library.update_release_date', identifier = movie['library']['identifier'], merge = True)
available_status, ignored_status = fireEvent('status.get', ['available', 'ignored'], single = True)
found_releases = []
default_title = getTitle(movie['library'])
if not default_title:
log.error('No proper info found for movie, removing it from library to prevent further issues.')
fireEvent('movie.delete', movie['id'], single = True)
return
fireEvent('notify.frontend', type = 'searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
ret = False
for quality_type in movie['profile']['types']:
if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates):
log.info('Too early to search for %s, %s', (quality_type['quality']['identifier'], default_title))
continue
has_better_quality = 0
# See if better quality is available
for release in movie['releases']:
if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id')]:
has_better_quality += 1
# Don't search for quality lower then already available.
if has_better_quality == 0:
log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = []
for search_type in search_types:
type_results = fireEvent('%s.search' % search_type, movie, quality, merge = True)
if type_results:
results += type_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if len(sorted_results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
download_preference = self.conf('preferred_method')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['type'], reverse = (download_preference == 'torrent'))
# Check if movie isn't deleted while searching
if not db.query(Movie).filter_by(id = movie.get('id')).first():
break
# Add them to this movie releases list
for nzb in sorted_results:
nzb_identifier = md5(nzb['url'])
found_releases.append(nzb_identifier)
rls = db.query(Release).filter_by(identifier = nzb_identifier).first()
if not rls:
rls = Release(
identifier = nzb_identifier,
movie_id = movie.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in nzb:
try:
if not isinstance(nzb[info], (str, unicode, int, long, float)):
continue
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(nzb[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
nzb['status_id'] = rls.status_id
for nzb in sorted_results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and nzb.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), nzb['name']))
continue
if nzb['status_id'] == ignored_status.get('id'):
log.info('Ignored: %s', nzb['name'])
continue
if nzb['score'] <= 0:
log.info('Ignored, score too low: %s', nzb['name'])
continue
downloaded = self.download(data = nzb, movie = movie)
if downloaded is True:
ret = True
break
elif downloaded != 'try_next':
break
# Remove releases that aren't found anymore
for release in movie.get('releases', []):
if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('id'), single = True)
else:
log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
fireEvent('movie.restatus', movie['id'])
break
# Break if CP wants to shut down
if self.shuttingDown() or ret:
break
fireEvent('notify.frontend', type = 'searcher.ended.%s' % movie['id'], data = True)
return ret
def download(self, data, movie, manual = False):
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if downloader_enabled:
snatched_status = fireEvent('status.get', 'snatched', single = True)
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
done_status = fireEvent('status.get', 'done', single = True)
rls.status_id = done_status.get('id') if not renamer_enabled else snatched_status.get('id')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark movie done
if not renamer_enabled:
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
# Mark release done
rls.status_id = done_status.get('id')
rls.last_edit = int(time.time())
db.commit()
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
mvie.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled, or the download gave an error', (data.get('type', '')))
return False
def getSearchTypes(self):
download_types = fireEvent('download.enabled_types', merge = True)
provider_types = fireEvent('provider.enabled_types', merge = True)
if download_types and len(list(set(provider_types) & set(download_types))) == 0:
log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_types))
raise NoProviders
for useless_provider in list(set(provider_types) - set(download_types)):
log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)
search_types = download_types
if len(search_types) == 0:
log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
raise NoDownloaders
return search_types
def correctMovie(self, nzb = None, movie = None, quality = None, **kwargs):
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
return False
movie_name = getTitle(movie['library'])
movie_words = re.split('\W+', simplifyString(movie_name))
nzb_name = simplifyString(nzb['name'])
nzb_words = re.split('\W+', nzb_name)
# Make sure it has required words
required_words = splitString(self.conf('required_words').lower())
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(nzb_words) & set(req))) == len(req)
if self.conf('required_words') and req_match == 0:
log.info2('Wrong: Required word missing: %s', nzb['name'])
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words').lower())
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)
if self.conf('ignored_words') and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
return False
# Ignore porn stuff
pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic']
pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
if pron_words:
log.info('Wrong: %s, probably pr0n', (nzb['name']))
return False
preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
if self.containsOtherQuality(nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality):
log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
return False
# File too small
if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
return False
# File too large
if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
return False
# Provider specific functions
get_more = nzb.get('get_more_info')
if get_more:
get_more(nzb)
extra_check = nzb.get('extra_check')
if extra_check and not extra_check(nzb):
return False
if imdb_results:
return True
# Check if nzb contains imdb link
if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']):
return True
for raw_title in movie['library']['titles']:
for movie_title in possibleTitles(raw_title['title']):
movie_words = re.split('\W+', simplifyString(movie_title))
if self.correctName(nzb['name'], movie_title):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], movie_name, movie['library']['year']))
return False
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}):
name = nzb['name']
size = nzb.get('size', 0)
nzb_words = re.split('\W+', simplifyString(name))
qualities = fireEvent('quality.all', single = True)
found = {}
for quality in qualities:
# Main in words
if quality['identifier'] in nzb_words:
found[quality['identifier']] = True
# Alt in words
if list(set(nzb_words) & set(quality['alternative'])):
found[quality['identifier']] = True
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
if guess:
found[guess['identifier']] = True
# Hack for older movies that don't contain quality tag
year_name = fireEvent('scanner.name_year', name, single = True)
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
if size > 3000: # Assume dvdr
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size))
found['dvdr'] = True
else: # Assume dvdrip
log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size))
found['dvdrip'] = True
# Allow other qualities
for allowed in preferred_quality.get('allow'):
if found.get(allowed):
del found[allowed]
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def checkIMDB(self, haystack, imdbId):
for string in haystack:
if 'imdb.com/title/' + imdbId in string:
return True
return False
def correctYear(self, haystack, year, year_range):
for string in haystack:
year_name = fireEvent('scanner.name_year', string, single = True)
if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)):
log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year))
return True
log.debug('Movie year doesn\'t match range: %s looking for %s', (year_name.get('year'), year))
return False
def correctName(self, check_name, movie_name):
check_names = [check_name]
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(check_name.split('['), key = len))
except: pass
for check_name in list(set(check_names)):
check_movie = fireEvent('scanner.name_year', check_name, single = True)
try:
check_words = filter(None, re.split('\W+', check_movie.get('name', '')))
movie_words = filter(None, re.split('\W+', simplifyString(movie_name)))
if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
return True
except:
pass
return False
def couldBeReleased(self, is_pre_release, dates):
now = int(time.time())
if not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0):
return True
else:
# For movies before 1972
if dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
return True
if is_pre_release:
# Prerelease 1 week before theaters
if dates.get('theater') - 604800 < now:
return True
else:
# 12 weeks after theater release
if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now:
return True
if dates.get('dvd') > 0:
# 4 weeks before dvd release
if dates.get('dvd') - 2419200 < now:
return True
# Dvd should be released
if dates.get('dvd') < now:
return True
return False
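# For reference, the epoch offsets used in couldBeReleased above are plain second counts:
#   604800 seconds  = 1 week   (pre-release window before the theater date)
#   7257600 seconds = 12 weeks (wait after the theater release)
#   2419200 seconds = 4 weeks  (window before the DVD release date)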
def tryNextReleaseView(self):
trynext = self.tryNextRelease(getParam('id'))
return jsonified({
'success': trynext
})
def tryNextRelease(self, movie_id, manual = False):
snatched_status = fireEvent('status.get', 'snatched', single = True)
ignored_status = fireEvent('status.get', 'ignored', single = True)
try:
db = get_session()
rels = db.query(Release).filter_by(
status_id = snatched_status.get('id'),
movie_id = movie_id
).all()
for rel in rels:
rel.status_id = ignored_status.get('id')
db.commit()
movie_dict = fireEvent('movie.get', movie_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict['library']))
fireEvent('searcher.single', movie_dict)
return True
except:
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
class SearchSetupError(Exception):
pass
class NoDownloaders(SearchSetupError):
pass
class NoProviders(SearchSetupError):
pass
|
'''
SASSIE Copyright (C) 2011 Joseph E. Curtis
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it under certain
conditions; see http://www.gnu.org/licenses/gpl-3.0.html for details.
'''
from distutils.core import *
from distutils import sysconfig
import numpy
from numpy.distutils.core import Extension, setup
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
matrix_math = Extension(name="matrix_math",sources=['./matrix_math.f'],
include_dirs = [numpy_include],
)
setup( name = "matrix_math",
description = "Module calculates matrix product",
author = "Joseph E. Curtis",
version = "0.1",
ext_modules = [matrix_math]
)
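# Build sketch (assuming numpy's f2py and a Fortran compiler such as gfortran
# are available); compiling in place produces an importable matrix_math module:
#
#     python setup.py build_ext --inplace
#
# after which `import matrix_math` exposes the compiled routine.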
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from PIL import Image
from treemap.images import save_uploaded_image
from treemap.tests import LocalMediaTestCase, media_dir
class SaveImageTest(LocalMediaTestCase):
@media_dir
def test_rotates_image(self):
sideways_file = self.load_resource('tree_sideways.jpg')
img_file, _ = save_uploaded_image(sideways_file, 'test')
expected_width, expected_height = Image.open(sideways_file).size
actual_width, actual_height = Image.open(img_file).size
self.assertEquals(expected_width, actual_height)
self.assertEquals(expected_height, actual_width)
|
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.io.test.common import compute_mulliken_charges, compute_hf_energy
def test_load_wfn_low_he_s():
fn_wfn = context.get_fn('test/he_s_orbital.wfn')
title, numbers, coordinates, centers, type_assignment, exponents, \
mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
assert title == 'He atom - decontracted 6-31G basis set'
assert numbers.shape == (1,)
assert numbers == [2]
assert coordinates.shape == (1, 3)
assert (coordinates == [0.00, 0.00, 0.00]).all()
assert centers.shape == (4,)
assert (centers == [0, 0, 0, 0]).all()
assert type_assignment.shape == (4,)
assert (type_assignment == [1, 1, 1, 1]).all()
assert exponents.shape == (4,)
assert (exponents == [0.3842163E+02, 0.5778030E+01, 0.1241774E+01, 0.2979640E+00]).all()
assert mo_count.shape == (1,)
assert mo_count == [1]
assert occ_num.shape == (1,)
assert occ_num == [2.0]
assert mo_energy.shape == (1,)
assert mo_energy == [-0.914127]
assert coefficients.shape == (4, 1)
expected = np.array([0.26139500E+00, 0.41084277E+00, 0.39372947E+00, 0.14762025E+00])
assert (coefficients == expected.reshape(4, 1)).all()
assert abs(energy - (-2.855160426155)) < 1.e-5
def test_load_wfn_low_h2o():
fn_wfn = context.get_fn('test/h2o_sto3g.wfn')
title, numbers, coordinates, centers, type_assignment, exponents, \
mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
assert title == 'H2O Optimization'
assert numbers.shape == (3,)
assert (numbers == np.array([8, 1, 1])).all()
assert coordinates.shape == (3, 3)
assert (coordinates[0] == [-4.44734101, 3.39697999, 0.00000000]).all()
assert (coordinates[1] == [-2.58401495, 3.55136194, 0.00000000]).all()
assert (coordinates[2] == [-4.92380519, 5.20496220, 0.00000000]).all()
assert centers.shape == (21,)
assert (centers[:15] == np.zeros(15, int)).all()
assert (centers[15:] == np.array([1, 1, 1, 2, 2, 2])).all()
assert type_assignment.shape == (21,)
assert (type_assignment[:6] == np.ones(6)).all()
assert (type_assignment[6:15] == np.array([2, 2, 2, 3, 3, 3, 4, 4, 4])).all()
assert (type_assignment[15:] == np.ones(6)).all()
assert exponents.shape == (21,)
assert (exponents[:3] == [0.1307093E+03, 0.2380887E+02, 0.6443608E+01]).all()
assert (exponents[5:8] == [0.3803890E+00, 0.5033151E+01, 0.1169596E+01]).all()
assert (exponents[13:16] == [0.1169596E+01, 0.3803890E+00, 0.3425251E+01]).all()
assert exponents[-1] == 0.1688554E+00
assert mo_count.shape == (5,)
assert (mo_count == [1, 2, 3, 4, 5]).all()
assert occ_num.shape == (5,)
assert np.sum(occ_num) == 10.0
assert (occ_num == [2.0, 2.0, 2.0, 2.0, 2.0]).all()
assert mo_energy.shape == (5,)
assert (mo_energy == np.sort(mo_energy)).all()
assert (mo_energy[:3] == [-20.251576, -1.257549, -0.593857]).all()
assert (mo_energy[3:] == [-0.459729, -0.392617]).all()
assert coefficients.shape == (21, 5)
expected = [0.42273517E+01, -0.99395832E+00, 0.19183487E-11, 0.44235381E+00, -0.57941668E-14]
assert (coefficients[0] == expected).all()
assert coefficients[6, 2] == 0.83831599E+00
assert coefficients[10, 3] == 0.65034846E+00
assert coefficients[17, 1] == 0.12988055E-01
assert coefficients[-1, 0] == -0.46610858E-03
assert coefficients[-1, -1] == -0.33277355E-15
assert abs(energy - (-74.965901217080)) < 1.e-6
def test_get_permutation_orbital():
assert (get_permutation_orbital(np.array([1, 1, 1])) == [0, 1, 2]).all()
assert (get_permutation_orbital(np.array([1, 1, 2, 3, 4])) == [0, 1, 2, 3, 4]).all()
assert (get_permutation_orbital(np.array([2, 3, 4])) == [0, 1, 2]).all()
assert (get_permutation_orbital(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
assign = np.array([1, 1, 2, 2, 3, 3, 4, 4, 1])
expect = [0, 1, 2, 4, 6, 3, 5, 7, 8]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
expect = [0, 1, 2, 3, 4, 5, 6, 7]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10])
expect = [0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9, 11]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10])
expect = [0, 1, 3, 5, 2, 4, 6, 7, 8, 9, 10, 11, 12]
assert (get_permutation_orbital(assign) == expect).all()
# f orbitals
assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
assert (get_permutation_orbital(assign) == range(10)).all()
# g orbitals
assign = np.array([23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21])
assert (get_permutation_orbital(assign) == range(15)).all()
# h orbitals
assert (get_permutation_orbital(np.arange(36, 57)) == range(21)).all()
assign = np.array([1, 1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
assert (get_permutation_orbital(assign) == range(12)).all()
assign = np.array([2, 3, 4, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1, 1])
assert (get_permutation_orbital(assign) == range(15)).all()
def test_get_permutation_basis():
assert (get_permutation_basis(np.array([1, 1, 1])) == [0, 1, 2]).all()
assert (get_permutation_basis(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
assert (get_permutation_basis(np.array([1, 2, 3, 4, 1])) == [0, 1, 2, 3, 4]).all()
assert (get_permutation_basis(np.array([5, 6, 7, 8, 9, 10])) == [0, 3, 4, 1, 5, 2]).all()
assign = np.repeat([5, 6, 7, 8, 9, 10], 2)
expect = [0, 6, 8, 2, 10, 4, 1, 7, 9, 3, 11, 5]
assert (get_permutation_basis(assign) == expect).all()
assert (get_permutation_basis(np.arange(1, 11)) == [0, 1, 2, 3, 4, 7, 8, 5, 9, 6]).all()
assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
expect = [0, 1, 4, 5, 2, 6, 3, 7]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
expect = [0, 4, 5, 3, 9, 6, 1, 8, 7, 2]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1])
expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 2, 3, 3, 4, 4])
expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 13, 15, 12, 14, 16]
assert (get_permutation_basis(assign) == expect).all()
assign = [1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expect = np.array([0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 12, 13, 14, 17, 18, 15, 19, 16])
assert (get_permutation_basis(np.array(assign)) == expect).all()
assert (get_permutation_basis(np.arange(36, 57)) == np.arange(21)[::-1]).all()
assign = [23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21]
expect = [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
assert (get_permutation_basis(np.array(assign)) == expect).all()
assert (get_permutation_basis(np.arange(36, 57)) == range(21)[::-1]).all()
def test_get_mask():
assert (get_mask(np.array([2, 3, 4])) == [True, False, False]).all()
expected = [True, True, False, False, True, True, False, False]
assert (get_mask(np.array([1, 2, 3, 4, 1, 2, 3, 4])) == expected).all()
expected = [True, False, False, False, False, False]
assert (get_mask(np.array([5, 6, 7, 8, 9, 10])) == expected).all()
expected = [True, False, False, True, True, False, False, False, False, False]
assert (get_mask(np.array([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])) == expected).all()
expected = [True, False, False, False, False, False, False, False, False, False]
assert (get_mask(np.arange(11, 21)) == expected).all()
assert (get_mask(np.array([21, 24, 25])) == [True, False, False]).all()
assert (get_mask(np.array([11, 21, 36, 1])) == [True, True, True, True]).all()
def check_load_wfn(name):
# system out of *.wfn file
mol1 = IOData.from_file(context.get_fn('test/%s.wfn' % name))
# system out of *.fchk file
mol2 = IOData.from_file(context.get_fn('test/%s.fchk' % name))
# Coordinates check:
assert (abs(mol1.coordinates - mol2.coordinates) < 1e-6).all()
# Numbers check
numbers1 = mol1.numbers
numbers2 = mol2.numbers
assert (numbers1 == numbers2).all()
# Basis Set check:
obasis1 = mol1.obasis
obasis2 = mol2.obasis
assert obasis1.nbasis == obasis2.nbasis
assert (obasis1.shell_map == obasis2.shell_map).all()
assert (obasis1.shell_types == obasis2.shell_types).all()
assert (obasis1.nprims == obasis2.nprims).all()
assert (abs(obasis1.alphas - obasis2.alphas) < 1.e-4).all()
# Comparing MOs (*.wfn might not contain virtual orbitals):
n_mo = mol1.orb_alpha.nfn
assert (abs(mol1.orb_alpha.energies - mol2.orb_alpha.energies[:n_mo]) < 1.e-5).all()
assert (mol1.orb_alpha.occupations == mol2.orb_alpha.occupations[:n_mo]).all()
assert (abs(mol1.orb_alpha.coeffs - mol2.orb_alpha.coeffs[:, :n_mo]) < 1.e-7).all()
# Check overlap
olp1 = obasis1.compute_overlap()
olp2 = obasis2.compute_overlap()
obasis2.compute_overlap(olp2)
assert (abs(olp1 - olp2) < 1e-6).all()
# Check normalization
mol1.orb_alpha.check_normalization(olp1, 1e-5)
# Check charges
dm_full1 = mol1.get_dm_full()
charges1 = compute_mulliken_charges(obasis1, numbers1, dm_full1)
dm_full2 = mol2.get_dm_full()
charges2 = compute_mulliken_charges(obasis2, numbers2, dm_full2)
assert (abs(charges1 - charges2) < 1e-6).all()
# Check energy
energy1 = compute_hf_energy(mol1)
energy2 = compute_hf_energy(mol2)
# check loaded & computed energy from wfn file
assert abs(energy1 - mol1.energy) < 1.e-5
assert abs(energy1 - energy2) < 1e-5
return energy1, charges1
def test_load_wfn_he_s_virtual():
energy, charges = check_load_wfn('he_s_virtual')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855160426155)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_s():
energy, charges = check_load_wfn('he_s_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855160426155)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_sp():
energy, charges = check_load_wfn('he_sp_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.859895424589)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spd():
energy, charges = check_load_wfn('he_spd_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855319016184)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdf():
energy, charges = check_load_wfn('he_spdf_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.100269433080)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdfgh():
energy, charges = check_load_wfn('he_spdfgh_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.048675168346)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdfgh_virtual():
energy, charges = check_load_wfn('he_spdfgh_virtual')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.048675168346)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def check_wfn(fn_wfn, restricted, nbasis, energy, charges):
fn_wfn = context.get_fn(fn_wfn)
mol = IOData.from_file(fn_wfn)
assert mol.obasis.nbasis == nbasis
olp = mol.obasis.compute_overlap()
if restricted:
mol.orb_alpha.check_normalization(olp, 1e-5)
assert not hasattr(mol, 'orb_beta')
else:
mol.orb_alpha.check_normalization(olp, 1e-5)
mol.orb_beta.check_normalization(olp, 1e-5)
if energy is not None:
assert abs(energy - mol.energy) < 1.e-5
myenergy = compute_hf_energy(mol)
assert abs(energy - myenergy) < 1e-5
dm_full = mol.get_dm_full()
mycharges = compute_mulliken_charges(mol.obasis, mol.numbers, dm_full)
assert (abs(charges - mycharges) < 1e-5).all()
orb_beta = getattr(mol, 'orb_beta', None)
return mol.obasis, mol.coordinates, mol.numbers, dm_full, mol.orb_alpha, orb_beta, mol.energy
def test_load_wfn_h2o_sto3g_decontracted():
check_wfn(
'test/h2o_sto3g_decontracted.wfn',
True, 21, -75.162231674351,
np.array([-0.546656, 0.273328, 0.273328]),
)
def test_load_wfn_h2_ccpvqz_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/h2_ccpvqz.wfn',
True, 74, -1.133504568400,
np.array([0.0, 0.0]),
)
expect = [82.64000, 12.41000, 2.824000, 0.7977000, 0.2581000]
assert (abs(obasis.alphas[:5] - expect) < 1.e-5).all()
expect = [-0.596838, 0.144565, 0.209605, 0.460401, 0.460401]
assert (orb_alpha.energies[:5] == expect).all()
expect = [12.859067, 13.017471, 16.405834, 25.824716, 26.100443]
assert (orb_alpha.energies[-5:] == expect).all()
assert (orb_alpha.occupations[:5] == [1.0, 0.0, 0.0, 0.0, 0.0]).all()
assert abs(orb_alpha.occupations.sum() - 1.0) < 1.e-6
def test_load_wfn_h2o_sto3g():
check_wfn(
'test/h2o_sto3g.wfn',
True, 21, -74.965901217080,
np.array([-0.330532, 0.165266, 0.165266])
)
def test_load_wfn_li_sp_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/li_sp_virtual.wfn',
False, 8, -3.712905542719,
np.array([0.0, 0.0])
)
assert abs(orb_alpha.occupations.sum() - 2.0) < 1.e-6
assert abs(orb_beta.occupations.sum() - 1.0) < 1.e-6
assert (orb_alpha.occupations == [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).all()
assert (orb_beta.occupations == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).all()
expect = [-0.087492, -0.080310, 0.158784, 0.158784, 1.078773, 1.090891, 1.090891, 49.643670]
assert (abs(orb_alpha.energies - expect) < 1.e-6).all()
expect = [-0.079905, 0.176681, 0.176681, 0.212494, 1.096631, 1.096631, 1.122821, 49.643827]
assert (abs(orb_beta.energies - expect) < 1.e-6).all()
assert orb_alpha.coeffs.shape == (8, 8)
assert orb_beta.coeffs.shape == (8, 8)
def test_load_wfn_li_sp():
fn_wfn = context.get_fn('test/li_sp_orbital.wfn')
mol = IOData.from_file(fn_wfn)
assert mol.title == 'Li atom - using s & p orbitals'
assert mol.orb_alpha.nfn == 2
assert mol.orb_beta.nfn == 1
assert abs(mol.energy - (-3.712905542719)) < 1.e-5
def test_load_wfn_o2():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/o2_uhf.wfn',
False, 72, -149.664140769678,
np.array([0.0, 0.0]),
)
assert orb_alpha.nfn == 9
assert orb_beta.nfn == 7
def test_load_wfn_o2_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/o2_uhf_virtual.wfn',
False, 72, -149.664140769678,
np.array([0.0, 0.0]),
)
assert abs(orb_alpha.occupations.sum() - 9.0) < 1.e-6
assert abs(orb_beta.occupations.sum() - 7.0) < 1.e-6
assert orb_alpha.occupations.shape == (44,)
assert orb_beta.occupations.shape == (44,)
assert (orb_alpha.occupations[:9] == np.ones(9)).all()
assert (orb_beta.occupations[:7] == np.ones(7)).all()
assert (orb_alpha.occupations[9:] == np.zeros(35)).all()
assert (orb_beta.occupations[7:] == np.zeros(37)).all()
assert orb_alpha.energies.shape == (44,)
assert orb_beta.energies.shape == (44,)
assert orb_alpha.energies[0] == -20.752000
assert orb_alpha.energies[10] == 0.179578
assert orb_alpha.energies[-1] == 51.503193
assert orb_beta.energies[0] == -20.697027
assert orb_beta.energies[15] == 0.322590
assert orb_beta.energies[-1] == 51.535258
assert orb_alpha.coeffs.shape == (72, 44)
assert orb_beta.coeffs.shape == (72, 44)
def test_load_wfn_lif_fci():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/lif_fci.wfn',
True, 44, None,
np.array([-0.645282, 0.645282]),
)
assert orb_alpha.occupations.shape == (18,)
assert abs(orb_alpha.occupations.sum() - 6.0) < 1.e-6
assert orb_alpha.occupations[0] == 2.00000000 / 2
assert orb_alpha.occupations[10] == 0.00128021 / 2
assert orb_alpha.occupations[-1] == 0.00000054 / 2
assert orb_alpha.energies.shape == (18,)
assert orb_alpha.energies[0] == -26.09321253
assert orb_alpha.energies[15] == 1.70096290
assert orb_alpha.energies[-1] == 2.17434072
assert orb_alpha.coeffs.shape == (44, 18)
kin = obasis.compute_kinetic()
expected_kin = 106.9326884815 # FCI kinetic energy
expected_nn = 9.1130265227
assert abs(np.einsum('ab,ba', kin, dm_full) - expected_kin) < 1.e-6
assert abs(compute_nucnuc(coordinates, numbers.astype(float)) - expected_nn) < 1.e-6
points = np.array([[0.0, 0.0, -0.17008], [0.0, 0.0, 0.0], [0.0, 0.0, 0.03779]])
density = np.zeros(3)
obasis.compute_grid_density_dm(dm_full, points, density)
assert (abs(density - [0.492787, 0.784545, 0.867723]) < 1.e-4).all()
assert abs(energy - (-107.0575700853)) < 1.e-5 # FCI energy
def test_load_wfn_lih_cation_fci():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/lih_cation_fci.wfn',
True, 26, None,
np.array([0.913206, 0.086794]),
)
assert (numbers == [3, 1]).all()
expected_kin = 7.7989675958 # FCI kinetic energy
expected_nn = 0.9766607347
kin = obasis.compute_kinetic()
assert abs(np.einsum('ab,ba', kin, dm_full) - expected_kin) < 1.e-6
assert abs(compute_nucnuc(coordinates, numbers.astype(float)) - expected_nn) < 1.e-6
assert orb_alpha.occupations.shape == (11,)
assert abs(orb_alpha.occupations.sum() - 1.5) < 1.e-6
assert abs(energy - (-7.7214366383)) < 1.e-5 # FCI energy
|
def is_perfect_number(n):
    # A perfect number equals the sum of its proper divisors.
    total = 0
    for x in range(1, n):
        if n % x == 0:
            total += x
    return total == n
num = int(input("Please enter a number to check if it is perfect or not: "))
print(is_perfect_number(num))
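# Quick illustrative checks (verifiable by hand): 6 and 28 are perfect, since
# 1 + 2 + 3 == 6 and 1 + 2 + 4 + 7 + 14 == 28, while 12 is not (1 + 2 + 3 + 4 + 6 == 16).
#
#     assert is_perfect_number(6)
#     assert is_perfect_number(28)
#     assert not is_perfect_number(12)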
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ImageRevision.width'
db.add_column('wiki_imagerevision', 'width',
self.gf('django.db.models.fields.SmallIntegerField')(default=0),
keep_default=False)
# Adding field 'ImageRevision.height'
db.add_column('wiki_imagerevision', 'height',
self.gf('django.db.models.fields.SmallIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ImageRevision.width'
db.delete_column('wiki_imagerevision', 'width')
# Deleting field 'ImageRevision.height'
db.delete_column('wiki_imagerevision', 'height')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_notify.notificationtype': {
'Meta': {'object_name': 'NotificationType', 'db_table': "'notify_notificationtype'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'django_notify.settings': {
'Meta': {'object_name': 'Settings', 'db_table': "'notify_settings'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
'django_notify.subscription': {
'Meta': {'object_name': 'Subscription', 'db_table': "'notify_subscription'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_notify.NotificationType']"}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'send_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'settings': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_notify.Settings']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_articles'", 'null': 'True', 'to': "orm['%s']" % user_orm_label})
},
'wiki.articleforobject': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'ArticleForObject'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_articleforobject'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mptt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'wiki.articleplugin': {
'Meta': {'object_name': 'ArticlePlugin'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'wiki.articlerevision': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.articlesubscription': {
'Meta': {'object_name': 'ArticleSubscription', '_ormbases': ['wiki.ArticlePlugin', 'django_notify.Subscription']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'subscription_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['django_notify.Subscription']", 'unique': 'True'})
},
'wiki.attachment': {
'Meta': {'object_name': 'Attachment', '_ormbases': ['wiki.ReusablePlugin']},
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.AttachmentRevision']"}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'reusableplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ReusablePlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'wiki.attachmentrevision': {
'Meta': {'ordering': "('created',)", 'object_name': 'AttachmentRevision'},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Attachment']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.AttachmentRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['wiki.RevisionPlugin']},
'revisionplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.RevisionPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'wiki.imagerevision': {
'Meta': {'object_name': 'ImageRevision', '_ormbases': ['wiki.RevisionPluginRevision']},
'height': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '2000'}),
'revisionpluginrevision_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.RevisionPluginRevision']", 'unique': 'True', 'primary_key': 'True'}),
'width': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'wiki.reusableplugin': {
'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
},
'wiki.revisionplugin': {
'Meta': {'object_name': 'RevisionPlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'plugin_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.RevisionPluginRevision']"})
},
'wiki.revisionpluginrevision': {
'Meta': {'object_name': 'RevisionPluginRevision'},
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revision_set'", 'to': "orm['wiki.RevisionPlugin']"}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.RevisionPluginRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.simpleplugin': {
'Meta': {'object_name': 'SimplePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'article_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']"}),
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'wiki.urlpath': {
'Meta': {'unique_together': "(('site', 'parent', 'slug'),)", 'object_name': 'URLPath'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.URLPath']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['wiki']
|
PROJECT_DEFAULTS = 'Project Defaults'
PATHS = 'Paths'
_from_config = {
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprints': None
}
_from_args = {
'name': None,
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprint': None
}
def load_args(args):
from_args = _from_args.copy()
keys = _from_args.keys()
for key in keys:
if key in args:
from_args[key] = getattr(args, key)
return from_args
def load_config(config):
from_config = _from_config.copy()
keys = _from_config.keys()
if config:
if config.has_section(PROJECT_DEFAULTS):
for key in keys:
if config.has_option(PROJECT_DEFAULTS, key):
from_config[key] = config.get(PROJECT_DEFAULTS, key)
if config.has_section(PATHS):
for key in keys:
if config.has_option(PATHS, key):
from_config[key] = config.get(PATHS, key)
return from_config
def merge_configged_argged(configged, argged):
merged = configged.copy()
for key in argged.keys():
if key in configged:
# We only care about a None val if the key exists in configged;
# this will overwrite the config so that args take precedence
if argged[key] is not None:
merged[key] = argged[key]
else:
# If the key is not already in the configged dict (e.g. 'footprint'), include
# it anyway: a footprint is our highest priority and requires fewer args to
# generate a project
merged[key] = argged[key]
return merged
def footprint_requires(merged):
required = ['name', 'parent']
passed = 0
pass_requires = len(required)
for r in required:
if r in merged.keys():
if merged[r] is not None:
passed += 1
return passed == pass_requires
def solo_args_requires(args):
required = ['name', 'parent', 'language', 'type']
passed = 0
pass_requires = len(required)
for r in required:
if r in args.keys():
if args[r] is not None:
passed += 1
return passed == pass_requires
def validate_args(args, config):
if config is not None:
configged = load_config(config)
argged = load_args(args)
merged = merge_configged_argged(configged, argged)
# If footprint is provided, we only need name and parent
if merged['footprint'] is not None:
return footprint_requires(merged), merged
# If no footprint, we need name, parent, language, and type to perform
# footprint lookups
if None not in [merged['name'], merged['parent'], merged['language'],
merged['type']]:
return True, merged
return False, merged
argged = load_args(args)
return solo_args_requires(argged), argged
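# A minimal usage sketch (hypothetical names; not part of the original module).
# validate_args expects an argparse-style namespace plus an optional
# ConfigParser, merges the two, and reports whether enough fields are present.
if __name__ == '__main__':
    import argparse
    try:
        from configparser import ConfigParser  # Python 3
    except ImportError:
        from ConfigParser import ConfigParser  # Python 2
    parser = argparse.ArgumentParser()
    for opt in ('name', 'author', 'email', 'license', 'language', 'type',
                'parent', 'vcs', 'footprint'):
        parser.add_argument('--' + opt)
    # 'my_project' and '~/projects' are illustrative values only.
    args = parser.parse_args(['--name', 'my_project', '--parent', '~/projects',
                              '--language', 'python', '--type', 'library'])
    valid, merged = validate_args(args, ConfigParser())
    # valid is True here: name, parent, language and type are all provided.
    print(valid, merged)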
|