code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
"""
sentry.utils.strings
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import base64
import codecs
import re
import six
import string
import zlib
from django.utils.encoding import force_text, smart_text
# Runs of whitespace and common word separators; (?u) makes \s unicode-aware.
_word_sep_re = re.compile(r'[\s.;,_-]+(?u)')
# Camel-case pieces of an identifier: acronym runs (all caps followed by
# another capital), Capitalized words, and lowercase/digit runs.
_camelcase_re = re.compile(r'(?:[A-Z]{2,}(?=[A-Z]))|(?:[A-Z][a-z0-9]+)|(?:[a-z0-9]+)')
# Runs of uppercase ASCII letters.
_letters_re = re.compile(r'[A-Z]+')
# Runs of decimal digits.
_digit_re = re.compile(r'\d+')
# A single printf/sprintf-style placeholder, e.g. %s, %3$d, %-05.2f.
_sprintf_placeholder_re = re.compile(
    r'%(?:\d+\$)?[+-]?(?:[ 0]|\'.{1})?-?\d*(?:\.\d+)?[bcdeEufFgGosxX]'
)
def truncatechars(value, arg, ellipsis='...'):
    # TODO (alex) could use unicode ellipsis: u'\u2026'
    """
    Truncate ``value`` to at most ``arg`` characters, ending in ``ellipsis``.

    The value is returned untouched when it is None, when ``arg`` is not a
    valid integer literal, or when it is already short enough.
    """
    if value is None:
        return value
    try:
        limit = int(arg)
    except ValueError:
        # Invalid literal for int(); fail silently.
        return value
    if len(value) <= limit:
        return value
    keep = max(0, limit - len(ellipsis))
    return value[:keep] + ellipsis
def compress(value):
    """
    Compress ``value`` (bytes) into a base64-encoded text blob.

    A unicode string is returned rather than bytes, as the Django ORM
    works with unicode objects.
    """
    deflated = zlib.compress(value)
    return base64.b64encode(deflated).decode('utf-8')
def decompress(value):
    """Inverse of ``compress``: base64-decode, then zlib-inflate to bytes."""
    raw = base64.b64decode(value)
    return zlib.decompress(raw)
def gunzip(value):
    """Decompress gzip-framed bytes (zlib stream with the gzip wrapper)."""
    # Adding 16 to MAX_WBITS tells zlib to expect and validate a gzip header.
    window_bits = 16 + zlib.MAX_WBITS
    return zlib.decompress(value, window_bits)
def strip(value):
    """Coerce ``value`` to text and strip surrounding whitespace.

    Falsy input (None, '', 0, ...) yields the empty string.
    """
    return smart_text(value).strip() if value else ''
def soft_hyphenate(value, length, hyphen=u'\u00ad'):
    """Join ``length``-sized chunks of ``value`` with a soft hyphen."""
    chunks = (value[start:start + length]
              for start in range(0, len(value), length))
    return hyphen.join(chunks)
def soft_break(value, length, process=lambda chunk: chunk):
    """
    Encourage soft wrapping of tokens at least ``length`` characters long
    by inserting zero-width spaces after common delimiter characters.

    The non-delimiter chunks of each long token are passed through
    ``process`` (e.g. a soft-hyphenator).
    """
    delimiter_chars = ',.$:/+@!?()<>[]{}'
    delimiter_re = re.compile(r'([{}]+)'.format(
        ''.join(re.escape(c) for c in delimiter_chars)))

    def rewrite_token(match):
        pieces = delimiter_re.split(match.group(0))
        out = []
        for index, piece in enumerate(pieces):
            if index % 2 == 1:
                # Odd indices are the captured delimiter runs; follow each
                # with a zero-width space to allow a line break there.
                out.append(piece)
                out.append(u'\u200b')
            else:
                out.append(process(piece))
        return u''.join(out).rstrip(u'\u200b')

    overlong = r'\S{{{},}}'.format(length)
    return re.sub(overlong, rewrite_token, value)
def to_unicode(value):
    """Best-effort conversion of ``value`` to a unicode string."""
    try:
        return six.text_type(force_text(value))
    except (UnicodeEncodeError, UnicodeDecodeError):
        return '(Error decoding value)'
    except Exception:  # in some cases we get a different exception
        # Fall back to the repr of the value's type, and finally to a
        # fixed marker string when even that cannot be produced.
        try:
            return six.text_type(repr(type(value)))
        except Exception:
            return '(Error decoding value)'
def split_camelcase(word):
    """Yield the camel-case pieces of ``word``.

    If the regex cannot account for every character (unicode characters
    or some stuff), the word is yielded whole instead.
    """
    pieces = _camelcase_re.findall(word)
    covered = sum(len(piece) for piece in pieces)
    if covered != len(word):
        yield word
    else:
        for piece in pieces:
            yield piece
def split_any_wordlike(value, handle_camelcase=False):
    """Split ``value`` on whitespace/punctuation separators, optionally
    breaking each word into its camel-case components as well."""
    for token in _word_sep_re.split(value):
        if not handle_camelcase:
            yield token
        else:
            for part in split_camelcase(token):
                yield part
def tokens_from_name(value, remove_digits=False):
    """Yield non-empty lower-cased word tokens from ``value``.

    When ``remove_digits`` is set, digit runs are stripped from each
    token before lower-casing.
    """
    for raw in split_any_wordlike(value, handle_camelcase=True):
        token = _digit_re.sub('', raw) if remove_digits else raw
        token = token.lower()
        if token:
            yield token
# Characters permitted in an RFC 2822 dot-atom-text value (atext + the dot).
valid_dot_atom_characters = frozenset(
    string.ascii_letters +
    string.digits +
    ".!#$%&'*+-/=?^_`{|}~")
def is_valid_dot_atom(value):
    """Validate an input string as an RFC 2822 dot-atom-text value.

    Returns False (rather than raising) for non-strings and for the
    empty string, neither of which can be a dot-atom.
    """
    return (
        isinstance(value, six.string_types)  # must be a string type
        and bool(value)  # empty string previously raised IndexError below
        and not value[0] == '.' and not value[-1] == '.'  # cannot start or end with a dot
        and set(value).issubset(valid_dot_atom_characters)
    )  # can only contain valid characters
def count_sprintf_parameters(string):
    """Counts the number of sprintf parameters in a string."""
    return sum(1 for _ in _sprintf_placeholder_re.finditer(string))
def codec_lookup(encoding, default='utf-8'):
    """Safely look up a codec, ignoring non-text codecs and falling back
    to ``default`` on any error.

    Note: the default value itself is not sanity checked and bypasses
    these checks.
    """

    def fallback():
        # A ``default`` of None means "no fallback codec at all".
        return codecs.lookup(default) if default is not None else None

    if not encoding:
        return fallback()

    try:
        info = codecs.lookup(encoding)
    except (LookupError, TypeError):
        return fallback()

    # `CodecInfo._is_text_encoding` was introduced in Python 2.7.12;
    # on older versions the attribute is missing and the best we can do
    # is accept the codec as-is.
    if not getattr(info, '_is_text_encoding', True):
        return fallback()

    # `undefined` is a special encoding in Python that raises 100% of
    # the time, so treat it like a failed lookup.
    if info.name == 'undefined':
        return fallback()

    return info
|
looker/sentry
|
src/sentry/utils/strings.py
|
Python
|
bsd-3-clause
| 5,551
|
from datetime import timedelta, datetime
from config.config import CONFIG_VARS as cvar
import github_api
import send_response
import util
def manage_needs_reply_issue(repo_username, repo_id, issue):
    """
    Inspect an issue carrying the "needs reply" label and either remove
    the label (the creator replied after it was added) or close the
    issue (no reply within the configured window).

    Bails out silently whenever any required piece of data is missing
    or malformed.
    """
    if not issue:
        return
    number = issue.get('number')
    if not number:
        return
    if not has_needs_reply_label(issue):
        return
    issue_events = github_api.fetch_issue_events(repo_username, repo_id, number)
    if not issue_events or not isinstance(issue_events, list):
        return
    need_reply_label_added = get_most_recent_datetime_need_reply_label_added(issue_events)
    if not need_reply_label_added:
        return
    issue_comments = github_api.fetch_issue_comments(repo_username, repo_id, number)
    if not issue_comments or not isinstance(issue_comments, list):
        return
    most_recent_response = get_most_recent_datetime_creator_response(issue, issue_comments)
    if not most_recent_response:
        return
    # print as a function call so this parses on both Python 2 and 3
    print('Needs reply: %s, label added: %s, most recent response: %s' % (number, need_reply_label_added, most_recent_response))
    if has_replied_since_adding_label(need_reply_label_added, most_recent_response):
        print('has_replied_since_adding_label, removing label: %s' % number)
        return remove_needs_reply_label(repo_username, repo_id, number, issue)
    if not has_replied_in_timely_manner(need_reply_label_added):
        print('not has_replied_in_timely_manner, closing issue: %s' % number)
        return close_needs_reply_issue(repo_username, repo_id, number)
def has_needs_reply_label(issue):
    """Return True when ``issue`` carries the configured needs-reply label.

    Any unexpected error is logged and treated as "no label".
    """
    if not issue:
        return False
    try:
        labels = issue.get('labels')
        if not labels:
            return False
        for label in labels:
            if cvar['NEEDS_REPLY_LABEL'] == label.get('name'):
                return True
    except Exception as ex:
        # print as a function call so this parses on both Python 2 and 3;
        # a dead local that shadowed the function name was also removed.
        print('has_needs_reply_label error: %s' % ex)
    return False
def get_most_recent_datetime_need_reply_label_added(events):
    """
    Return the datetime at which the needs-reply label was most recently
    added, or None when no such event exists (or on any error).

    ``events`` is the GitHub issue-events payload: a list of dicts with
    'event', 'label' and 'created_at' keys.
    """
    try:
        most_recent = datetime(2000, 1, 1)
        has_recent = False
        if not events:
            return
        for event in events:
            if event.get('event') != 'labeled':
                continue
            label = event.get('label')
            if not label:
                continue
            label_name = label.get('name')
            if not label_name:
                continue
            # use the already-fetched label_name (was re-fetched before)
            if label_name != cvar['NEEDS_REPLY_LABEL']:
                continue
            created_str = event.get('created_at')
            if not created_str:
                continue
            created_at = datetime.strptime(created_str, '%Y-%m-%dT%H:%M:%SZ')
            if created_at > most_recent:
                has_recent = True
                most_recent = created_at
        if has_recent:
            return most_recent
    except Exception as ex:
        print('get_most_recent_datetime_need_reply_label_added error: %s' % ex)
def get_most_recent_datetime_creator_response(issue, comments):
    """
    Return the datetime of the issue creator's most recent activity:
    the later of the issue creation time and any comment they authored.

    Returns None when required data is missing or on any error.
    """
    try:
        if not issue:
            return
        creator = issue.get('user')
        if not creator:
            return
        creator_login = creator.get('login')
        if not creator_login:
            return
        created_str = issue.get('created_at')
        if not created_str:
            return
        most_recent = util.get_date(created_str)
        if not comments:
            return most_recent
        for comment in comments:
            comment_user = comment.get('user')
            if not comment_user:
                continue
            if comment_user.get('login') != creator_login:
                continue
            comment_created_str = comment.get('created_at')
            if not comment_created_str:
                continue
            comment_created = util.get_date(comment_created_str)
            if comment_created > most_recent:
                most_recent = comment_created
        return most_recent
    except Exception as ex:
        # message previously named "creator_comment"; fixed to match function
        print('get_most_recent_datetime_creator_response error: %s' % ex)
def has_replied_since_adding_label(need_reply_label_added, most_recent_response):
    """True when the creator's latest response is at or after the moment
    the needs-reply label was last added."""
    return not (most_recent_response < need_reply_label_added)
def has_replied_in_timely_manner(need_reply_label_added, now=None, close_no_reply_after=None):
    """
    True while the reply window is still open: ``now`` falls before the
    label-added time plus ``close_no_reply_after`` days.

    Both defaults are resolved lazily at call time.  Previously
    ``now=datetime.now()`` and the cvar lookup were evaluated once at
    import time, so ``now`` was frozen at process start — a classic
    default-argument bug.
    """
    if now is None:
        now = datetime.now()
    if close_no_reply_after is None:
        close_no_reply_after = cvar['CLOSE_NOREPLY_AFTER']
    deadline = need_reply_label_added + timedelta(days=close_no_reply_after)
    return now < deadline
def remove_needs_reply_label(repo_username, repo_id, number, issue):
    """Remove the needs-reply label and the matching bot comment.

    Returns a dict describing both operations, or None on error.
    """
    try:
        return {
            'remove_needs_reply_label': github_api.remove_issue_labels(repo_username, repo_id, number, [cvar['NEEDS_REPLY_LABEL']], issue=issue),
            'remove_needs_reply_comment': remove_needs_reply_comment(repo_username, repo_id, number)
        }
    except Exception as ex:
        # print as a function call so this parses on both Python 2 and 3
        print('remove_needs_reply_label error: %s' % ex)
def remove_needs_reply_comment(repo_username, repo_id, number, issue_comments=None, needs_reply_content_id=None, is_debug=None):
    """
    Delete the bot's needs-reply comment from an issue, if present.

    Returns a short status string describing what happened.  The content
    marker and debug flag default lazily from config at call time
    (previously they were bound once at import time, so config changes
    after import were silently ignored).
    """
    try:
        if needs_reply_content_id is None:
            needs_reply_content_id = cvar['NEEDS_REPLY_CONTENT_ID']
        if is_debug is None:
            is_debug = cvar['DEBUG']
        if issue_comments is None:
            issue_comments = github_api.fetch_issue_comments(repo_username, repo_id, number)
        if not issue_comments or not isinstance(issue_comments, list):
            return 'invalid comments'
        for issue_comment in issue_comments:
            body = issue_comment.get('body')
            comment_id = issue_comment.get('id')
            if body and needs_reply_content_id in body and comment_id:
                if not is_debug:
                    # only delete for real outside of debug mode
                    github_api.delete_issue_comment(repo_username, repo_id, comment_id, number=number)
                return 'removed auto comment'
        return 'no comment to remove'
    except Exception as ex:
        return 'remove_needs_reply_comment: %s' % ex
def close_needs_reply_issue(repo_username, repo_id, number):
    """Close an issue whose creator never replied.

    Returns a dict with the submission result, or None on error.
    """
    try:
        return {
            'close_needs_reply_issue': send_response.submit_issue_response(repo_username, repo_id, number, 'close', 'no_reply', None)
        }
    except Exception as ex:
        # print as a function call so this parses on both Python 2 and 3
        print('close_needs_reply_issue error: %s' % ex)
|
driftyco/ionitron-issues
|
tasks/needs_reply.py
|
Python
|
mit
| 6,314
|
rooms = []  # reassigned below with the raw encrypted room strings (puzzle input)
def shift_characters(room, shift_num):
    """
    Decrypt a room name with a Caesar shift.

    ``room`` looks like 'abc-def-123[chk]'; every '-'-separated word
    before the trailing sector-id part is shifted forward by
    ``shift_num`` positions, wrapping within the lowercase alphabet.

    Returns the list of decrypted words.
    """
    shift_num = shift_num % 26
    words = room.split('-')[0:-1]
    new_words = []
    for word in words:
        new_word = []
        for ch in word:
            new_num = shift_num + ord(ch)
            if new_num > 122:  # past 'z'
                # BUG FIX: was `new_num = 122 - 97` (a constant 25, which
                # is a control character); wrap by the alphabet size instead.
                new_num -= 26
            new_word.append(chr(new_num))
        new_words.append(''.join(new_word))
    return new_words
def get_sectorid(room):
    """Extract the numeric sector id from a room string of the form
    'words-<id>[checksum]'."""
    last_part = room.split('-')[-1]
    return int(last_part.partition('[')[0])
def find_room_code():
    """Print the decrypted name and sector id of every room in ``rooms``."""
    for room in rooms:
        sectorid = get_sectorid(room)
        # print as a function call so this parses on both Python 2 and 3
        print(' '.join(shift_characters(room, sectorid)) + ' - ' + str(sectorid))
rooms = [
'nzydfxpc-rclop-qwzhpc-qtylyntyr-769[oshgk]',
'qzlozfhmf-bzmcx-bnzshmf-zbpthrhshnm-339[zmxdi]',
'xtwtelcj-rclop-upwwjmply-zapcletzyd-743[itbds]',
'mrxivrexmsrep-fewoix-ywiv-xiwxmrk-308[kzypw]',
'ibghopzs-qobrm-difqvogwbu-142[lnrzo]',
'irgyyolokj-xghhoz-lotgtiotm-228[vnmxd]',
'foadouwbu-gqojsbusf-vibh-fsgsofqv-376[ymswi]',
'jvuzbtly-nyhkl-wyvqljapsl-wshzapj-nyhzz-ylzlhyjo-903[jnyaz]',
'amjmpdsj-cee-qcptgacq-236[caejm]',
'nvrgfezqvu-avccpsvre-crsfirkfip-217[nrzcd]',
'vdzonmhydc-bzmcx-bnzshmf-zbpthrhshnm-235[znyot]',
'oxaflxzqfsb-mixpqfz-doxpp-abmxoqjbkq-393[xqbfo]',
'drxevkzt-upv-crsfirkfip-893[rfikp]',
'rkpqxyib-zelzlixqb-obzbfsfkd-627[wzymt]',
'ajvyjprwp-kdwwh-bjunb-563[jwbpa]',
'nzcczdtgp-eza-dpncpe-mldvpe-opalcexpye-171[wjzyc]',
'jxdkbqfz-zixppfcfba-mixpqfz-doxpp-abmilvjbkq-809[nbaoc]',
'tbxmlkfwba-zelzlixqb-xkxivpfp-315[yixla]',
'gvcskirmg-gerhc-gsexmrk-hizipstqirx-620[zyosg]',
'ygcrqpkbgf-ecpfa-eqcvkpi-uvqtcig-440[sbpfj]',
'irdgrxzex-vxx-rthlzjzkzfe-425[zdqok]',
'szfyrqriuflj-tfcfiwlc-wcfnvi-uvjzxe-425[lfipb]',
'zgmfyxypbmsq-aylbw-bcqgel-912[dqtlr]',
'ubhatstkwhnl-cxeeruxtg-wxitkmfxgm-475[ectpi]',
'ejpanjwpekjwh-fahhuxawj-wymqeoepekj-316[utnvq]',
'qlm-pbzobq-ciltbo-pqloxdb-601[lktqh]',
'votubcmf-fhh-efwfmpqnfou-493[aiyzx]',
'jqwpihizlwca-uqtqbizg-ozilm-kivlg-twoqabqka-382[frmkh]',
'zovldbkfz-bdd-tlohpelm-497[dcfxw]',
'atyzghrk-yigbktmkx-natz-jkyomt-748[ktyag]',
'iuxxuyobk-hgyqkz-iayzuskx-ykxboik-566[yzsxi]',
'jvsvymbs-zjhclunly-obua-klzpnu-903[ckmyr]',
'sbnqbhjoh-ezf-tbmft-623[bfhte]',
'forwcoqhwjs-rms-difqvogwbu-870[owfqr]',
'yhtwhnpun-ibuuf-zopwwpun-721[tjlfz]',
'guahyncw-wbiwifuny-omyl-nymncha-968[nywac]',
'ykjoqian-cnwza-bhksan-zaoecj-576[btksq]',
'ckgvutofkj-igtje-zxgototm-800[mlqgz]',
'ipvohghykvbz-jvsvymbs-lnn-dvyrzovw-227[pysto]',
'jqwpihizlwca-zilqwikbqdm-ntwemz-kwvbiqvumvb-382[antsy]',
'njmjubsz-hsbef-cvooz-mbcpsbupsz-805[bferv]',
'ojk-nzxmzo-xgvnndadzy-xcjxjgvoz-mzxzdqdib-785[vzpun]',
'yhkpvhjapcl-ihzrla-svnpzapjz-747[pahzj]',
'htqtwkzq-zsxyfgqj-kqtbjw-rfsfljrjsy-827[wzxif]',
'fubrjhqlf-sodvwlf-judvv-ilqdqflqj-257[flqdj]',
'forwcoqhwjs-qobrm-qcohwbu-rsgwub-116[gtsir]',
'nzydfxpc-rclop-mfyyj-cpdplcns-483[gzysm]',
'cjpibabsepvt-cbtlfu-nbobhfnfou-857[bfcno]',
'oxmeeuruqp-ngzzk-geqd-fqefuzs-222[equzf]',
'mvkccspson-lexxi-bomosfsxq-614[ybgsn]',
'frqvxphu-judgh-fdqgb-frdwlqj-vhuylfhv-231[bojwc]',
'sno-rdbqds-bgnbnkzsd-btrsnldq-rdquhbd-989[dbnsq]',
'wrs-vhfuhw-fdqgb-frdwlqj-uhvhdufk-179[fhduw]',
'ftzgxmbv-ynssr-vahvhetmx-mktbgbgz-397[truqi]',
'xst-wigvix-tpewxmg-kveww-ywiv-xiwxmrk-984[nmrid]',
'dkqjcbctfqwu-rncuvke-itcuu-ucngu-544[inaxl]',
'hwdtljsnh-wfggny-fsfqdxnx-151[anbvm]',
'aflwjfslagfsd-bwddqtwsf-vwhdgqewfl-970[sjqtc]',
'hmsdqmzshnmzk-okzrshb-fqzrr-sqzhmhmf-885[qzbly]',
'xcitgcpixdcpa-qphzti-gtrtxkxcv-141[ctxip]',
'nbhofujd-tdbwfohfs-ivou-gjobodjoh-649[fnwgk]',
'wfruflnsl-wfggny-xjwanhjx-905[lnztx]',
'jrncbavmrq-rtt-fgbentr-975[rtbna]',
'jqwpihizlwca-akidmvomz-pcvb-bziqvqvo-460[zakpd]',
'oxmeeuruqp-nmewqf-eqdhuoqe-560[vtznk]',
'plolwdub-judgh-udeelw-uhvhdufk-985[fnsty]',
'kfg-jvtivk-treup-ivrthlzjzkzfe-503[ktvze]',
'bqvvu-ywjzu-ykwpejc-naoawnyd-550[txaws]',
'gsvvswmzi-wgezirkiv-lyrx-qerekiqirx-984[irevg]',
'vcibutulxiom-yaa-uhufsmcm-552[gpimb]',
'ugjjgkanw-usfvq-ugslafy-lwuzfgdgyq-918[gufaj]',
'qfmcusbwq-foadouwbu-dzoghwq-ufogg-hsqvbczcum-506[uoqbc]',
'qzchnzbshud-rbzudmfdq-gtms-zmzkxrhr-547[gxjyh]',
'aflwjfslagfsd-usfvq-ugslafy-esjcwlafy-450[kjnil]',
'nzwzcqfw-nsznzwlep-afcnsldtyr-951[hjrdi]',
'aoubshwq-pogysh-kcfygvcd-740[pxyzt]',
'zbytomdsvo-lkcuod-nocsqx-380[rqsuo]',
'fmsledevhsyw-jpsaiv-wivzmgiw-542[pitok]',
'qvbmzvibqwvit-ntwemz-abwziom-304[urjat]',
'gvaaz-tdbwfohfs-ivou-eftjho-389[inlud]',
'wlqqp-avccpsvre-ivjvrity-399[qcgto]',
'shmml-pelbtravp-pubpbyngr-znantrzrag-897[prabn]',
'iruzfrtkzmv-jtrmvexvi-ylek-jvimztvj-477[vijmr]',
'hdgdovmt-bmvyz-xjgjmapg-xviyt-xjvodib-yzqzgjkhzio-499[tsira]',
'willimcpy-yaa-omyl-nymncha-916[jtwau]',
'dpssptjwf-gmpxfs-bobmztjt-389[kjlin]',
'gpbepvxcv-snt-tcvxcttgxcv-245[etyfm]',
'jxdkbqfz-ciltbo-cfkxkzfkd-367[xldij]',
'udglrdfwlyh-sodvwlf-judvv-pdunhwlqj-231[rslnt]',
'ygcrqpkbgf-ecpfa-eqcvkpi-gpikpggtkpi-440[pgkci]',
'sno-rdbqds-eknvdq-otqbgzrhmf-391[yzexr]',
'lzfmdshb-atmmx-bnmszhmldms-313[bacsl]',
'qczcftiz-xszzmpsob-aofyshwbu-974[nxzyq]',
'nwzekwypera-fahhuxawj-iwngapejc-420[yzguv]',
'sbnqbhjoh-gmpxfs-efwfmpqnfou-389[sbwne]',
'ugjjgkanw-xdgowj-ksdwk-554[sntqw]',
'wdjcvuvmyjpn-kgvnodx-bmvnn-ncdkkdib-213[apifx]',
'nvrgfezqvu-tyftfcrkv-uvgcfpdvek-269[tiuvc]',
'uiovmbqk-jcvvg-abwziom-252[trvgn]',
'fydelmwp-prr-ecltytyr-561[ryelp]',
'ujoon-rpcsn-rdpixcv-jhtg-ithixcv-999[dcbon]',
'ahngzyzqcntr-eknvdq-dmfhmddqhmf-131[dhmnq]',
'iqmbazulqp-ngzzk-fqotzaxask-222[nmtgf]',
'tfejldvi-xiruv-avccpsvre-nfibjyfg-113[vfice]',
'wlsiayhcw-nij-mywlyn-xsy-nywbhifias-552[otlcf]',
'muqfedyput-sqdto-seqjydw-skijecuh-iuhlysu-764[mqlfy]',
'wbhsfbohwcboz-suu-hfowbwbu-324[bwhou]',
'owshgfarwv-hdsklau-yjskk-suimakalagf-944[wtiza]',
'ahngzyzqcntr-rbzudmfdq-gtms-bnmszhmldms-859[yvztm]',
'qyujihctyx-vohhs-fiacmncwm-292[knzep]',
'vetllbybxw-xzz-ybgtgvbgz-579[bgzlt]',
'zekvierkzferc-tyftfcrkv-uvgrikdvek-477[pecqo]',
'molgbzqfib-mixpqfz-doxpp-obzbfsfkd-367[ptnjg]',
'oxmeeuruqp-otaoaxmfq-geqd-fqefuzs-924[puwxb]',
'gpewwmjmih-nippcfier-gsrxemrqirx-646[tpsoa]',
'xgvnndadzy-wvnfzo-gvwjmvojmt-603[nfstl]',
'bjfutsneji-hfsid-htfynsl-htsyfnsrjsy-983[fdzej]',
'pynffvsvrq-wryylorna-qrcnegzrag-351[dymsz]',
'xjgjmapg-rzvkjiduzy-wvnfzo-kpmxcvndib-109[eyrcm]',
'sebehvkb-rkddo-vydqdsydw-296[lgonm]',
'hjgbwuladw-uzgugdslw-kwjnauwk-138[kgloe]',
'ohmnuvfy-wuhxs-wiuncha-uwkocmcncih-890[tvylz]',
'udpsdjlqj-iorzhu-wudlqlqj-491[rtwma]',
'rdggdhxkt-ytaanqtpc-ejgrwphxcv-635[gtacd]',
'eqttqukxg-ecpfa-yqtmujqr-596[hnvwy]',
'laffe-lruckx-iutzgotsktz-852[nglsy]',
'pkl-oaynap-xwogap-yqopkian-oanreya-394[ebzft]',
'kyelcrga-afmamjyrc-bcnyprkclr-314[crayk]',
'joufsobujpobm-dipdpmbuf-ufdiopmphz-571[ytgxs]',
'ydjuhdqjyedqb-fbqijys-whqii-tufqhjcudj-894[ekswi]',
'ajvyjprwp-kdwwh-mnenuxyvnwc-251[psaok]',
'ibghopzs-suu-gsfjwqsg-220[qshwm]',
'ugjjgkanw-hdsklau-yjskk-dstgjslgjq-502[lqxth]',
'ugdgjxmd-hdsklau-yjskk-hmjuzskafy-788[vtyzi]',
'lqwhuqdwlrqdo-mhoobehdq-oderudwrub-751[bzype]',
'wdjcvuvmyjpn-kgvnodx-bmvnn-pnzm-oznodib-837[dcbaw]',
'vcibutulxiom-qyujihctyx-wbiwifuny-fiacmncwm-994[tjxnm]',
'vqr-ugetgv-dcumgv-tgegkxkpi-102[gvekt]',
'encuukhkgf-lgnnadgcp-wugt-vguvkpi-752[vdmps]',
'ktfitzbgz-wrx-vhgmtbgfxgm-267[twjsh]',
'dzczkrip-xiruv-jtrmvexvi-ylek-kvtyefcfxp-243[vctis]',
'pynffvsvrq-enzcntvat-qlr-qrcnegzrag-377[vdxzy]',
'zixppfcfba-mixpqfz-doxpp-ixyloxqlov-679[pxfio]',
'shoewudys-isqludwuh-xkdj-kiuh-juijydw-608[qedlg]',
'qjopwxha-ywjzu-ykwpejc-nayaerejc-524[syznu]',
'upq-tfdsfu-dboez-dpbujoh-nbobhfnfou-415[bfoud]',
'fruurvlyh-udeelw-dqdobvlv-907[psznj]',
'tcfkqcevkxg-dwppa-wugt-vguvkpi-336[klouj]',
'plolwdub-judgh-gbh-uhdftxlvlwlrq-621[bnwav]',
'chnylhuncihuf-wuhxs-wiuncha-xypyfijgyhn-110[hnuyc]',
'froruixo-gbh-rshudwlrqv-153[amoni]',
'clxalrtyr-nsznzwlep-zapcletzyd-327[guevs]',
'xgsvgmotm-igtje-rghuxgzuxe-306[lcxzy]',
'gpsxdprixkt-rpcsn-gtprfjxhxixdc-401[xprcd]',
'lsyrkjkbnyec-nio-bokmaescsdsyx-874[nkgyo]',
'buzahisl-ibuuf-zopwwpun-747[vwuxt]',
'vqr-ugetgv-fag-yqtmujqr-882[gqrtu]',
'bgmxkgtmbhgte-wrx-wxlbzg-137[zquts]',
'ujqgywfau-ugdgjxmd-tskcwl-hmjuzskafy-112[yolzm]',
'lqwhuqdwlrqdo-fkrfrodwh-frqwdlqphqw-985[qwdrf]',
'surmhfwloh-fdqgb-uhvhdufk-621[dimhn]',
'fhezusjybu-hqrryj-qdqboiyi-322[esjtk]',
'gvaaz-tdbwfohfs-ivou-tijqqjoh-857[oafhi]',
'xmtjbzidx-xviyt-mznzvmxc-759[xmzit]',
'tfiifjzmv-gcrjkzt-xirjj-dribvkzex-399[ijrzf]',
'zuv-ykixkz-kmm-uvkxgzouty-748[xvipz]',
'gokzyxsjon-cmkfoxqob-rexd-domrxyvyqi-172[yfozw]',
'plolwdub-judgh-frqvxphu-judgh-sodvwlf-judvv-frqwdlqphqw-959[duhlq]',
'kfg-jvtivk-treup-tfekrzedvek-373[xyqzr]',
'dmybmsuzs-oaddaeuhq-qss-pqeusz-586[mansh]',
'ryexqpqhteki-rqiauj-iqbui-660[trqbl]',
'ytu-xjhwjy-jll-wjhjnansl-125[zketn]',
'fhezusjybu-muqfedyput-sqdto-udwyduuhydw-660[udyef]',
'qspkfdujmf-dipdpmbuf-mphjtujdt-909[dwuov]',
'cebwrpgvyr-enqvbnpgvir-enoovg-nanylfvf-533[akmyb]',
'qfmcusbwq-pogysh-hfowbwbu-246[hdsbg]',
'dmybmsuzs-bxmefuo-sdmee-abqdmfuaze-846[wsntq]',
'sno-rdbqds-azrjds-nodqzshnmr-157[ogtbm]',
'dpotvnfs-hsbef-cbtlfu-qvsdibtjoh-493[isjkl]',
'nwilwcejc-lhwopey-cnwoo-odellejc-732[isofe]',
'gokzyxsjon-mkxni-mykdsxq-bomosfsxq-822[nyudi]',
'molgbzqfib-gbiivybxk-obxznrfpfqflk-679[wotra]',
'cybyjqho-whqtu-rqiauj-jhqydydw-920[qyhjd]',
'tinnm-pogysh-ghcfous-506[xtcso]',
'myvybpev-cmkfoxqob-rexd-cdybkqo-146[boycd]',
'pbeebfvir-shmml-enoovg-chepunfvat-949[nltsr]',
'apuut-ezggtwzvi-yzqzgjkhzio-135[gdywb]',
'xfbqpojafe-ezf-bobmztjt-441[qbwdc]',
'ktfitzbgz-xzz-ehzblmbvl-943[zdigt]',
'eqnqthwn-dcumgv-rwtejcukpi-232[umdca]',
'xlrypetn-clmmte-opawzjxpye-379[bkvqj]',
'fydelmwp-awldetn-rcldd-lylwjdtd-821[dlwet]',
'owshgfarwv-esyfwlau-kusnwfywj-zmfl-ugflsafewfl-658[fwlsa]',
'bnmrtldq-fqzcd-eknvdq-sqzhmhmf-807[snkmf]',
'tyepcyletzylw-upwwjmply-cpnptgtyr-951[kyfol]',
'ckgvutofkj-igtje-iugzotm-ynovvotm-462[otgvi]',
'jshzzpmplk-ihzrla-klzpnu-981[yiaum]',
'aoubshwq-qczcftiz-qvcqczohs-qighcasf-gsfjwqs-896[hyxzq]',
'zilqwikbqdm-kpwkwtibm-ewzsapwx-668[zujgy]',
'tpspahyf-nyhkl-jhukf-jvhapun-zopwwpun-669[slmnt]',
'iuruxlar-lruckx-aykx-zkyzotm-852[kruxa]',
'bdavqofuxq-bxmefuo-sdmee-dqeqmdot-144[gblym]',
'eqnqthwn-ygcrqpkbgf-dcumgv-octmgvkpi-778[aznbf]',
'amppmqgtc-kyelcrga-bwc-ylyjwqgq-522[aedsm]',
'diozmivodjivg-xviyt-xjvodib-vxlpdndodji-109[dcnaq]',
'wifilzof-wbiwifuny-uwkocmcncih-474[lcymu]',
'dpmpsgvm-dboez-dpbujoh-efwfmpqnfou-779[ujlre]',
'iwcjapey-fahhuxawj-nayaerejc-758[elvba]',
'tcorcikpi-ejqeqncvg-vgejpqnqia-414[tywiv]',
'excdklvo-mrymyvkdo-wkbuodsxq-120[ngxcw]',
'nbhofujd-qmbtujd-hsbtt-bobmztjt-467[evdab]',
'fubrjhqlf-gbh-pdqdjhphqw-959[nmklz]',
'lxuxaodu-snuuhknjw-cnlqwxuxph-667[skyjn]',
'ytu-xjhwjy-jll-zxjw-yjxynsl-931[tyzfu]',
'cjpibabsepvt-sbccju-tbmft-519[ijnyz]',
'bkwzkqsxq-lexxi-cobfsmoc-406[xbcko]',
'yaxsnlcrun-dwbcjkun-kdwwh-orwjwlrwp-823[wnrcd]',
'jchipqat-gpqqxi-ejgrwphxcv-739[pqcgh]',
'etyyx-dff-cdudknoldms-937[dfyce]',
'jsehsyafy-vqw-dgyaklauk-112[rhsgp]',
'nzwzcqfw-awldetn-rcldd-opalcexpye-587[cdelw]',
'nvrgfezqvu-wcfnvi-uvjzxe-139[vefnu]',
'apuut-kgvnodx-bmvnn-ncdkkdib-915[stofz]',
'hwdtljsnh-ojqqdgjfs-zxjw-yjxynsl-489[ftzsy]',
'pualyuhapvuhs-jhukf-jvhapun-ayhpupun-877[oflqz]',
'yaxsnlcrun-bljenwpna-qdwc-mnenuxyvnwc-407[ncwae]',
'eqnqthwn-ejqeqncvg-nqikuvkeu-102[exsvc]',
'ynssr-lvtoxgzxk-angm-tvjnblbmbhg-813[bgnlm]',
'sehheiylu-zubboruqd-cqdqwucudj-400[ziuys]',
'awzwhofm-ufors-qobrm-gozsg-246[zurge]',
'ahngzyzqcntr-bzmcx-bnzshmf-lzmzfdldms-677[zmnbc]',
'eza-dpncpe-awldetn-rcldd-wzrtdetnd-769[mtgef]',
'htsxzrjw-lwfij-kqtbjw-wjxjfwhm-931[sjxwa]',
'szfyrqriuflj-jtrmvexvi-ylek-nfibjyfg-373[alfvj]',
'emixwvqhml-ntwemz-amzdqkma-876[lutzf]',
'hwbba-fag-tgegkxkpi-726[zilbc]',
'nwilwcejc-zua-zalhkuiajp-212[acijl]',
'aflwjfslagfsd-esyfwlau-kusnwfywj-zmfl-mkwj-lwklafy-892[flwas]',
'fmsledevhsyw-fewoix-xiglrspskc-256[sefil]',
'zuv-ykixkz-lruckx-ykxboiky-306[odviu]',
'ftzgxmbv-utldxm-ftgtzxfxgm-969[noxmk]',
'ojk-nzxmzo-nxvqzibzm-cpio-mzvxlpdndodji-109[nfysr]',
'molgbzqfib-mixpqfz-doxpp-abmilvjbkq-133[bimpq]',
'zekvierkzferc-tyftfcrkv-jkfirxv-867[bjyft]',
'gvaaz-ezf-bobmztjt-727[muyvq]',
'mvhkvbdib-zbb-hvivbzhzio-551[yntvz]',
'dwbcjkun-kdwwh-ujkxajcxah-641[jkwac]',
'lhkhszqx-fqzcd-qzaahs-lzmzfdldms-209[tklbi]',
'njmjubsz-hsbef-sbccju-qvsdibtjoh-571[tpswx]',
'avw-zljyla-lnn-zopwwpun-149[lnwap]',
'ckgvutofkj-hgyqkz-iayzuskx-ykxboik-358[mdnay]',
'ytu-xjhwjy-hfsid-rfsfljrjsy-697[hmiag]',
'houngfgxjuay-igtje-sgtgmksktz-384[monuj]',
'qfkkj-ojp-ecltytyr-249[itsvz]',
'udglrdfwlyh-udeelw-zrunvkrs-985[dlrue]',
'gsvvswmzi-tpewxmg-kveww-viwievgl-178[vbmit]',
'houngfgxjuay-yigbktmkx-natz-ykxboiky-930[bjyzu]',
'eqttqukxg-uecxgpigt-jwpv-uvqtcig-102[gnzpx]',
'amjmpdsj-bwc-pcqcypaf-288[gsywq]',
'zntargvp-cynfgvp-tenff-npdhvfvgvba-793[vfngp]',
'hdgdovmt-bmvyz-xcjxjgvoz-ozxcijgjbt-811[snjmz]',
'etaqigpke-rncuvke-itcuu-ewuvqogt-ugtxkeg-154[eugtk]',
'pbybeshy-sybjre-ybtvfgvpf-117[vszqj]',
'vqr-ugetgv-tcddkv-uvqtcig-336[hzkdw]',
'ykhknbqh-xwogap-pnwejejc-368[nqxzy]',
'sehheiylu-rkddo-jusxdebewo-634[edhos]',
'mvydjvxodqz-xcjxjgvoz-adivixdib-395[snpmt]',
'ahngzyzqcntr-bzmcx-rsnqzfd-183[zfkpc]',
'qxdwpopgsdjh-eaphixr-vgphh-detgpixdch-973[hpdgx]',
'nij-mywlyn-jlidywncfy-jfumncw-alumm-zchuhwcha-682[nvybs]',
'yhtwhnpun-jhukf-klzpnu-383[hnukp]',
'yhtwhnpun-qlssfilhu-svnpzapjz-175[fopst]',
'sgmtkzoi-jek-jkbkruvsktz-774[whzny]',
'gifavtkzcv-wcfnvi-rercpjzj-711[cvfij]',
'pkl-oaynap-acc-lqnydwoejc-940[aclno]',
'vhkkhlbox-ktuubm-tvjnblbmbhg-475[gyhzn]',
'guahyncw-wfummczcyx-wuhxs-guleyncha-422[iybfa]',
'ydjuhdqjyedqb-hqrryj-sedjqydcudj-816[ftnej]',
'rnqnyfwd-lwfij-gfxpjy-xytwflj-801[phsxc]',
'vdzonmhydc-qzaahs-rdquhbdr-365[quvjr]',
'ucynmlgxcb-zyqicr-rcaflmjmew-444[ncmzr]',
'wfummczcyx-gcfcnuls-aluxy-luvvcn-jolwbumcha-864[culma]',
'qzoggwtwsr-dfcxsqhwzs-suu-qighcasf-gsfjwqs-220[sgqwf]',
'esyfwlau-mfklstdw-uzgugdslw-suimakalagf-918[igapw]',
'zntargvp-wryylorna-genvavat-819[zmnji]',
'ugfkmewj-yjsvw-uzgugdslw-umklgewj-kwjnauw-268[wugjk]',
'xjinphzm-bmvyz-kgvnodx-bmvnn-pnzm-oznodib-109[rfzpw]',
'mvydjvxodqz-mvwwdo-ozxcijgjbt-265[cxfdz]',
'ujoon-rwdrdapit-bpcpvtbtci-375[ptbcd]',
'aczupnetwp-dnlgpyrpc-sfye-afcnsldtyr-457[cydjq]',
'uzfqdzmfuazmx-eomhqzsqd-tgzf-ymzmsqyqzf-898[zmqfd]',
'lugjuacha-luvvcn-ijyluncihm-890[uclah]',
'mtzslklcozfd-upwwjmply-lnbftdtetzy-379[yzwvu]',
'myvybpev-oqq-bocokbmr-328[bomqv]',
'tagzsrsjvgmk-xdgowj-ghwjslagfk-346[anbri]',
'dpmpsgvm-ezf-mphjtujdt-493[dolas]',
'votubcmf-sbccju-efqbsunfou-831[mnvky]',
'zotts-xsy-nywbhifias-188[boqzr]',
'gvcskirmg-glsgspexi-irkmriivmrk-646[igrkm]',
'bnknqetk-cxd-cdozqsldms-625[dcknq]',
'eqnqthwn-lgnnadgcp-cpcnauku-206[ytmnv]',
'kmjezxodgz-wpiit-mznzvmxc-525[gtdob]',
'dpotvnfs-hsbef-fhh-dvtupnfs-tfswjdf-519[fsdht]',
'bwx-amkzmb-kpwkwtibm-lmdmtwxumvb-486[qmvwa]',
'guahyncw-vohhs-lyuwkocmcncih-760[chnou]',
'guahyncw-zfiqyl-nywbhifias-188[szhfp]',
'ksodcbwnsr-foppwh-kcfygvcd-194[cdfko]',
'gpewwmjmih-glsgspexi-jmrergmrk-542[bfqnt]',
'vqr-ugetgv-fag-fgxgnqrogpv-440[gvfqr]',
'encuukhkgf-etaqigpke-tcddkv-ceswkukvkqp-726[fijhw]',
'yrwxefpi-fyrrc-hiwmkr-724[rfiwy]',
'hwdtljsnh-bjfutsneji-hfsid-htfynsl-xfqjx-801[cdbsl]',
'xjmmjndqz-xcjxjgvoz-jkzmvodjin-681[jmxzd]',
'jqwpihizlwca-kivlg-tijwzibwzg-200[jpsbx]',
'fodvvlilhg-froruixo-fkrfrodwh-wudlqlqj-621[yketz]',
'wpuvcdng-fag-fgrctvogpv-518[klean]',
'xgvnndadzy-xviyt-xjvodib-mzxzdqdib-421[oypak]',
'kzeed-gzssd-qtlnxynhx-515[byszr]',
'surmhfwloh-iorzhu-pdqdjhphqw-517[bhpqy]',
'xcitgcpixdcpa-ytaanqtpc-rjhidbtg-htgkxrt-167[kjlxw]',
'yuxufmdk-sdmpq-nmewqf-emxqe-248[cteon]',
'iuxxuyobk-kmm-xkykgxin-306[hjxkm]',
'joufsobujpobm-dboez-dpbujoh-mphjtujdt-675[fwybh]',
'ajmrxjlcren-ouxfna-vjatncrwp-459[ajnrc]',
'nwzekwypera-xwogap-paydjkhkcu-992[akpwe]',
'aietsrmdih-gpewwmjmih-gerhc-stivexmsrw-282[eimhr]',
'sxdobxkdsyxkv-nio-yzobkdsyxc-874[gpnhm]',
'qfkkj-mldvpe-afcnsldtyr-639[dfkla]',
'eqnqthwn-tcddkv-ucngu-674[lekca]',
'lqxlxujcn-bcxajpn-667[aomrt]',
'etaqigpke-dcumgv-wugt-vguvkpi-180[guvei]',
'udglrdfwlyh-mhoobehdq-orjlvwlfv-413[mnizv]',
'wyvqljapsl-jhukf-jvhapun-klzpnu-851[ixkjt]',
'esyfwlau-jsttal-wfyafwwjafy-138[afwyj]',
'bdavqofuxq-ymszqfuo-otaoaxmfq-fdmuzuzs-222[zbyrv]',
'udskkaxawv-wyy-vwkayf-996[drsqf]',
'qzoggwtwsr-qczcftiz-foppwh-gvwddwbu-246[btqac]',
'yaxsnlcrun-yujbcrl-pajbb-nwprwnnarwp-745[cdwey]',
'zhdsrqlchg-gbh-pdunhwlqj-439[hdglq]',
'zekvierkzferc-nvrgfezqvu-treup-tfrkzex-ivjvrity-139[nthvs]',
'vhkkhlbox-ktwbhtvmbox-vtgwr-vhtmbgz-ftgtzxfxgm-709[tbghv]',
'kmjezxodgz-xmtjbzidx-agjrzm-rjmfncjk-863[hramk]',
'qspkfdujmf-kfmmzcfbo-vtfs-uftujoh-129[nmtql]',
'ipvohghykvbz-jhukf-jvhapun-jvuahputlua-591[cyqjf]',
'lahxpnwrl-lqxlxujcn-vjwjpnvnwc-563[wcjme]',
'sbejpbdujwf-qmbtujd-hsbtt-bdrvjtjujpo-493[jbtdu]',
'ujoon-hrpktcvtg-wjci-stepgibtci-115[tdcgy]',
'wlqqp-jtrmvexvi-ylek-rercpjzj-321[pyfoz]',
'qyujihctyx-vohhs-yhachyylcha-604[xfbmz]',
'eza-dpncpe-nlyoj-nzletyr-epnsyzwzrj-925[ytbnm]',
'nzwzcqfw-awldetn-rcldd-lnbftdtetzy-743[tomzn]',
'nwzekwypera-bhksan-yqopkian-oanreya-914[otvsm]',
'avw-zljyla-wshzapj-nyhzz-klclsvwtlua-773[lazwh]',
'plolwdub-judgh-iorzhu-vdohv-179[bmrjz]',
'lnkfaypeha-zua-zarahkliajp-368[tzngm]',
'slqryzjc-afmamjyrc-pcqcypaf-418[tjmsy]',
'frqvxphu-judgh-vfdyhqjhu-kxqw-rshudwlrqv-387[dxhnm]',
'uqtqbizg-ozilm-lgm-nqvivkqvo-928[rmnzf]',
'lqwhuqdwlrqdo-edvnhw-ilqdqflqj-335[ctsda]',
'zloolpfsb-mixpqfz-doxpp-obzbfsfkd-133[txiel]',
'otzkxtgzoutgr-igtje-iugzotm-xkykgxin-592[zywxn]',
'sbnqbhjoh-kfmmzcfbo-nbobhfnfou-701[fihgn]',
'eqttqukxg-tcfkqcevkxg-rncuvke-itcuu-vtckpkpi-960[fobts]',
'xgjougizobk-hatte-jkbkruvsktz-592[ktbgj]',
'bqvvu-bhksan-pnwejejc-836[tcjgz]',
'ikhcxvmbex-unggr-kxvxbobgz-787[opysz]',
'jsehsyafy-usfvq-mkwj-lwklafy-320[msztr]',
'ikhcxvmbex-ktuubm-ftgtzxfxgm-891[ojsny]',
'ajvyjprwp-ajkkrc-mnenuxyvnwc-381[umkpn]',
'pinovwgz-zbb-mznzvmxc-993[aumbt]',
'ajyqqgdgcb-aylbw-rcaflmjmew-678[abcgj]',
'houngfgxjuay-kmm-zxgototm-488[zudop]',
'kmjezxodgz-ezggtwzvi-ozxcijgjbt-707[jyizu]',
'eadalsjq-yjsvw-xdgowj-mkwj-lwklafy-762[jwald]',
'aczupnetwp-ojp-dstaatyr-379[ftyeo]',
'gvcskirmg-yrwxefpi-gerhc-gsexmrk-ywiv-xiwxmrk-958[rgixe]',
'yuxufmdk-sdmpq-rxaiqd-pqbmdfyqzf-196[azivn]',
'qyujihctyx-wuhxs-yhachyylcha-344[zdimn]',
'rflsjynh-idj-yjhmstqtld-515[cvfph]',
'lqwhuqdwlrqdo-edvnhw-rshudwlrqv-257[nlxst]',
'odiih-kdwwh-mnbrpw-953[wdhib]',
'ugdgjxmd-uzgugdslw-klgjsyw-814[cktda]',
'mybbycsfo-pvygob-myxdksxwoxd-744[ifscp]',
'bqvvu-ywjzu-odellejc-316[krcsj]',
'nzcczdtgp-dnlgpyrpc-sfye-cplnbftdtetzy-743[kbdyu]',
'pinovwgz-xcjxjgvoz-xjiovdihzio-525[mhjxn]',
'ckgvutofkj-igtje-xkykgxin-878[vimez]',
'rmn-qcapcr-zsllw-bcnjmwkclr-912[anzkg]',
'ipvohghykvbz-yhiipa-zlycpjlz-227[oyzjt]',
'nwilwcejc-nwxxep-wymqeoepekj-550[ztyes]',
'uzfqdzmfuazmx-otaoaxmfq-pqhqxabyqzf-274[qafzm]',
'hafgnoyr-qlr-chepunfvat-975[zauvm]',
'irdgrxzex-treup-tfrkzex-ljvi-kvjkzex-269[lozqp]',
'udskkaxawv-bwddqtwsf-klgjsyw-944[nmsxw]',
'ujoon-rpcsn-gtprfjxhxixdc-921[xcjno]',
'jrncbavmrq-enoovg-qrfvta-195[aqyxs]',
'xgsvgmotm-yigbktmkx-natz-iutzgotsktz-462[smvtd]',
'sxdobxkdsyxkv-mkxni-zebmrkcsxq-770[zkwax]',
'zbytomdsvo-oqq-crszzsxq-614[iwqoc]',
'wfruflnsl-kqtbjw-qfgtwfytwd-801[imsvp]',
'nglmtuex-wrx-ybgtgvbgz-787[etuki]',
'ixeumktoi-yigbktmkx-natz-uvkxgzouty-774[hylkz]',
'cxy-bnlanc-ljwmh-cnlqwxuxph-719[unwgt]',
'xcitgcpixdcpa-ytaanqtpc-hidgpvt-453[cptai]',
'udglrdfwlyh-fkrfrodwh-fxvwrphu-vhuylfh-985[ntvum]',
'dsxxw-glrcplyrgmlyj-afmamjyrc-yaosgqgrgml-912[hstuv]',
'nglmtuex-xzz-wxiehrfxgm-267[wjyqr]',
'ixccb-fdqgb-xvhu-whvwlqj-153[bchqv]',
'vhehkyne-vtgwr-vhtmbgz-ehzblmbvl-449[mnuht]',
'xgjougizobk-hgyqkz-jkyomt-774[sdeqt]',
'ktiaaqnqml-zijjqb-abwziom-252[fgisn]',
'glrcplyrgmlyj-cee-pcyaosgqgrgml-470[osmrp]',
'qjopwxha-lhwopey-cnwoo-nawymqeoepekj-472[rfgnc]',
'plolwdub-judgh-fdqgb-vklsslqj-959[ldbgj]',
'yknnkoera-zua-wymqeoepekj-472[rsqmj]',
'ujoon-ytaanqtpc-gtrtxkxcv-635[tacno]',
'tcfkqcevkxg-fag-ujkrrkpi-856[njiek]',
'uzfqdzmfuazmx-bdavqofuxq-qss-dqmocgueufuaz-170[wktxs]',
'ltpedcxots-rpcsn-hwxeexcv-375[draoy]',
'zntargvp-pnaql-nanylfvf-585[iejdf]',
'lqwhuqdwlrqdo-vfdyhqjhu-kxqw-orjlvwlfv-101[szrtk]',
'dyz-combod-oqq-cdybkqo-614[odqbc]',
'jyddc-fyrrc-wepiw-802[chjtm]',
'nglmtuex-yehpxk-phkdlahi-319[cjshn]',
'surmhfwloh-fdqgb-xvhu-whvwlqj-205[vzite]',
'wrs-vhfuhw-udeelw-dqdobvlv-179[znwyc]',
'myxcewob-qbkno-lkcuod-cdybkqo-328[ysbjw]',
'ixeumktoi-inuiurgzk-giwaoyozout-124[vnlcr]',
'vhehkyne-utldxm-nlxk-mxlmbgz-839[cnryt]',
'tagzsrsjvgmk-tskcwl-klgjsyw-216[sgkjl]',
'bjfutsneji-idj-ijajqturjsy-203[gkoyb]',
'plolwdub-judgh-gbh-wudlqlqj-335[snabj]',
'ydjuhdqjyedqb-sqdto-seqjydw-tufbeocudj-504[cawtu]',
'fkqbokxqflkxi-ciltbo-jxohbqfkd-913[mthlx]',
'eadalsjq-yjsvw-tskcwl-sfsdqkak-840[sakdj]',
'tipfxvezt-dzczkrip-xiruv-irsszk-ivtvzmzex-347[ewgst]',
'plolwdub-judgh-fdqgb-pdqdjhphqw-309[slvtc]',
'nwilwcejc-iehepwnu-cnwza-ywjzu-naoawnyd-914[wnace]',
'lejkrscv-irsszk-fgvirkzfej-633[krsef]',
'qzlozfhmf-bgnbnkzsd-qdrdzqbg-105[ltrxz]',
'hmsdqmzshnmzk-bkzrrhehdc-idkkxadzm-sqzhmhmf-833[vsftk]',
'mbggf-wyvqljapsl-ibuuf-ylhjxbpzpapvu-565[kxetg]',
'vehmsegxmzi-tpewxmg-kveww-erepcwmw-360[ewmgp]',
'mhi-lxvkxm-xzz-wxoxehifxgm-475[zvuxn]',
'pejji-nio-gybucryz-848[scvwe]',
'oqnidbshkd-rbzudmfdq-gtms-cdozqsldms-729[dsmqb]',
'hmsdqmzshnmzk-lzfmdshb-idkkxadzm-btrsnldq-rdquhbd-937[yncvm]',
'fmsledevhsyw-wgezirkiv-lyrx-jmrergmrk-932[bgytv]',
'houngfgxjuay-lruckx-iayzuskx-ykxboik-462[vefmw]',
'njmjubsz-hsbef-dboez-vtfs-uftujoh-337[bfjsu]',
'lhkhszqx-fqzcd-azrjds-cdozqsldms-157[amnks]',
'dpotvnfs-hsbef-dipdpmbuf-tbmft-103[zhrge]',
'sebehvkb-rqiauj-jusxdebewo-166[jscdv]',
'diozmivodjivg-pinovwgz-ezggtwzvi-mznzvmxc-811[ysntj]',
'kwtwznct-kpwkwtibm-zmamizkp-902[kwmtz]',
'jyddc-fyrrc-asvowlst-828[ybfgd]',
'tagzsrsjvgmk-vqw-ugflsafewfl-580[xmqnr]',
'mfklstdw-tmffq-kwjnauwk-502[fkwmt]',
'xcitgcpixdcpa-rpcsn-rdpixcv-rdcipxcbtci-453[ynzqc]',
'fmsledevhsyw-wgezirkiv-lyrx-hitevxqirx-542[eirvx]',
'ojk-nzxmzo-zbb-ozxcijgjbt-213[zbjox]',
'gpewwmjmih-hci-vigimzmrk-932[injxy]',
'gpsxdprixkt-qjccn-hpath-349[pchtx]',
'diozmivodjivg-nxvqzibzm-cpio-omvdidib-109[jivdx]',
'dzczkrip-xiruv-srjbvk-crsfirkfip-269[mbnda]',
'lgh-kwujwl-usfvq-ugslafy-vwhsjlewfl-580[ftwxr]',
'ymszqfuo-rxaiqd-xmnadmfadk-456[smxyt]',
'slqryzjc-njyqrga-epyqq-kypicrgle-158[ktcij]',
'apwmeclga-hcjjwzcyl-bctcjmnkclr-496[cjlam]',
'pejji-zvkcdsm-qbkcc-gybucryz-770[junto]',
'kzeed-kqtbjw-xmnuunsl-931[eknub]',
'fhezusjybu-sedikcuh-whqtu-hqrryj-cqhaujydw-322[kogjn]',
'encuukhkgf-uecxgpigt-jwpv-fgrnqaogpv-648[gpuce]',
'fnjyxwrinm-kjbtnc-bjunb-225[nbjcf]',
'gifavtkzcv-sleep-kvtyefcfxp-737[cusry]',
'nij-mywlyn-xsy-fiacmncwm-266[txfyk]',
'pxtihgbsxw-cxeeruxtg-labiibgz-501[vfryk]',
'oaxadrgx-vqxxknqmz-etubbuzs-612[gmzpy]',
'oxaflxzqfsb-yxphbq-obzbfsfkd-497[gsjqc]',
'lxaaxbren-ajmrxjlcren-kdwwh-uxprbcrlb-563[rxabl]',
'drxevkzt-jtrmvexvi-ylek-ivrthlzjzkzfe-893[evzkr]',
'xst-wigvix-veqtekmrk-hci-hizipstqirx-334[itxeh]',
'hqcfqwydw-sxesebqju-bewyijysi-556[upyan]',
'oxjmxdfkd-avb-abmxoqjbkq-809[cuyzr]',
'vkppo-isqludwuh-xkdj-husuylydw-686[sfajr]',
'nwilwcejc-ykjoqian-cnwza-ywjzu-ykjpwejiajp-576[jwaci]',
'gpewwmjmih-hci-jmrergmrk-360[jftsi]',
'ibghopzs-gqojsbusf-vibh-gvwddwbu-324[ratwb]',
'mvhkvbdib-wvnfzo-gvwjmvojmt-785[zljyf]',
'lahxpnwrl-mhn-bjunb-719[kexws]',
'iuxxuyobk-inuiurgzk-vaxingyotm-722[snwtx]',
'rnqnyfwd-lwfij-xhfajsljw-mzsy-wjhjnansl-671[nyphz]',
'uwtojhynqj-wfggny-yjhmstqtld-229[qzmtg]',
'dfcxsqhwzs-pibbm-qighcasf-gsfjwqs-220[iucxp]',
'eqttqukxg-dwppa-uvqtcig-362[setiw]',
'gcfcnuls-aluxy-vohhs-guleyncha-500[gchrt]',
'ocipgvke-dwppa-gpikpggtkpi-518[tihzs]',
'qfmcusbwq-dzoghwq-ufogg-cdsfohwcbg-454[ylkjh]',
'yhtwhnpun-msvdly-zlycpjlz-123[qjnxy]',
'lxuxaodu-ljwmh-lxjcrwp-anjlzdrbrcrxw-199[lnesc]',
'qekrixmg-jpsaiv-tyvglewmrk-646[egikm]',
'emixwvqhml-moo-wxmzibqwva-720[mwioq]',
'kpvgtpcvkqpcn-dwppa-cpcnauku-934[bmprc]',
'lugjuacha-vumeyn-mylpcwym-604[muyac]',
'lzfmdshb-ahngzyzqcntr-dff-nodqzshnmr-937[pwdof]',
'sbqiiyvyut-vkppo-zubboruqd-cqdqwucudj-348[osntj]',
'ubhatstkwhnl-wrx-ftkdxmbgz-111[fojwk]',
'mvhkvbdib-zbb-ncdkkdib-967[brnch]',
'vetllbybxw-utldxm-mktbgbgz-579[gljhz]',
'htsxzrjw-lwfij-gzssd-btwpxmtu-333[rhtqs]',
'nzcczdtgp-nlyoj-nzletyr-xlcvpetyr-587[smnct]',
'pybgmyargtc-zsllw-jyzmpyrmpw-366[zutsy]',
'kfg-jvtivk-upv-jvimztvj-997[xicjt]',
'kpvgtpcvkqpcn-hnqygt-ewuvqogt-ugtxkeg-388[gtkpq]',
'ktiaaqnqml-kpwkwtibm-mvoqvmmzqvo-564[asert]',
'mfklstdw-uzgugdslw-dgyaklauk-892[dgklu]',
'gzefmnxq-pkq-xmnadmfadk-196[madfk]',
'qcbgiasf-ufors-dzoghwq-ufogg-ghcfous-714[yiavh]',
'veqtekmrk-nippcfier-asvowlst-958[thmsn]',
'iehepwnu-cnwza-zua-skngodkl-134[naeku]',
'xfbqpojafe-fhh-tfswjdft-571[fhjta]',
'laffe-jek-ygrky-150[pjbaq]',
'lxwbdvna-pajmn-kjbtnc-fxatbqxy-459[xbmlo]',
'wkqxodsm-bkllsd-bomosfsxq-276[isaey]',
'ovbunmneqbhf-fpniratre-uhag-qrirybczrag-741[rabne]',
'xjmmjndqz-mvwwdo-kpmxcvndib-395[hyolt]',
'zbytomdsvo-mkxni-mykdsxq-bomosfsxq-276[efcvd]',
'jqwpihizlwca-bwx-amkzmb-jcvvg-ivitgaqa-252[dsmej]',
'dszphfojd-sbccju-nbobhfnfou-805[bfocd]',
'hwbba-lgnnadgcp-vgejpqnqia-830[ptkvu]',
'wfintfhynaj-hmthtqfyj-fhvznxnynts-125[nfhty]',
'wfummczcyx-wbiwifuny-mbcjjcha-864[cmwbf]',
'tfcfiwlc-avccpsvre-uvjzxe-269[cvefa]',
'dlhwvupglk-msvdly-dvyrzovw-331[nmeji]',
'fnjyxwrinm-npp-orwjwlrwp-979[tqyvp]',
'sgmtkzoi-hgyqkz-xkgiwaoyozout-904[ogkzi]',
'qjopwxha-oywrajcan-dqjp-opknwca-680[qgsnh]',
'oxjmxdfkd-zxkav-pxibp-185[mtfwl]',
'mfklstdw-wyy-lwuzfgdgyq-814[iznma]',
'zlilocri-bdd-pefmmfkd-341[intay]',
'rwcnawjcrxwju-kdwwh-cajrwrwp-849[wrcja]',
'xmrrq-uzgugdslw-jwuwanafy-372[uwagr]',
'kzgwomvqk-zijjqb-ewzsapwx-278[wzjkq]',
'lxuxaodu-ouxfna-ujkxajcxah-277[xaujo]',
'nwzekwypera-iehepwnu-cnwza-fahhuxawj-zalhkuiajp-186[nbzay]',
'vhkkhlbox-ktuubm-hixktmbhgl-943[byecg]',
'qxdwpopgsdjh-uadltg-prfjxhxixdc-141[eqzvr]',
'kyelcrga-zsllw-pcacgtgle-522[lcgae]',
'qzoggwtwsr-ksodcbwnsr-dzoghwq-ufogg-zcuwghwqg-818[dzclv]',
'xekdwvwnzkqo-lhwopey-cnwoo-bejwjyejc-238[ksrhc]',
'npmhcargjc-cee-bctcjmnkclr-782[tgrxz]',
'hwbba-ecpfa-eqcvkpi-ceswkukvkqp-336[kcepa]',
'uwtojhynqj-hmthtqfyj-wjhjnansl-437[kylvd]',
'sxdobxkdsyxkv-bknsykmdsfo-oqq-kmaescsdsyx-380[skdxo]',
'rmn-qcapcr-afmamjyrc-dglylagle-158[aclmr]',
'raphhxuxts-gpqqxi-advxhixrh-583[vuafx]',
'nij-mywlyn-xsy-jolwbumcha-760[mkhgy]',
'lnkfaypeha-xqjju-odellejc-680[ghsol]',
'ytu-xjhwjy-wfggny-zxjw-yjxynsl-931[tynvp]',
'myxcewob-qbkno-lkcuod-nofovyzwoxd-744[obcdk]',
'diozmivodjivg-mvwwdo-gvwjmvojmt-161[vpsib]',
'vhkkhlbox-cxeeruxtg-ehzblmbvl-787[behlx]',
'iruzfrtkzmv-treup-tfrkzex-jvimztvj-841[xmwvi]',
'kyelcrga-cee-pcacgtgle-730[yladm]',
'iutyaskx-mxgjk-yigbktmkx-natz-jkyomt-436[tznsm]',
'hvbizodx-agjrzm-ncdkkdib-421[iqnst]',
'xzwrmkbqtm-kivlg-lmxtwgumvb-252[tkefm]',
'udpsdjlqj-vfdyhqjhu-kxqw-pdqdjhphqw-439[dqhjp]',
'mbggf-ibuuf-mpuhujpun-695[gnpdh]',
'irdgrxzex-avccpsvre-ivtvzmzex-997[stcvr]',
'sawlkjevaz-ywjzu-opknwca-602[awjkz]',
'wlqqp-irsszk-tljkfdvi-jvimztv-217[mrqcl]',
'sehheiylu-tou-sedjqydcudj-192[teamg]',
'ajyqqgdgcb-pyzzgr-kylyeckclr-470[cjtul]',
'ziuxioqvo-moo-bziqvqvo-278[ixnjm]',
'vjpwncrl-kjbtnc-nwprwnnarwp-199[nwprc]',
'wpuvcdng-gii-hkpcpekpi-986[picgk]',
'dmybmsuzs-nmewqf-iadwetab-456[mabde]',
'rtqlgevkng-tcddkv-fgukip-830[juyzr]',
'votubcmf-dipdpmbuf-sfbdrvjtjujpo-727[rcslk]',
'vhkkhlbox-wrx-ftgtzxfxgm-163[txmwq]',
'zloolpfsb-mixpqfz-doxpp-qbzeklildv-289[lpozb]',
'rkpqxyib-zxkav-zlxqfkd-xkxivpfp-393[bpquj]',
'pbafhzre-tenqr-qlr-freivprf-455[byihv]',
'kzeed-hfsid-htfynsl-jslnsjjwnsl-905[egluv]',
'nlyoj-nzletyr-lylwjdtd-899[ymzvu]',
'laffe-pkrrehkgt-sgxqkzotm-800[gsdnt]',
'ynukcajey-xwogap-zaoecj-212[ewjah]',
'rgndvtcxr-bxaxipgn-vgpst-hrpktcvtg-wjci-sthxvc-661[zwgis]',
'ujqgywfau-vqw-vwnwdghewfl-580[istzh]',
'jyddc-tvsnigxmpi-fewoix-hiwmkr-178[idmwx]',
'fab-eqodqf-omzpk-etubbuzs-430[befoq]',
'ibghopzs-suu-fsoqeiwgwhwcb-792[qzyrk]',
'sedikcuh-whqtu-uww-vydqdsydw-166[dwuhq]',
'cjpibabsepvt-qmbtujd-hsbtt-tupsbhf-467[wsrek]',
'aietsrmdih-ikk-gsrxemrqirx-594[irekm]',
'bwx-amkzmb-jcvvg-bziqvqvo-694[vbmqz]',
'ixccb-exqqb-xvhu-whvwlqj-647[qxbch]',
'nwlddtqtpo-nlyoj-dezclrp-639[dlnop]',
'ahngzyzqcntr-azrjds-otqbgzrhmf-833[oquzx]',
'xcitgcpixdcpa-gpqqxi-ejgrwphxcv-193[cpxgi]',
'buzahisl-zjhclunly-obua-svnpzapjz-643[zalub]',
'excdklvo-mkxni-mykdsxq-nozkbdwoxd-458[fxjwy]',
'jvyyvzpcl-jyfvnlupj-msvdly-klclsvwtlua-253[injkl]',
'emixwvqhml-akidmvomz-pcvb-zmkmqdqvo-434[mvqdi]',
'dszphfojd-qmbtujd-hsbtt-sfbdrvjtjujpo-831[ykcho]',
'kwzzwaqdm-zijjqb-camz-bmabqvo-902[zabmq]',
'nvrgfezqvu-irsszk-jkfirxv-789[neysm]',
'yflexwxoalrp-zelzlixqb-jxohbqfkd-679[lxbef]',
'wfummczcyx-yaa-nluchcha-344[rmpgs]',
'tcfkqcevkxg-gii-ocpcigogpv-674[stfma]',
'nwzekwypera-ywjzu-ajcejaanejc-316[xoprq]',
'qzlozfhmf-idkkxadzm-btrsnldq-rdquhbd-105[pawus]',
'uqtqbizg-ozilm-kivlg-uizsmbqvo-356[xhvzm]',
'apuut-kgvnodx-bmvnn-mzvxlpdndodji-473[dnvmo]',
'wfummczcyx-dyffsvyuh-guhuaygyhn-500[yufhc]',
'cybyjqho-whqtu-shoewudys-vbemuh-tuiywd-842[huywb]',
'sedikcuh-whqtu-tou-jhqydydw-920[dhuqt]',
'zekvierkzferc-wcfnvi-ljvi-kvjkzex-815[ibzsg]',
'froruixo-udeelw-orjlvwlfv-777[zyusv]',
'oxmeeuruqp-ngzzk-qzsuzqqduzs-222[mcqzj]',
'gpewwmjmih-ikk-ywiv-xiwxmrk-568[uszki]',
'aietsrmdih-jpsaiv-wepiw-334[iaeps]',
'joufsobujpobm-cbtlfu-dvtupnfs-tfswjdf-441[fubjo]',
'xst-wigvix-hci-viwievgl-256[jnlst]',
'oknkvcta-itcfg-wpuvcdng-dwppa-qrgtcvkqpu-414[dcxst]',
'rnqnyfwd-lwfij-gfxpjy-htsyfnsrjsy-957[whlxr]',
'sbnqbhjoh-fhh-tbmft-181[jndic]',
'slqryzjc-afmamjyrc-qcptgacq-236[soytq]',
'rgllk-qss-ymdwqfuzs-768[ynsjx]',
'nwilwcejc-ywjzu-ykwpejc-pnwejejc-966[fthzx]',
'gokzyxsjon-lexxi-gybucryz-146[qomrj]',
'cybyjqho-whqtu-zubboruqd-cqhaujydw-192[naxrp]',
'esyfwlau-bwddqtwsf-vwkayf-294[acbmr]',
'shmml-rtt-znantrzrag-455[vseqh]',
'qlm-pbzobq-yrkkv-obpbxoze-601[lfewg]',
'fbebmtkr-zktwx-utldxm-ybgtgvbgz-995[stygr]',
'ikhcxvmbex-ietlmbv-zktll-vnlmhfxk-lxkobvx-943[lxkvb]',
'rgndvtcxr-hrpktcvtg-wjci-prfjxhxixdc-193[crxtd]',
'ucynmlgxcb-pyzzgr-kypicrgle-158[qtuyz]',
'wpuvcdng-lgnnadgcp-vtckpkpi-466[sftpn]',
'tfiifjzmv-treup-kirzezex-633[gjtxn]',
'cvabijtm-jiasmb-lmxizbumvb-356[axbtn]',
'xgvnndadzy-kgvnodx-bmvnn-yzkgjthzio-395[byaut]',
'rdchjbtg-vgpst-tvv-pcpanhxh-765[hptvc]',
'hqcfqwydw-isqludwuh-xkdj-cqhaujydw-322[mzgty]',
'hvbizodx-wvnfzo-gjbdnodxn-577[kljip]',
'udpsdjlqj-fruurvlyh-fdqgb-frdwlqj-vdohv-283[ktzrm]',
'xjmmjndqz-zbb-jkzmvodjin-707[jmzbd]',
'glrcplyrgmlyj-zsllw-nspafyqgle-184[ptidc]',
'zvyvgnel-tenqr-cebwrpgvyr-fpniratre-uhag-hfre-grfgvat-377[zesyf]',
'ubhatstkwhnl-vahvhetmx-wxitkmfxgm-605[qzvme]',
'iqmbazulqp-dmnnuf-ruzmzouzs-898[shtag]',
'surmhfwloh-gbh-ghsorbphqw-179[sickj]',
'htqtwkzq-uqfxynh-lwfxx-xjwanhjx-593[rwtbd]',
'kgjgrypw-epybc-njyqrga-epyqq-rpyglgle-600[jkcuz]',
'ktiaaqnqml-zijjqb-zmikycqaqbqwv-642[qaibj]',
'udglrdfwlyh-fdqgb-frdwlqj-sxufkdvlqj-803[cmkaq]',
'oxmeeuruqp-pkq-pqbmdfyqzf-196[qpefm]',
'dzczkrip-xiruv-sleep-ivjvrity-893[xjomn]',
'dmpuamofuhq-ymszqfuo-otaoaxmfq-fdmuzuzs-482[mufoa]',
'nbhofujd-ezf-tbmft-389[cjgtr]',
'uzfqdzmfuazmx-otaoaxmfq-iadwetab-820[afmzd]',
'froruixo-fdqgb-oderudwrub-205[ziyjo]',
'ugjjgkanw-kusnwfywj-zmfl-dstgjslgjq-840[nhxgt]',
'zhdsrqlchg-iorzhu-sxufkdvlqj-101[qtzca]',
'dsxxw-aylbw-amyrgle-qcptgacq-912[qdwmn]',
'veqtekmrk-hci-xiglrspskc-282[tfuln]',
'qcffcgwjs-forwcoqhwjs-rms-hfowbwbu-480[hgnsl]',
'jvsvymbs-jovjvshal-zhslz-539[ocmvj]',
'enqvbnpgvir-rtt-fuvccvat-195[nmrlc]',
'oxmeeuruqp-bxmefuo-sdmee-geqd-fqefuzs-170[uenfm]',
'wlsiayhcw-wfummczcyx-xsy-xyjfisgyhn-214[zpqtr]',
'jxdkbqfz-avb-obxznrfpfqflk-133[fbkqx]',
'zvyvgnel-tenqr-pnaql-bcrengvbaf-195[eghnt]',
'diozmivodjivg-ytz-vxlpdndodji-551[diovj]',
'jyddc-jpsaiv-ywiv-xiwxmrk-308[idjvw]',
'pbafhzre-tenqr-enoovg-hfre-grfgvat-403[erfga]',
'pbeebfvir-enoovg-ernpdhvfvgvba-793[vebfg]',
'wfummczcyx-zfiqyl-uhufsmcm-292[tvcdw]',
'kwvacumz-ozilm-zijjqb-ivitgaqa-746[iazjm]',
'udpsdjlqj-ixccb-sodvwlf-judvv-ghyhorsphqw-465[kgyhw]',
'tcrjjzwzvu-wcfnvi-rthlzjzkzfe-451[zjcfr]',
'qxdwpopgsdjh-rpcsn-htgkxrth-947[dkwlb]',
'yaxsnlcrun-kdwwh-bnaerlnb-953[zyncx]',
'zhdsrqlchg-vfdyhqjhu-kxqw-xvhu-whvwlqj-699[mriyj]',
'bkzrrhehdc-bgnbnkzsd-lzqjdshmf-417[izpxt]',
'nwzekwypera-fahhuxawj-wjwhuoeo-862[wsmnu]',
'zgmfyxypbmsq-bwc-rcaflmjmew-964[nmpwb]',
'guahyncw-wuhxs-wihnuchgyhn-188[vmayn]',
'kwtwznct-jcvvg-uizsmbqvo-980[ryzva]',
'udskkaxawv-lgh-kwujwl-uzgugdslw-vwhdgqewfl-138[trijm]',
'shoewudys-rkddo-huiuqhsx-868[tozlb]',
'bnknqetk-atmmx-rzkdr-183[zbyuw]',
'hwdtljsnh-idj-xytwflj-983[sztqp]',
'tipfxvezt-jtrmvexvi-ylek-cfxzjkztj-165[gwmzp]',
'clotzlnetgp-nsznzwlep-opdtry-327[khryz]',
'mhi-lxvkxm-bgmxkgtmbhgte-wrx-ltexl-449[cfrql]',
'hqtyeqsjylu-sxesebqju-cqdqwucudj-686[ngaly]',
'zlkprjbo-doxab-avb-ildfpqfzp-211[ponbm]',
'bqvvu-zua-ykjpwejiajp-420[mdlgx]',
'tfcfiwlc-treup-tfrkzex-drerxvdvek-841[fmlyq]',
'rmn-qcapcr-qaytclecp-fslr-bctcjmnkclr-652[zksta]',
'oazegyqd-sdmpq-nmewqf-xasuefuoe-248[qypjm]',
'kwtwznct-lgm-tijwzibwzg-278[ytdmc]',
'wlqqp-gcrjkzt-xirjj-rthlzjzkzfe-555[xzkhl]',
'gpewwmjmih-gerhc-erepcwmw-646[stnzy]',
'dmybmsuzs-qss-eqdhuoqe-144[sqdem]',
'yflexwxoalrp-zelzlixqb-qoxfkfkd-705[gtnxw]',
'votubcmf-cvooz-sftfbsdi-701[qhtkp]',
'iuruxlar-xghhoz-gtgreyoy-306[fnctb]',
'slqryzjc-bwc-qyjcq-574[pozts]',
'wdjcvuvmyjpn-nxvqzibzm-cpio-gjbdnodxn-525[pcmhn]',
'excdklvo-mkxni-mykdsxq-vklybkdybi-744[vmsba]',
'pybgmyargtc-hcjjwzcyl-sqcp-rcqrgle-912[nhxvg]',
'wifilzof-wuhxs-xypyfijgyhn-760[nuovc]',
'ktiaaqnqml-jcvvg-lmaqov-512[nwxms]',
'kfg-jvtivk-tyftfcrkv-crsfirkfip-945[fkirt]',
'tfiifjzmv-avccpsvre-ivjvrity-581[fnemb]',
'zotts-wuhxs-wiuncha-xyjfisgyhn-786[tzjse]',
'ynssr-ktuubm-phkdlahi-579[bemfv]',
'xlrypetn-mldvpe-dlwpd-145[rnfmz]',
'mfklstdw-wyy-wfyafwwjafy-840[lksij]',
'dsxxw-aylbw-amlryglkclr-938[larwx]',
'wyvqljapsl-msvdly-mpuhujpun-435[gzlnx]',
'bxaxipgn-vgpst-hrpktcvtg-wjci-steadnbtci-323[tcgip]',
'oxjmxdfkd-mixpqfz-doxpp-abpfdk-471[dpxfk]',
'tipfxvezt-szfyrqriuflj-avccpsvre-ivrthlzjzkzfe-321[zfrve]',
'qyujihctyx-mwupyhayl-bohn-uwkocmcncih-708[chyui]',
'zilqwikbqdm-kivlg-apqxxqvo-434[obtmp]',
'willimcpy-luvvcn-qilembij-578[ilcmv]',
'rflsjynh-kqtbjw-ijxnls-437[zskut]',
'jsvagsulanw-hdsklau-yjskk-klgjsyw-814[uzmvg]',
'oknkvcta-itcfg-tcddkv-yqtmujqr-908[zdync]',
'zvyvgnel-tenqr-pnaql-pbngvat-qrcyblzrag-741[gvtck]',
'xjgjmapg-zbb-adivixdib-395[ovzan]',
'gvcskirmg-tpewxmg-kveww-wlmttmrk-464[szvty]',
'gpewwmjmih-gerhc-gsexmrk-wlmttmrk-620[fqrba]',
'jchipqat-gpqqxi-jhtg-ithixcv-661[ihqtc]',
'bjfutsneji-ojqqdgjfs-ijufwyrjsy-619[jfsiq]',
'gvcskirmg-fyrrc-hitpscqirx-308[kwgpv]',
'mvhkvbdib-wpiit-rjmfncjk-525[stlem]',
'bnqqnrhud-qzaahs-cdrhfm-339[pbsax]',
'qfmcusbwq-qvcqczohs-rsgwub-116[qcsbu]',
'qyujihctyx-yaa-guhuaygyhn-734[yahug]',
'jqwpihizlwca-kivlg-kwibqvo-uizsmbqvo-616[qozkj]',
'nglmtuex-vtgwr-ybgtgvbgz-865[gtbve]',
'encuukhkgf-ecpfa-eqcvkpi-yqtmujqr-414[cekqu]',
'jchipqat-ytaanqtpc-tcvxcttgxcv-375[jnfcy]',
'mvydjvxodqz-xviyt-xjvodib-vxlpdndodji-915[spxnt]',
'wfintfhynaj-hmthtqfyj-xmnuunsl-151[nfhtj]',
'pynffvsvrq-sybjre-ratvarrevat-611[rvaef]',
'hplazytkpo-dnlgpyrpc-sfye-nfdezxpc-dpcgtnp-327[uzifr]',
'cybyjqho-whqtu-vbemuh-qdqboiyi-374[zlsdw]',
'yhkpvhjapcl-wshzapj-nyhzz-svnpzapjz-825[bpcty]',
'zhdsrqlchg-mhoobehdq-fxvwrphu-vhuylfh-179[hdflo]',
'ynukcajey-ywjzu-ykwpejc-qoan-paopejc-238[bpemf]',
'vehmsegxmzi-gerhc-gsexmrk-pefsvexsvc-256[umtvy]',
'lugjuacha-jfumncw-alumm-uwkocmcncih-240[bzlaw]',
'nvrgfezqvu-jtrmvexvi-ylek-jkfirxv-165[ywnzb]',
'fhezusjybu-uww-fkhsxqiydw-920[hgusy]',
'bgmxkgtmbhgte-vahvhetmx-hixktmbhgl-189[ebaun]',
'apwmeclga-afmamjyrc-pcqcypaf-496[uyatz]',
'pdjqhwlf-vfdyhqjhu-kxqw-whfkqrorjb-829[xogzy]',
'crwwv-rkpqxyib-zxkav-rpbo-qbpqfkd-289[bkpqr]',
'tfcfiwlc-irsszk-kvtyefcfxp-893[fciks]',
'lejkrscv-wcfnvi-ivjvrity-191[uyalq]',
'ktfitzbgz-unggr-inkvatlbgz-111[lmwni]',
'fodvvlilhg-fdqgb-frdwlqj-frqwdlqphqw-205[kxstn]',
'odkasqzuo-bxmefuo-sdmee-fqotzaxask-638[oaesd]',
'pdjqhwlf-fdqgb-frdwlqj-rshudwlrqv-569[xcdnu]',
'ckgvutofkj-vrgyzoi-mxgyy-jkvgxzsktz-592[murta]',
'eza-dpncpe-mtzslklcozfd-clmmte-dezclrp-353[celzd]',
'vehmsegxmzi-nippcfier-wxsveki-516[vqrpz]',
'rflsjynh-jll-ijufwyrjsy-853[taqil]',
'jvyyvzpcl-msvdly-hjxbpzpapvu-513[nyxtz]',
'iehepwnu-cnwza-fahhuxawj-oanreyao-264[ulyvn]',
'gcfcnuls-aluxy-wbiwifuny-omyl-nymncha-526[snoxf]',
'oaxadrgx-dmpuamofuhq-omzpk-etubbuzs-638[uamob]',
'oxmeeuruqp-nmewqf-dqeqmdot-352[kjtia]',
'bqxnfdmhb-dff-btrsnldq-rdquhbd-521[iyjvl]',
'joufsobujpobm-fhh-pqfsbujpot-701[obfjp]',
'hafgnoyr-pnaql-pbngvat-fnyrf-845[vtsod]',
'mhi-lxvkxm-wrx-kxvxbobgz-657[npmfu]',
'jrncbavmrq-pnaql-chepunfvat-299[tnzyx]',
'xgsvgmotm-lruckx-gtgreyoy-566[cuxte]',
'vcibutulxiom-wbiwifuny-xyjfisgyhn-994[uayrz]',
'wyvqljapsl-zjhclunly-obua-vwlyhapvuz-123[stmdx]',
'tvsnigxmpi-jpsaiv-stivexmsrw-568[lhxnw]',
'bnqqnrhud-azrjds-cdrhfm-703[pozyr]',
'mfklstdw-tmffq-ugflsafewfl-164[flmst]',
'tcorcikpi-hnqygt-fgukip-778[hzsum]',
'amppmqgtc-hcjjwzcyl-qfgnngle-262[yfnvu]',
'ynssr-vtgwr-ehzblmbvl-631[zxbiy]',
'slqryzjc-aylbw-amyrgle-dglylagle-938[lyage]',
'vrurcjah-pajmn-snuuhknjw-jwjuhbrb-745[dphsv]',
'oaddaeuhq-eomhqzsqd-tgzf-etubbuzs-222[inrke]',
'yrwxefpi-veffmx-qerekiqirx-542[efirx]',
'nchhg-rmttgjmiv-bziqvqvo-304[vghim]',
'vkppo-vbemuh-efuhqjyedi-244[fetmj]',
'tyepcyletzylw-nlyoj-nzletyr-qtylyntyr-379[ngrvh]',
'otzkxtgzoutgr-hatte-giwaoyozout-228[tjxcr]',
'rgllk-eomhqzsqd-tgzf-pqbxakyqzf-534[lzpxc]',
'zgmfyxypbmsq-zyqicr-jyzmpyrmpw-704[ewjdl]',
'vehmsegxmzi-gerhc-gsexmrk-qevoixmrk-204[tyisn]',
'lugjuacha-zfiqyl-xyjfisgyhn-890[fcsxk]',
'qjopwxha-ywjzu-ykwpejc-zaoecj-810[rvmet]',
'bnknqetk-sno-rdbqds-idkkxadzm-knfhrshbr-521[kdnbr]',
'ksodcbwnsr-tzcksf-cdsfohwcbg-818[mnker]',
'vdzonmhydc-rbzudmfdq-gtms-lzqjdshmf-183[abjvy]',
'wfummczcyx-luvvcn-yhachyylcha-630[cyhal]',
'zvyvgnel-tenqr-rtt-freivprf-351[zymsx]',
'fhezusjybu-sqdto-jusxdebewo-972[amzjn]',
'amjmpdsj-zyqicr-cleglccpgle-106[vdopa]',
'hjgbwuladw-wyy-kwjnauwk-502[mrbin]',
'willimcpy-vohhs-mbcjjcha-734[vsbfq]',
'xjgjmapg-xcjxjgvoz-omvdidib-317[wynjr]',
'htqtwkzq-idj-xytwflj-385[tjqwd]',
'ynssr-xzz-xgzbgxxkbgz-579[nsivc]',
'xgvnndadzy-agjrzm-rjmfncjk-473[toazb]',
'pbeebfvir-sybjre-fuvccvat-533[stklm]',
'jvuzbtly-nyhkl-ibuuf-aljouvsvnf-851[lmnuf]',
'ziuxioqvo-ntwemz-uiviomumvb-902[arxeu]',
'vrurcjah-pajmn-ajkkrc-mnbrpw-589[qmtsy]',
'dwbcjkun-bljenwpna-qdwc-vjwjpnvnwc-537[nwjcb]',
'oxjmxdfkd-mixpqfz-doxpp-jxkxdbjbkq-913[xdjkp]',
'mvkccspson-mkxni-mykdsxq-vyqscdsmc-432[stfwy]',
'rflsjynh-htwwtxnaj-jll-qtlnxynhx-879[jsdzt]',
'pelbtravp-onfxrg-chepunfvat-663[hazbg]',
'zbytomdsvo-zvkcdsm-qbkcc-vyqscdsmc-692[csdmv]',
'qspkfdujmf-fhh-vtfs-uftujoh-415[nmzht]',
'aczupnetwp-nlyoj-qtylyntyr-405[tjvzp]',
'gsvvswmzi-jyddc-glsgspexi-wxsveki-646[puyzv]',
'rgndvtcxr-eaphixr-vgphh-sthxvc-973[ijhlr]',
'dyz-combod-mrymyvkdo-domrxyvyqi-250[pjlom]',
'xst-wigvix-fewoix-jmrergmrk-646[irxeg]',
'xgsvgmotm-kmm-iutzgotsktz-436[mtgko]',
'nzwzcqfw-awldetn-rcldd-nzyeltyxpye-769[delnw]',
'crwwv-oxyyfq-rpbo-qbpqfkd-393[qbfop]',
'wlsiayhcw-jfumncw-alumm-lywycpcha-344[wfzqm]',
'qfkkj-fydelmwp-nlyoj-nzletyr-cpnptgtyr-951[oxpuz]',
'qcffcgwjs-qobrm-rsdofhasbh-376[zwyat]',
'jshzzpmplk-pualyuhapvuhs-yhiipa-zopwwpun-669[simvn]',
'nglmtuex-wrx-kxvxbobgz-735[phwgz]',
'oaxadrgx-nmewqf-abqdmfuaze-404[tysvj]',
'vkppo-zubboruqd-qdqboiyi-998[dhjui]',
'ajvyjprwp-snuuhknjw-dbna-cnbcrwp-667[ziuhp]',
'pybgmyargtc-zsllw-bcnjmwkclr-314[mnhck]',
'pkl-oaynap-xwogap-opknwca-992[nhagy]',
'hafgnoyr-onfxrg-znexrgvat-741[cywdg]',
'vkppo-sqdto-ijehqwu-816[opqde]',
'jvsvymbs-jhukf-zavyhnl-409[vhjsy]',
'hqtyeqsjylu-cqwdujys-tou-husuylydw-686[kcdsj]',
'awzwhofm-ufors-pibbm-rsdofhasbh-298[bfhos]',
'xjinphzm-bmvyz-nxvqzibzm-cpio-mznzvmxc-161[oslup]',
'tfejldvi-xiruv-wcfnvi-fgvirkzfej-529[sgtbw]',
'qlm-pbzobq-ciltbo-pxibp-185[zjmpt]',
'guahyncw-xsy-xymcah-630[fckyn]',
'lahxpnwrl-ljwmh-jlzdrbrcrxw-407[lrwhj]',
'dmybmsuzs-omzpk-ogefayqd-eqdhuoq-222[mctji]',
'forwcoqhwjs-pibbm-twbobqwbu-480[bwoqc]',
'mhi-lxvkxm-lvtoxgzxk-angm-xgzbgxxkbgz-839[lczyd]',
'iqmbazulqp-rxaiqd-mocgueufuaz-378[ygdtb]',
'plolwdub-judgh-udeelw-uhfhlylqj-309[ludhe]',
'raphhxuxts-hrpktcvtg-wjci-htgkxrth-739[htrxc]',
'pybgmyargtc-hcjjwzcyl-rcaflmjmew-548[ajlkr]',
'lnkfaypeha-ywjzu-ykwpejc-ykjpwejiajp-368[jpyae]',
'vehmsegxmzi-gerhc-gsexmrk-hizipstqirx-100[jzboa]',
'pdjqhwlf-frqvxphu-judgh-iorzhu-frqwdlqphqw-725[yijon]',
'kzgwomvqk-kivlg-kwibqvo-ivitgaqa-720[zjyxm]',
'xqvwdeoh-surmhfwloh-fkrfrodwh-dqdobvlv-855[xwtjs]',
'xgsvgmotm-igtje-iugzotm-sgxqkzotm-228[ygzda]',
'lxwbdvna-pajmn-snuuhknjw-bcxajpn-953[yxark]',
'oaddaeuhq-omzpk-oamfuzs-mocgueufuaz-326[odcqb]',
'jyddc-ikk-hitevxqirx-750[btmqn]',
'oaddaeuhq-rxaiqd-fqotzaxask-586[slgad]',
'kwzzwaqdm-moo-nqvivkqvo-798[epnyr]',
'fydelmwp-nsznzwlep-opalcexpye-847[ltbwy]',
'dszphfojd-cbtlfu-tupsbhf-415[fbdhp]',
'hqcfqwydw-rkddo-skijecuh-iuhlysu-114[ynqsd]',
'ixeumktoi-inuiurgzk-uvkxgzouty-410[acqzt]',
'wlqqp-avccpsvre-wzeretzex-347[bgzvw]',
'vdzonmhydc-bzmcx-bnmszhmldms-157[rspcq]',
'fodvvlilhg-mhoobehdq-whfkqrorjb-699[defpm]',
'gntmfefwitzx-ojqqdgjfs-qfgtwfytwd-827[sypzf]',
'nzydfxpc-rclop-mfyyj-wlmzclezcj-769[clyzf]',
'zuv-ykixkz-pkrrehkgt-rumoyzoiy-852[yhkru]',
'aietsrmdih-glsgspexi-vigimzmrk-802[sgnmt]',
'rgndvtcxr-hrpktcvtg-wjci-igpxcxcv-245[xszoq]',
'nwzekwypera-xqjju-skngodkl-758[kejnw]',
'hcd-gsqfsh-rms-rsdzcmasbh-974[vtsqm]',
'qspkfdujmf-dboez-mphjtujdt-935[diolt]',
'aczupnetwp-nlyoj-afcnsldtyr-665[zemik]',
'aoubshwq-qobrm-qcohwbu-rsdofhasbh-376[wyzxl]',
'xtwtelcj-rclop-ncjzrpytn-ojp-qtylyntyr-353[lyris]',
'zvyvgnel-tenqr-pelbtravp-rtt-fnyrf-819[fjysz]',
'ejpanjwpekjwh-bhksan-paydjkhkcu-420[jkahp]',
'ygcrqpkbgf-hnqygt-hkpcpekpi-882[pgkch]',
'raphhxuxts-gpqqxi-pcpanhxh-817[qinta]',
'gsvvswmzi-wgezirkiv-lyrx-stivexmsrw-646[isvrw]',
'htqtwkzq-kqtbjw-fsfqdxnx-307[ytxis]',
'oqnidbshkd-eknvdq-cdozqsldms-833[dqskn]',
'qxdwpopgsdjh-hrpktcvtg-wjci-hidgpvt-219[tdlsw]',
'fnjyxwrinm-snuuhknjw-jwjuhbrb-459[vjwxy]',
'egdytrixat-eaphixr-vgphh-stepgibtci-609[vrzoq]',
'veqtekmrk-fewoix-pskmwxmgw-906[ekmwx]',
'xmrrq-eadalsjq-yjsvw-usfvq-vwnwdghewfl-502[wqsva]',
'dpotvnfs-hsbef-gvaaz-sbccju-tfswjdft-649[lbipa]',
'zvyvgnel-tenqr-enoovg-ybtvfgvpf-637[kchij]',
'pynffvsvrq-cynfgvp-tenff-ybtvfgvpf-247[dcwfm]',
'xjinphzm-bmvyz-xviyt-xjvodib-zibdizzmdib-187[gbnqs]',
'ugfkmewj-yjsvw-hdsklau-yjskk-dgyaklauk-502[kajsu]',
'iruzfrtkzmv-sleep-tljkfdvi-jvimztv-269[vitze]',
'sawlkjevaz-oywrajcan-dqjp-hkceopeyo-550[gvitx]',
'wfummczcyx-wifilzof-vumeyn-ijyluncihm-604[lnmdw]',
'lahxpnwrl-kdwwh-xynajcrxwb-927[xykwv]',
'xekdwvwnzkqo-fahhuxawj-oanreyao-836[xnzsy]',
'mbiyqoxsm-bkllsd-bokmaescsdsyx-718[thcfp]',
'uiovmbqk-zijjqb-zmamizkp-746[xpstr]',
'nglmtuex-vkrhzxgbv-xzz-ehzblmbvl-293[zblvx]',
'xgvnndadzy-kgvnodx-bmvnn-xpnojhzm-nzmqdxz-577[qgfhc]',
'zntargvp-ohaal-jbexfubc-975[vflzg]',
'vqr-ugetgv-tcddkv-gpikpggtkpi-830[zlyxv]',
'qfkkj-nsznzwlep-nzyeltyxpye-847[satrm]',
'dwbcjkun-yujbcrl-pajbb-fxatbqxy-225[glisn]',
'tyftfcrkv-tljkfdvi-jvimztv-113[tvfij]',
'hjgbwuladw-ugjjgkanw-tskcwl-suimakalagf-450[cneum]',
'vkrhzxgbv-wrx-etuhktmhkr-397[fnspx]',
'elrkdcdugrxv-zhdsrqlchg-edvnhw-vklsslqj-933[dlhrs]',
'etaqigpke-lgnnadgcp-uvqtcig-752[ojdgw]',
'nsyjwsfyntsfq-hmthtqfyj-ywfnsnsl-385[sfnyt]',
'bnmrtldq-fqzcd-bgnbnkzsd-sqzhmhmf-833[jipnm]',
'fkqbokxqflkxi-bdd-xkxivpfp-471[rjlxc]',
'lnkfaypeha-bhksan-hwxknwpknu-316[nsoam]',
'pelbtravp-ohaal-erprvivat-221[kdvcs]',
'dfcxsqhwzs-qobrm-qcohwbu-ghcfous-350[choqs]',
'ejpanjwpekjwh-xwogap-ajcejaanejc-706[dtcks]',
'gzefmnxq-rxaiqd-pqhqxabyqzf-638[yntmj]',
'awzwhofm-ufors-qobrm-qcohwbu-hsqvbczcum-454[tfiay]',
'yknnkoera-yhwooebeaz-lhwopey-cnwoo-klanwpekjo-420[fqbwp]',
'bkzrrhehdc-bzmcx-bnzshmf-cdoknxldms-183[bcdhm]',
'htqtwkzq-jll-jslnsjjwnsl-905[oicay]',
'npmhcargjc-bwc-sqcp-rcqrgle-834[thaxs]',
'njmjubsz-hsbef-dboez-dvtupnfs-tfswjdf-909[qkeyv]',
'qyujihctyx-wuhxs-wiuncha-nywbhifias-994[hiuwy]',
'vetllbybxw-unggr-phkdlahi-475[rqpto]',
'etyyx-dff-qdzbpthrhshnm-313[zpisy]',
'ijmockjgz-jwezxo-nojmvbz-993[jozmb]',
'xgvnndadzy-ytz-mznzvmxc-343[uhztm]',
'pxtihgbsxw-yehpxk-wxitkmfxgm-189[clynd]',
'tfiifjzmv-treup-tfekrzedvek-711[gptfw]',
'sedikcuh-whqtu-rqiauj-iuhlysui-660[znymv]',
'ktwbhtvmbox-ynssr-cxeeruxtg-tvjnblbmbhg-813[kdico]',
'xst-wigvix-wgezirkiv-lyrx-tyvglewmrk-542[igrvw]',
'gzefmnxq-fab-eqodqf-pkq-pqhqxabyqzf-222[examq]',
'ncjzrpytn-nlyoj-nzletyr-ecltytyr-925[jybxu]',
'mvkccspson-lexxi-crszzsxq-588[jirxq]',
'zbytomdsvo-bkllsd-ecob-docdsxq-640[getaf]',
'willimcpy-luvvcn-mbcjjcha-552[snrxz]',
'htsxzrjw-lwfij-gntmfefwitzx-hfsid-htfynsl-xmnuunsl-307[fnsth]',
'iuruxlar-jek-jkvruesktz-306[btnac]',
'cqwdujys-rqiauj-efuhqjyedi-166[rgncm]',
'dpssptjwf-dipdpmbuf-sfbdrvjtjujpo-623[yeftd]',
'pbybeshy-onfxrg-qrfvta-195[wmjer]',
'laffe-pkrrehkgt-uvkxgzouty-644[kefgr]',
'rnqnyfwd-lwfij-jll-fhvznxnynts-723[qmybf]',
'dyz-combod-lkcuod-wkbuodsxq-354[suqhl]',
'gpewwmjmih-veffmx-ywiv-xiwxmrk-464[zyowm]',
'xfbqpojafe-cvooz-bdrvjtjujpo-363[qgvxl]',
'wyvqljapsl-kfl-huhsfzpz-435[yomjs]',
'hqcfqwydw-rqiauj-huiuqhsx-478[kxtls]',
'ajyqqgdgcb-zsllw-qrmpyec-392[nztsu]',
'luxciuwncpy-zfiqyl-xypyfijgyhn-838[bzpxo]',
'kdijqrbu-fbqijys-whqii-udwyduuhydw-556[ihdkv]',
'mfklstdw-bwddqtwsf-ghwjslagfk-450[esbmd]',
'lugjuacha-dyffsvyuh-nywbhifias-578[snqwt]',
'ibghopzs-qobrm-qcohwbu-fsoqeiwgwhwcb-298[ndtlm]',
'nwilwcejc-oywrajcan-dqjp-hkceopeyo-394[cejow]',
'vqr-ugetgv-tcddkv-ucngu-258[xetvd]',
'rtt-genvavat-975[taveg]',
'willimcpy-dyffsvyuh-xyjfisgyhn-656[mrjtz]',
'dmbttjgjfe-njmjubsz-hsbef-gmpxfs-dpoubjonfou-779[yakzv]',
'muqfedyput-rqiauj-kiuh-juijydw-660[rkonp]',
'pkl-oaynap-fahhuxawj-odellejc-342[xmeyn]',
'qlm-pbzobq-yrkkv-pbosfzbp-315[bpkoq]',
'wihmogyl-aluxy-vumeyn-ijyluncihm-292[yilmu]',
'mvydjvxodqz-wvnfzo-nvgzn-291[vnzdo]',
'ikhcxvmbex-ktuubm-hixktmbhgl-215[bhkmx]',
'willimcpy-vohhs-xypyfijgyhn-136[pmnwz]',
'ynukcajey-xwogap-skngodkl-394[enjif]',
'ynukcajey-xqjju-zalwnpiajp-394[kzjlp]',
'yhtwhnpun-jovjvshal-hjxbpzpapvu-539[ycldk]',
'nglmtuex-ktuubm-ybgtgvbgz-397[qvtsp]',
'cvabijtm-rmttgjmiv-lmxtwgumvb-174[zpytf]',
'fnjyxwrinm-ajkkrc-mnyuxhvnwc-459[molxs]',
'mybbycsfo-mkxni-yzobkdsyxc-614[ybckm]',
'ugdgjxmd-kusnwfywj-zmfl-ghwjslagfk-840[gfjwd]',
'wbhsfbohwcboz-dzoghwq-ufogg-cdsfohwcbg-766[obghw]',
'qekrixmg-ikk-qerekiqirx-334[aeldt]',
'ktwbhtvmbox-mhi-lxvkxm-utldxm-lmhktzx-657[yqjuz]',
'ugdgjxmd-wyy-klgjsyw-216[gydjw]',
'ucynmlgxcb-cee-pcacgtgle-704[tonxb]',
'nsyjwsfyntsfq-hmthtqfyj-ijxnls-931[rdmog]',
'enzcntvat-ohaal-ybtvfgvpf-455[cvmts]',
'aczupnetwp-nsznzwlep-dlwpd-483[ticoy]',
'wbhsfbohwcboz-pibbm-cdsfohwcbg-350[oebpc]',
'shmml-cynfgvp-tenff-znexrgvat-715[fyuze]',
'sedikcuh-whqtu-uww-huiuqhsx-582[ytsoz]',
'ynukcajey-pkl-oaynap-xwogap-nayaerejc-550[mnfkp]',
'ipvohghykvbz-zjhclunly-obua-huhsfzpz-851[mecug]',
'tbxmlkfwba-oxyyfq-ixyloxqlov-783[woqrj]',
'zilqwikbqdm-kpwkwtibm-abwziom-954[yzmul]',
'kwvacumz-ozilm-zijjqb-apqxxqvo-590[dojfm]',
'bqxnfdmhb-bgnbnkzsd-sdbgmnknfx-547[ajynr]',
'sbnqbhjoh-dmbttjgjfe-dipdpmbuf-gjobodjoh-103[bjdoh]',
'jsvagsulanw-usfvq-vwhsjlewfl-268[kiwfy]',
'mbggf-buzahisl-lnn-wbyjohzpun-539[bnghl]',
'htwwtxnaj-jll-ijajqturjsy-411[zhgpj]',
'amppmqgtc-aylbw-amyrgle-pcacgtgle-496[agclm]',
'jchipqat-uadltg-detgpixdch-895[wogif]',
'ktfitzbgz-unggr-tgterlbl-605[wjmxs]',
'yhwooebeaz-ywjzu-ykwpejc-bejwjyejc-498[sgtmz]',
'muqfedyput-fbqijys-whqii-cqhaujydw-634[pcekt]',
'udskkaxawv-mfklstdw-hdsklau-yjskk-dstgjslgjq-840[xeguw]',
'hqfxxnknji-kqtbjw-qfgtwfytwd-983[fqtwj]',
'veqtekmrk-fyrrc-eguymwmxmsr-490[stfrm]',
'awzwhofm-ufors-ksodcbwnsr-tzcksf-gozsg-298[sofwz]',
'tinnm-gqojsbusf-vibh-gsfjwqsg-714[dczyw]',
'nuatmlmdpage-ngzzk-pqbxakyqzf-456[azgkm]',
'htsxzrjw-lwfij-kqtbjw-jslnsjjwnsl-255[ldqwi]',
'zotts-zfiqyl-lywycpcha-136[fwujm]',
'odiih-kdwwh-ldbcxvna-bnaerln-927[dnabh]',
'nuatmlmdpage-dmnnuf-xasuefuoe-586[atenm]',
'qxdwpopgsdjh-rpcsn-rdpixcv-htgkxrth-739[pkxqs]',
'iruzfrtkzmv-sleep-rthlzjzkzfe-633[ykopj]',
'ejpanjwpekjwh-nwxxep-zaoecj-550[ejpwa]',
'rwcnawjcrxwju-kdwwh-cnlqwxuxph-979[genou]',
'rwcnawjcrxwju-lqxlxujcn-mnyuxhvnwc-719[wxnmk]',
'qlm-pbzobq-avb-jxohbqfkd-601[mzylk]',
'lqwhuqdwlrqdo-vfdyhqjhu-kxqw-frqwdlqphqw-881[qwdhl]',
'udglrdfwlyh-gbh-ilqdqflqj-439[otyms]',
'hvbizodx-ezggtwzvi-nojmvbz-473[isoun]',
'dzczkrip-xiruv-vxx-ivtvzmzex-763[yblku]',
'oaxadrgx-bxmefuo-sdmee-xasuefuoe-222[exaou]',
'bnmrtldq-fqzcd-bzmcx-vnqjrgno-287[odhcx]',
'wifilzof-wbiwifuny-lyuwkocmcncih-864[iwcfl]',
'ynukcajey-ydkykhwpa-yqopkian-oanreya-654[cghze]',
'egdytrixat-qjccn-bpgztixcv-349[ctgix]',
'slqryzjc-cee-kypicrgle-210[gqknl]',
'jef-iushuj-rqiauj-iqbui-296[mzxhd]',
'qcffcgwjs-gqojsbusf-vibh-gozsg-220[gsfbc]',
'eadalsjq-yjsvw-usfvq-ugslafy-vwnwdghewfl-606[kyzev]',
'willimcpy-jfumncw-alumm-xymcah-318[mclai]',
'excdklvo-bkllsd-vklybkdybi-432[hrznt]',
'gntmfefwitzx-gzssd-xjwanhjx-801[xfgjn]',
'qzoggwtwsr-xszzmpsob-fsgsofqv-636[cthmf]',
'houngfgxjuay-hgyqkz-xkykgxin-332[spxuj]',
'oaxadrgx-eomhqzsqd-tgzf-qzsuzqqduzs-638[meluy]',
'qzoggwtwsr-qobrm-rsdzcmasbh-740[fwsan]',
'bkzrrhehdc-sno-rdbqds-qzaahs-vnqjrgno-339[rdhnq]',
'excdklvo-cmkfoxqob-rexd-zebmrkcsxq-458[xaqgb]',
'oknkvcta-itcfg-uecxgpigt-jwpv-fgukip-856[gcikp]',
'shoewudys-rkddo-qdqboiyi-530[doiqs]',
'mrxivrexmsrep-gerhc-pefsvexsvc-828[ubzia]',
'fydelmwp-mfyyj-hzcvdsza-769[anbml]',
'rwcnawjcrxwju-ljwmh-bqryyrwp-277[nxatm]'
]
# Script entry point: solve the puzzle over the room list defined above.
if __name__ == "__main__":
    find_room_code()
|
thatguyandy27/advent-of-code-2016
|
problem8.py
|
Python
|
mit
| 50,759
|
from django.db import models
class AbstractDnDModel(models.Model):
    """
    Abstract base model for the D&D tables.

    Creates no database table itself; concrete models inherit from it to
    pick up the shared Meta options below.
    """
    class Meta:
        # Abstract: Django will not create a table for this model.
        abstract = True
        # Registers the model under the 'webdnd' app (presumably inherited
        # by concrete subclasses -- confirm against Django Meta inheritance).
        app_label = 'webdnd'
|
Saevon/webdnd
|
dnd/models/abstract.py
|
Python
|
mit
| 184
|
import argparse
from sqlalchemy import create_engine
from irco import models
from irco.logging import sentry
def main():
    """Create all irco database tables in the given database.

    Command line: ``irco-init [-v] DATABASE``.
    """
    parser = argparse.ArgumentParser('irco-init')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('database')
    options = parser.parse_args()

    # Tag the error-reporting context so any failure identifies this command
    # and the arguments it ran with.
    sentry.context.merge({
        'tags': {'command': 'irco-init'},
        'extra': {'parsed_arguments': options.__dict__},
    })

    # echo=True makes SQLAlchemy log every SQL statement it emits.
    db_engine = create_engine(options.database, echo=options.verbose)
    models.Base.metadata.create_all(db_engine)
|
GaretJax/irco
|
irco/scripts/init.py
|
Python
|
mit
| 553
|
def ip_to_long(ip):
    """Convert a dotted-quad IP string to its integer value.

    Input: "192.168.0.1"
    Output: 3232235521
    """
    import socket
    import struct
    # inet_aton packs the address as 4 network-order bytes; ">L" reads
    # them back as one big-endian unsigned 32-bit integer.
    packed = socket.inet_aton(ip)
    return struct.unpack(">L", packed)[0]
def long_to_ip(long):
    """Convert an integer IP value back to a dotted-quad string.

    Input: 3232235521
    Output: "192.168.0.1"
    """
    import socket
    import struct
    # Pack the integer as 4 big-endian bytes, then render dotted-quad.
    # (Parameter name 'long' kept for interface compatibility.)
    packed = struct.pack(">L", long)
    return socket.inet_ntoa(packed)
#Helper Functions relying on external data:
# Cached external (WAN) IP as a dotted-quad string; populated by
# get_external_ip() and consumed by ip_to_long_external().
External_IP = None
class IPError(Exception):
    """Raised when an IP address cannot be determined or converted.

    Must derive from Exception: this class is raised with ``raise IPError``
    elsewhere in the file, and non-Exception classes cannot be raised in
    modern Python.
    """

    def __init__(self, error=""):
        # Pass the message to Exception so str()/args behave conventionally,
        # and keep it on .error for existing callers.
        Exception.__init__(self, error)
        self.error = error

    def __repr__(self):
        # Historical behavior: repr() is the bare message text.
        return self.error
def ip_to_long_external(ip):
    """Converts a dotted-quad IP to an integer, substituting the external
    (WAN) address for internal-looking IPs.

    Relies on the module-level ``External_IP`` (dotted-quad string), which
    should be set via get_external_ip() first.
    Assuming your WAN IP is 69.123.251.160,
    Input: "192.168.0.1"
    Output: 1165753248 (69.123.251.160 encoded)

    Raises IPError if the IP looks internal and External_IP is unset.
    """
    global External_IP
    dip = ip_to_long(ip)  # assume ip is well-formed
    # int() replaces the Python-2-only long() builtin; octet values fit in int.
    first_octet = int(ip.split('.')[0])
    # NOTE(review): this "internal" test only matches 192.x and 127.x -- it
    # misses 10.x and 172.16-31.x, and wrongly flags public 192.x addresses
    # outside 192.168/16. Preserved as-is to avoid a behavior change.
    internal_first_octets = (192, 127)
    if first_octet in internal_first_octets:
        if External_IP is None:
            raise IPError("External_IP is not set; call get_external_ip() first")
        dip = ip_to_long(External_IP)
    return dip
def get_external_ip(nocache=False):
    """Gets the external IP from checkip.dyndns.org and caches it in the
    module-level External_IP.

    Warning: the HTTP request has no timeout.

    nocache -- when True, ignore the cached value and query again.
    Raises IPError on any network or parse failure.
    """
    global External_IP
    # Serve the cached answer unless the caller explicitly opts out.
    if External_IP is not None and not nocache:
        return External_IP
    try:
        import urllib  # NOTE: Python 2 API (urllib.urlopen)
        u = urllib.urlopen("http://checkip.dyndns.org/")
        n = u.read(1024)
        u.close()
    except Exception:
        # Network-level failure (DNS, connect, read).
        raise IPError("Unable to get IP from http://checkip.dyndns.org/")
    # Parse outside the try block: previously a bare except swallowed the
    # detailed parse-failure IPError below and replaced it with the generic
    # message above.
    import re
    ip = re.findall("<body>Current IP Address: ([^<]*)</body>", n)
    if len(ip) == 0:
        raise IPError("Unable to get IP from http://checkip.dyndns.org/: %s"%(n))
    External_IP = ip[0]
    return External_IP
# Self-test entry point. NOTE: Python 2 only -- the long literals
# (3232235521L) and the print statement below are syntax errors in Python 3.
if __name__ == "__main__":
    import unittest
    class IPConversionTestCase(unittest.TestCase):
        # Pure round-trip checks; no network access required.
        def runTest(self):
            assert ip_to_long("192.168.0.1") == 3232235521L, 'ip_to_long failure'
            assert long_to_ip(3232235521L) == "192.168.0.1", 'long_to_ip failure'
            #print "IP converstion tests OK"
    class ExternalIPTestcase(unittest.TestCase):
        # Live test: performs an HTTP request (with no timeout) to dyndns.org.
        def runTest(self):
            ip = get_external_ip()
            #print "External IP is %s"%(ip)
            assert ip != None, 'get_external_ip() failed'
    import time
    print time.strftime("[%Y-%m-%d %H:%M]: ") + "Running ip.py testcases..."
    unittest.main()
|
EBNull/py-wol
|
ip.py
|
Python
|
gpl-3.0
| 2,810
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Parser:
    """
    Abstract base class for seminar parsers.
    """
    # Python 2 style metaclass declaration (ignored under Python 3).
    __metaclass__ = ABCMeta

    def __init__(self, config):
        """
        Initialize from a config dictionary.
        """

    @abstractmethod
    def find_headers(self, source):
        """
        Locate the header nodes of all seminars in *source*.
        """

    @abstractmethod
    def parse_seminar(self, header):
        """
        Build a seminar object from a single *header*.
        """

    @staticmethod
    def get(source_name, config):
        """
        Instantiate the Parser implementation registered for *source_name*.
        """
        import util
        return util.get_class(__name__, source_name, Parser)(config)
class DOMParser(Parser):
    """
    Parser that extracts seminars from a DOM tree using XPath selectors
    and regular expressions.
    """
    def __init__(self, config):
        """
        Initialized with config dictionary
        config['selector']: XPath selector to find header of seminars
        config['namespaces']: Dictionary of namespaces for XPath
        config['items']: Array of dictionaries to parse subitems of seminars
            [n]['selector']: XPath selector to find subitem
            [n]['pattern']: Regexp pattern of subitem (may contain named groups
                named 'title', 'date', 'time', 'place' or 'contents')
            [n]['dateformat'], [n]['timeformat']: DateTime format to parse
                matched 'date' or 'time'
        """
        import re
        self.selector = config.get('selector', '.');
        self.namespaces = config.get('namespaces', {});
        items = config.get('items', []);
        # Pre-compile each item's pattern once; missing keys fall back to
        # permissive defaults ('.' selects the node itself, '.*' matches any).
        self.items = [{
            'selector': item.get('selector', '.'),
            'pattern': re.compile(item.get('pattern', '.*')),
            'dateformat': item.get('dateformat'),
            'timeformat': item.get('timeformat'),
        } for item in items];
    def find_headers(self, source):
        """
        Return the seminar header elements found in *source* via XPath.
        """
        return source.xpath(self.selector, namespaces = self.namespaces);
    def parse_seminar(self, header, defaults = {}):
        """
        Parse one seminar from a header DOM element.

        NOTE(review): the mutable default ``defaults={}`` is only safe if it
        is never mutated; nothing mutates it here, and Seminar.parse_defaults
        presumably returns a fresh dict -- confirm.
        """
        from datetime import date, datetime
        from seminar import Seminar
        _defaults = Seminar.parse_defaults(defaults)
        seminar = Seminar(**_defaults)
        # strptime with an empty format yields the default datetime; its
        # year (1900 in CPython) is used as a sentinel meaning "the parsed
        # date carried no year".
        YEAR_UNDEFINED = datetime.strptime('', '').date().year
        today = date.today()
        # For every configured item: run its XPath selector under the header,
        # match each hit's text against the item's pattern, and keep every
        # non-empty named group whose name is a known Seminar item name.
        for name, value, dateformat, timeformat in [
            (name, match[1], item['dateformat'], item['timeformat'])
            for matches, item in [(item['pattern'].match(e.text), item)
                for item in self.items
                for e in header.xpath(item['selector'], namespaces = self.namespaces)]
            if matches
            for match in matches.groupdict().items()
            for name in Seminar.ITEM_NAMES if match[1] and name == match[0]
        ]:
            if name == 'date':
                value = datetime.strptime(value, dateformat).date()
                if value.year == YEAR_UNDEFINED:
                    # Year missing from the source text: borrow it from the
                    # defaults (or from today's date).
                    value = date(_defaults.get('date', today).year, value.month, value.day)
            elif name == 'time':
                value = datetime.strptime(value, timeformat).time()
            attr = getattr(seminar, name)
            # List-valued seminar attributes accumulate; scalars are replaced.
            if isinstance(attr, list):
                attr.append(value)
            else:
                setattr(seminar, name, value)
        return seminar
|
ukatama/seminarnotifier
|
seminarnotifier/seminarparser.py
|
Python
|
mit
| 3,451
|
import MapReduce
import sys
"""
Word Count Example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    """Map phase: emit the trimmed nucleotide string keyed by itself.

    record is a 2-element list [sequence id, nucleotides]; the last 10
    characters of the nucleotide string are dropped.
    """
    seq_id = record[0]
    trimmed = record[1][:-10]
    mr.emit_intermediate(trimmed, seq_id)
def reducer(key, list_of_values):
    """Reduce phase: emit each unique trimmed sequence once.

    key is the trimmed nucleotide string; the list of sequence ids is
    deliberately ignored (only uniqueness matters).
    """
    mr.emit(key)
# Do not modify below this line
# =============================
if __name__ == '__main__':
    # argv[1] is the path to the JSON input file (e.g. dna.json).
    # NOTE(review): the file handle is never closed — acceptable for a
    # short-lived script.
    inputdata = open(sys.argv[1])
    mr.execute(inputdata, mapper, reducer)
"""
Consider a set of key-value pairs where each key is sequence id and each value is a string of nucleotides, e.g., GCTTCCGAAATGCTCGAA....
Write a MapReduce query to remove the last 10 characters from each string of nucleotides, then remove any duplicates generated.
Map Input
Each input record is a 2 element list [sequence id, nucleotides] where sequence id is a string representing a unique identifier for the sequence and nucleotides is a string representing a sequence of nucleotides
Reduce Output
The output from the reduce function should be the unique trimmed nucleotide strings.
You can test your solution to this problem using dna.json:
$ python unique_trims.py dna.json
You can verify your solution by comparing your result with the file unique_trims.json.
"""
|
okkhoy/pyDataAnalysis
|
dataAnalysis/take3/unique_trims.py
|
Python
|
mit
| 1,422
|
# -*- coding: utf-8 -*-
"""Compute resolution matrix for linear estimators."""
# Authors: olaf.hauk@mrc-cbu.cam.ac.uk
#
# License: BSD-3-Clause
from copy import deepcopy
import numpy as np
from .. import pick_channels_forward, EvokedArray, SourceEstimate
from ..io.constants import FIFF
from ..utils import logger, verbose
from ..forward.forward import convert_forward_solution
from ..minimum_norm import apply_inverse
from ..source_estimate import _prepare_label_extraction
from ..label import Label
@verbose
def make_inverse_resolution_matrix(forward, inverse_operator, method='dSPM',
                                   lambda2=1. / 9., verbose=None):
    """Compute resolution matrix for linear inverse operator.

    Parameters
    ----------
    forward : instance of Forward
        Forward Operator.
    inverse_operator : instance of InverseOperator
        Inverse operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse method to use (MNE, dSPM, sLORETA).
    lambda2 : float
        The regularisation parameter.
    %(verbose)s

    Returns
    -------
    resmat: array, shape (n_orient_inv * n_dipoles, n_orient_fwd * n_dipoles)
        Resolution matrix (inverse operator times forward operator).
        The result of applying the inverse operator to the forward operator.
        If source orientations are not fixed, all source components will be
        computed (i.e. for n_orient_inv > 1 or n_orient_fwd > 1).
        The columns of the resolution matrix are the point-spread functions
        (PSFs) and the rows are the cross-talk functions (CTFs).
    """
    # make sure forward and inverse operator match (surface orientation)
    inv = inverse_operator
    fwd = _convert_forward_match_inv(forward, inv)
    # don't include bad channels: only use good channels from inverse operator
    bads_inv = inv['info']['bads']
    # good channels
    ch_names = [c for c in inv['info']['ch_names'] if (c not in bads_inv)]
    fwd = pick_channels_forward(fwd, ch_names, ordered=True)
    # get leadfield matrix from forward solution
    leadfield = fwd['sol']['data']
    invmat = _get_matrix_from_inverse_operator(inv, fwd,
                                               method=method, lambda2=lambda2)
    # resolution matrix = inverse matrix applied to the leadfield
    resmat = invmat.dot(leadfield)
    logger.info('Dimensions of resolution matrix: %d by %d.' % resmat.shape)
    return resmat
@verbose
def _get_psf_ctf(resmat, src, idx, func, mode, n_comp, norm, return_pca_vars,
                 verbose=None):
    """Get point-spread (PSFs) or cross-talk (CTFs) functions.

    Parameters
    ----------
    resmat : array, shape (n_dipoles, n_dipoles)
        Forward Operator.
    src : Source Space
        Source space used to compute resolution matrix.
    %(pctf_idx)s
    func : str ('psf' | 'ctf')
        Whether to produce PSFs or CTFs. Defaults to psf.
    %(pctf_mode)s
    %(pctf_n_comp)s
    %(pctf_norm)s
    %(pctf_return_pca_vars)s
    %(verbose)s

    Returns
    -------
    %(pctf_stcs)s
    %(pctf_pca_vars)s
    """
    # check for consistencies in input parameters
    _check_get_psf_ctf_params(mode, n_comp, return_pca_vars)

    # backward compatibility: old API used norm=True for max-normalisation
    if norm is True:
        norm = 'max'

    # get relevant vertices in source space (one array per label/list)
    verts_all = _vertices_for_get_psf_ctf(idx, src)

    # vertices used in forward and inverse operator
    vertno_lh = src[0]['vertno']
    vertno_rh = src[1]['vertno']
    vertno = [vertno_lh, vertno_rh]

    # the following will operate on columns of funcs;
    # CTFs are the rows of the resolution matrix, so transpose for 'ctf'
    if func == 'ctf':
        resmat = resmat.T

    # Functions and variances per label
    stcs = []
    pca_vars = []
    for verts in verts_all:
        # get relevant PSFs or CTFs for specified vertices
        funcs = resmat[:, verts]
        # normalise PSFs/CTFs if requested
        if norm is not None:
            funcs = _normalise_psf_ctf(funcs, norm)
        # summarise PSFs/CTFs across vertices if requested
        pca_var = None  # variances computed only if return_pca_vars=True
        if mode is not None:
            funcs, pca_var = _summarise_psf_ctf(funcs, mode, n_comp,
                                                return_pca_vars)
        # convert to source estimate
        stc = SourceEstimate(funcs, vertno, tmin=0., tstep=1.)
        stcs.append(stc)
        pca_vars.append(pca_var)
    # if just one list or label specified, simplify output
    if len(stcs) == 1:
        stcs = stc
    if len(pca_vars) == 1:
        pca_vars = pca_var
    # NOTE(review): 'pca_var' here is the value from the LAST loop iteration;
    # it is non-None only when mode='pca' and return_pca_vars=True, which is
    # the same for every iteration, so this works — but it reads fragile.
    if pca_var is not None:
        return stcs, pca_vars
    else:
        return stcs
def _check_get_psf_ctf_params(mode, n_comp, return_pca_vars):
"""Check input parameters of _get_psf_ctf() for consistency."""
if mode in [None, 'sum', 'mean'] and n_comp > 1:
msg = 'n_comp must be 1 for mode=%s.' % mode
raise ValueError(msg)
if mode != 'pca' and return_pca_vars:
msg = 'SVD variances can only be returned if mode=''pca''.'
raise ValueError(msg)
def _vertices_for_get_psf_ctf(idx, src):
    """Get vertices in source space for PSFs/CTFs in _get_psf_ctf().

    idx must be a list: of Labels, of vertex indices, or of lists of vertex
    indices. Returns a list of index arrays/lists, one per label or sublist.
    """
    # if label(s) specified get the indices, otherwise just carry on
    if type(idx[0]) is Label:
        # specify without source time courses, gets indices per label
        verts_labs, _ = _prepare_label_extraction(
            stc=None, labels=idx, src=src, mode='mean', allow_empty=False,
            use_sparse=False)
        # verts_labs can be list of lists
        # concatenate indices per label across hemispheres
        # one list item per label
        verts = []
        for v in verts_labs:
            # if two hemispheres present
            if type(v) is list:
                # indices for both hemispheres in one list
                this_verts = np.concatenate((v[0], v[1]))
            else:
                this_verts = np.array(v)
            verts.append(this_verts)
    # check if list of list or just list
    else:
        if type(idx[0]) is list:  # if list of list of integers
            verts = idx
        else:  # if list of integers
            verts = [idx]
    return verts
def _normalise_psf_ctf(funcs, norm):
"""Normalise PSFs/CTFs in _get_psf_ctf()."""
# normalise PSFs/CTFs if specified
if norm == 'max':
maxval = max(-funcs.min(), funcs.max())
funcs = funcs / maxval
elif norm == 'norm': # normalise to maximum norm across columns
norms = np.linalg.norm(funcs, axis=0)
funcs = funcs / norms.max()
return funcs
def _summarise_psf_ctf(funcs, mode, n_comp, return_pca_vars):
"""Summarise PSFs/CTFs across vertices."""
from scipy import linalg
s_var = None # only computed for return_pca_vars=True
if mode == 'maxval': # pick PSF/CTF with maximum absolute value
absvals = np.maximum(-np.min(funcs, axis=0), np.max(funcs, axis=0))
if n_comp > 1: # only keep requested number of sorted PSFs/CTFs
sortidx = np.argsort(absvals)
maxidx = sortidx[-n_comp:]
else: # faster if only one required
maxidx = absvals.argmax()
funcs = funcs[:, maxidx]
elif mode == 'maxnorm': # pick PSF/CTF with maximum norm
norms = np.linalg.norm(funcs, axis=0)
if n_comp > 1: # only keep requested number of sorted PSFs/CTFs
sortidx = np.argsort(norms)
maxidx = sortidx[-n_comp:]
else: # faster if only one required
maxidx = norms.argmax()
funcs = funcs[:, maxidx]
elif mode == 'sum': # sum across PSFs/CTFs
funcs = np.sum(funcs, axis=1)
elif mode == 'mean': # mean of PSFs/CTFs
funcs = np.mean(funcs, axis=1)
elif mode == 'pca': # SVD across PSFs/CTFs
# compute SVD of PSFs/CTFs across vertices
u, s, _ = linalg.svd(funcs, full_matrices=False)
funcs = u[:, :n_comp]
# if explained variances for SVD components requested
if return_pca_vars:
# explained variance of individual SVD components
s2 = s * s
s_var = 100 * s2[:n_comp] / s2.sum()
return funcs, s_var
@verbose
def get_point_spread(resmat, src, idx, mode=None, n_comp=1, norm=False,
                     return_pca_vars=False, verbose=None):
    """Get point-spread (PSFs) functions for vertices.

    Thin wrapper around _get_psf_ctf() with func='psf'.

    Parameters
    ----------
    resmat : array, shape (n_dipoles, n_dipoles)
        Forward Operator.
    src : instance of SourceSpaces
        Source space used to compute resolution matrix.
    %(pctf_idx)s
    %(pctf_mode)s
    %(pctf_n_comp)s
    %(pctf_norm)s
    %(pctf_return_pca_vars)s
    %(verbose)s

    Returns
    -------
    %(pctf_stcs)s
    %(pctf_pca_vars)s
    """
    # NOTE(review): 'verbose' is not forwarded — presumably handled by the
    # @verbose decorator; confirm.
    return _get_psf_ctf(resmat, src, idx, func='psf', mode=mode, n_comp=n_comp,
                        norm=norm, return_pca_vars=return_pca_vars)
@verbose
def get_cross_talk(resmat, src, idx, mode=None, n_comp=1, norm=False,
                   return_pca_vars=False, verbose=None):
    """Get cross-talk (CTFs) function for vertices.

    Thin wrapper around _get_psf_ctf() with func='ctf'.

    Parameters
    ----------
    resmat : array, shape (n_dipoles, n_dipoles)
        Forward Operator.
    src : instance of SourceSpaces
        Source space used to compute resolution matrix.
    %(pctf_idx)s
    %(pctf_mode)s
    %(pctf_n_comp)s
    %(pctf_norm)s
    %(pctf_return_pca_vars)s
    %(verbose)s

    Returns
    -------
    %(pctf_stcs)s
    %(pctf_pca_vars)s
    """
    # NOTE(review): 'verbose' is not forwarded — presumably handled by the
    # @verbose decorator; confirm.
    return _get_psf_ctf(resmat, src, idx, func='ctf', mode=mode, n_comp=n_comp,
                        norm=norm, return_pca_vars=return_pca_vars)
def _convert_forward_match_inv(fwd, inv):
    """Ensure forward and inverse operators match.

    Inverse operator and forward operator must have same surface orientations,
    but can have different source orientation constraints.

    Returns the forward operator, possibly converted (the input may be
    returned unchanged).
    """
    # did inverse operator use fixed orientation?
    is_fixed_inv = _check_fixed_ori(inv)
    # did forward operator use fixed orientation?
    is_fixed_fwd = _check_fixed_ori(fwd)
    # if inv or fwd fixed: do nothing
    # if inv loose: surf_ori must be True
    # if inv free: surf_ori must be False
    if not is_fixed_inv and not is_fixed_fwd:
        # loose inverse: the orientation prior deviates from 1 somewhere
        is_loose_inv = not (inv['orient_prior']['data'] == 1.).all()
        if is_loose_inv:
            if not fwd['surf_ori']:
                fwd = convert_forward_solution(fwd, surf_ori=True)
        elif fwd['surf_ori']:  # free orientation, change fwd
            fwd = convert_forward_solution(fwd, surf_ori=False)
    return fwd
def _prepare_info(inverse_operator):
    """Get a usable measurement info dict from an inverse operator."""
    # in order to convert sub-leadfield matrix to evoked data type (pretending
    # it's an epoch, see in loop below), uses 'info' from inverse solution
    # because this has all the correct projector information
    info = deepcopy(inverse_operator['info'])
    with info._unlock():
        info['sfreq'] = 1000.  # necessary
        info['projs'] = inverse_operator['projs']
    return info
def _get_matrix_from_inverse_operator(inverse_operator, forward, method='dSPM',
                                      lambda2=1. / 9.):
    """Get inverse matrix from an inverse operator.

    Currently works only for fixed/loose orientation constraints
    For loose orientation constraint, the CTFs are computed for the normal
    component (pick_ori='normal').

    Parameters
    ----------
    inverse_operator : instance of InverseOperator
        The inverse operator.
    forward : instance of Forward
        The forward operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse methods (for apply_inverse).
    lambda2 : float
        The regularization parameter (for apply_inverse).

    Returns
    -------
    invmat : array, shape (n_dipoles, n_channels)
        Inverse matrix associated with inverse operator and specified
        parameters.
    """
    # make sure forward and inverse operators match with respect to
    # surface orientation
    # NOTE(review): the (possibly converted) forward returned by this call is
    # discarded, and 'forward' is not used again below — confirm whether this
    # call is still needed.
    _convert_forward_match_inv(forward, inverse_operator)
    info_inv = _prepare_info(inverse_operator)
    # only use channels that are good for inverse operator and forward sol
    ch_names_inv = info_inv['ch_names']
    n_chs_inv = len(ch_names_inv)
    bads_inv = inverse_operator['info']['bads']
    # indices of bad channels
    ch_idx_bads = [ch_names_inv.index(ch) for ch in bads_inv]
    # create identity matrix as input for inverse operator
    # set elements to zero for non-selected channels
    id_mat = np.eye(n_chs_inv)
    # convert identity matrix to evoked data type (pretending it's an epoch)
    ev_id = EvokedArray(id_mat, info=info_inv, tmin=0.)
    # apply inverse operator to identity matrix in order to get inverse matrix
    # free orientation constraint not possible because apply_inverse would
    # combine components
    # check if inverse operator uses fixed source orientations
    is_fixed_inv = _check_fixed_ori(inverse_operator)
    # choose pick_ori according to inverse operator
    if is_fixed_inv:
        pick_ori = None
    else:
        pick_ori = 'vector'
    # columns for bad channels will be zero
    invmat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
                              method=method, pick_ori=pick_ori)
    # turn source estimate into numpy array
    invmat = invmat_op.data
    # remove columns for bad channels
    # take into account it may be 3D array
    invmat = np.delete(invmat, ch_idx_bads, axis=invmat.ndim - 1)
    # if 3D array, i.e. multiple values per location (fixed and loose),
    # reshape into 2D array
    if invmat.ndim == 3:
        # keep two sample values to validate the reshape below
        v0o1 = invmat[0, 1].copy()
        v3o2 = invmat[3, 2].copy()
        shape = invmat.shape
        invmat = invmat.reshape(shape[0] * shape[1], shape[2])
        # make sure that reshaping worked
        assert np.array_equal(v0o1, invmat[1])
        assert np.array_equal(v3o2, invmat[11])
    logger.info("Dimension of Inverse Matrix: %s" % str(invmat.shape))
    return invmat
def _check_fixed_ori(inst):
    """Return True if the inverse/forward was computed with fixed orientations."""
    return inst['source_ori'] != FIFF.FIFFV_MNE_FREE_ORI
|
bloyl/mne-python
|
mne/minimum_norm/resolution_matrix.py
|
Python
|
bsd-3-clause
| 14,150
|
import json
from websocket import create_connection
class Connection(object):
    """Thin wrapper around a websocket connection that speaks JSON."""

    def __init__(self, socket_url, api_key, client_name):
        """Open the websocket, identifying the client in the URL and the
        API key in a request header."""
        endpoint = '{0}?clientName={1}'.format(socket_url, client_name)
        headers = ["X-API-Key: {0}".format(api_key)]
        self._conn = create_connection(url=endpoint, header=headers)

    def send(self, message):
        """Serialise *message* to JSON and send it over the socket."""
        payload = json.dumps(message)
        self._conn.send(payload)

    def receive(self):
        """Block for the next frame and decode it from JSON."""
        frame = self._conn.recv()
        return json.loads(frame)

    def close(self):
        """Close the underlying websocket."""
        self._conn.close()
|
exitcodezero/picloud-client-python
|
picloud_client/connection.py
|
Python
|
mit
| 515
|
from mock import Mock
from zeit.cms.checkout.helper import checked_out
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
import gocept.lxml.objectify
import zeit.cms.content.sources
import zeit.cms.interfaces
import zeit.cms.testing
import zope.interface
class ExampleSource(zeit.cms.content.sources.XMLSource):
    """Test fixture: XML source whose items restrict availability
    via the 'available' interface attribute."""

    attribute = 'id'  # XML attribute used as the source value

    def _get_tree(self):
        # Inline XML fixture instead of loading source config from a URL.
        return gocept.lxml.objectify.fromstring("""\
<items>
<item id="one">One</item>
<item id="two" available="zeit.cms.interfaces.ICMSContent">Two</item>
<item id="three" available="zeit.cms.interfaces.IAsset">Three</item>
</items>
""")
class UnresolveableSource(zeit.cms.content.sources.XMLSource):
    """Test fixture: source whose 'available' interface cannot be imported."""

    attribute = 'id'

    def _get_tree(self):
        return gocept.lxml.objectify.fromstring("""\
<items>
<item id="foo" available="foo.bar.IAintResolveable">Foo</item>
</items>
""")
class XMLSourceTest(zeit.cms.testing.ZeitCmsTestCase):
    """The 'available' XML attribute restricts values to matching contexts."""

    def test_values_without_available_attribute_are_returned_for_all_contexts(
            self):
        source = ExampleSource().factory
        context = Mock()  # provides no interfaces
        self.assertEqual(['one'], source.getValues(context))

    def test_values_are_only_available_if_context_provides_that_interface(
            self):
        source = ExampleSource().factory
        context = Mock()
        zope.interface.alsoProvides(context, zeit.cms.interfaces.ICMSContent)
        self.assertEqual(['one', 'two'], source.getValues(context))

    def test_unresolveable_interfaces_should_make_item_unavailable(self):
        source = UnresolveableSource().factory
        context = Mock()
        self.assertEqual([], source.getValues(context))

    def test_available_can_list_multiple_interfaces_separated_by_space(self):
        # NOTE(review): relies on IAsset contexts matching both 'two' and
        # 'three' — presumably via the interface hierarchy; confirm.
        source = ExampleSource().factory
        context = Mock()
        zope.interface.alsoProvides(context, zeit.cms.interfaces.IAsset)
        self.assertEqual(['one', 'two', 'three'], source.getValues(context))
class AddableCMSContentTypeSourceTest(zeit.cms.testing.ZeitCmsTestCase):
    """AddableCMSContentTypeSource must include registered IAddableContent."""

    def test_includes_IAddableContent(self):
        class IFoo(zeit.cms.interfaces.ICMSContent):
            pass
        # NOTE(review): zeit.cms.content.interfaces is not imported at the
        # top of this module — presumably reachable as a package attribute
        # after the other zeit.cms imports; confirm.
        self.zca.patch_utility(
            IFoo, zeit.cms.content.interfaces.IAddableContent, 'IFoo')
        self.assertIn(
            IFoo,
            list(zeit.cms.content.sources.AddableCMSContentTypeSource()(None)))
class StorystreamReferenceTest(zeit.cms.testing.ZeitCmsTestCase):
    """Storystream source values resolve to repository content."""

    def test_resolves_reference_from_source_config(self):
        self.repository['storystream'] = ExampleContentType()
        with checked_out(self.repository['testcontent']) as co:
            # 'test' is presumably a storystream id from the test fixture
            # config pointing at /storystream — confirm against fixture.
            co.storystreams = (zeit.cms.content.sources.StorystreamSource()(
                None).find('test'),)
        self.assertEqual(
            self.repository['storystream'],
            self.repository['testcontent'].storystreams[0].references)
class AccessSourceTest(zeit.cms.testing.ZeitCmsTestCase):
    """translate_to_c1 maps CMS access ids to their C1 counterparts."""

    def test_cms_ids_should_be_translatable_to_c1_ids(self):
        access_source = zeit.cms.content.sources.AccessSource().factory
        # every node in the source XML must carry a c1_id mapping
        for node in access_source._get_tree().iterchildren('*'):
            assert access_source.translate_to_c1(node.get('id')) == (
                node.get('c1_id'))

    def test_non_translatable_ids_should_return_none(self):
        access_source = zeit.cms.content.sources.AccessSource().factory
        assert access_source.translate_to_c1('hrmpf') is None
class ProductSourceTest(zeit.cms.testing.ZeitCmsTestCase):
    """Product source exposes dependent products per product id."""

    def setUp(self):
        # NOTE(review): does not call super().setUp() — confirm the base
        # class does not require it.
        source = zeit.cms.content.sources.PRODUCT_SOURCE
        self.values = list(source(None))

    def test_zeit_has_zeit_magazin_as_dependent_products(self):
        for value in self.values:
            if value.id == "ZEI":
                self.assertEqual('Zeit Magazin', value.dependent_products[0]
                                 .title)

    def test_source_without_dependencies_has_empty_list_as_dependent_products(
            self):
        self.assertEqual([], self.values[1].dependent_products)

    def test_invalid_dependent_products_configuration_has_no_effect_on_product(
            self):
        for value in self.values:
            if value.id == "BADDEPENDENCY":
                self.assertEqual([], value.dependent_products)
                break
class PrintRessortTest(zeit.cms.testing.ZeitCmsTestCase):
    """Print ressort source maps ressort ids to display titles."""

    def test_source_has_title(self):
        source = zeit.cms.content.sources.PRINT_RESSORT_SOURCE
        # fixture maps the 'Chancen' ressort id to the title 'Chanson'
        self.assertEqual("Chanson", source.factory.getTitle(None, 'Chancen'))
class SerieSourceTest(zeit.cms.testing.ZeitCmsTestCase):
    """SerieSource must tolerate values missing from its configuration."""

    def test_does_not_break_on_nonexistent_values(self):
        source = zeit.cms.content.sources.SerieSource(None)
        context = None
        # both lookups must return None (not raise) for unknown values
        self.assertEqual(None, source.factory.getTitle(context, None))
        self.assertEqual(None, source.factory.getToken(context, None))
|
ZeitOnline/zeit.cms
|
src/zeit/cms/content/tests/test_sources.py
|
Python
|
bsd-3-clause
| 4,893
|
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class FenxiaoDealerRequisitionorderRefuseRequest(RestApi):
    """Taobao TOP request: refuse a distributor requisition order.

    Maps to the 'taobao.fenxiao.dealer.requisitionorder.refuse' API.
    """

    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; set by the caller before executing the request.
        self.dealer_order_id = None
        self.reason = None
        self.reason_detail = None

    def getapiname(self):
        # API method name used by the RestApi base class when dispatching.
        return 'taobao.fenxiao.dealer.requisitionorder.refuse'
|
CooperLuan/devops.notes
|
taobao/top/api/rest/FenxiaoDealerRequisitionorderRefuseRequest.py
|
Python
|
mit
| 410
|
"""
Internal utility functions.
`htmlentitydecode` came from here:
http://wiki.python.org/moin/EscapingHtml
"""
from __future__ import print_function
import contextlib
import re
import sys
import time
try:
from html.entities import name2codepoint
unichr = chr
import urllib.request as urllib2
import urllib.parse as urlparse
except ImportError:
from htmlentitydefs import name2codepoint
import urllib2
import urlparse
def htmlentitydecode(s):
    """Replace HTML named entities (e.g. '&amp;') with their characters."""
    def _decode(match):
        return unichr(name2codepoint[match.group(1)])
    # alternation over every known entity name
    pattern = '&(%s);' % '|'.join(name2codepoint)
    return re.sub(pattern, _decode, s)
def smrt_input(globals_, locals_, ps1=">>> ", ps2="... "):
    """Minimal multi-line REPL helper.

    Prompts with *ps1* first and *ps2* for continuation lines, accumulating
    input until the joined text evaluates without SyntaxError; prints the
    result if truthy and returns None.

    SECURITY NOTE: eval() of raw user input against the caller's
    globals/locals — only safe for a trusted interactive session.
    """
    inputs = []
    while True:
        if inputs:
            prompt = ps2
        else:
            prompt = ps1
        inputs.append(input(prompt))
        try:
            ret = eval('\n'.join(inputs), globals_, locals_)
            if ret:
                print(str(ret))
            return
        except SyntaxError:
            # incomplete expression: keep reading continuation lines
            pass
def printNicely(string):
    """Print *string* as UTF-8, writing raw bytes when stdout has a buffer."""
    raw = getattr(sys.stdout, 'buffer', None)
    if raw is not None:
        # Python 3: bypass the text layer to avoid encoding errors
        raw.write(string.encode('utf8'))
        print()
    else:
        # legacy fallback: print the encoded byte string directly
        print(string.encode('utf8'))
# NOTE(review): __all__ exports only these two names although the module
# defines more public helpers below (err, Fail, find_links, ...) — confirm
# whether that is intentional before extending it.
__all__ = ["htmlentitydecode", "smrt_input"]
def err(msg=""):
    """Report *msg* on standard error (with a trailing newline)."""
    print(msg, file=sys.stderr)
class Fail(object):
    """Counts consecutive failures and aborts once a maximum is reached.

    Args:
        maximum: how many fails to tolerate before exiting.
        exit: process exit code used when the limit is reached.

    Methods:
        count: record one failure; exits when the budget is spent.
        wait: same as count, then sleep for the given number of seconds.
    """

    def __init__(self, maximum=10, exit=1):
        self.i = maximum  # remaining failures before abort
        self.exit = exit

    def count(self):
        """Record one failure; raise SystemExit when the budget hits zero."""
        self.i -= 1
        if self.i == 0:
            err("Too many consecutive fails, exiting.")
            raise SystemExit(self.exit)

    def wait(self, delay=0):
        """Record one failure, then sleep *delay* seconds if positive."""
        self.count()
        if delay > 0:
            time.sleep(delay)
def find_links(line):
    """Split *line* into a %-format template and the list of URLs in it.

    Returns (template, urls): every http(s) link is replaced by a '%s'
    placeholder and literal '%' characters are escaped as '%%'.
    """
    url_re = "(https?://[^ )]+)"
    # escape first so existing percent signs survive later %-formatting
    escaped = line.replace("%", "%%")
    template = re.sub(url_re, "%s", escaped)
    urls = [m.group(1) for m in re.finditer(url_re, escaped)]
    return (template, urls)
def follow_redirects(link, sites=None):
    """Follow redirects for *link* and return the final resolved URL.

    Redirects are only followed while they stay on hosts listed in *sites*
    (None means follow everywhere). HEAD requests are used so no body is
    downloaded. On HTTP/URL errors the last redirect target (or the
    original link) is returned.
    """
    def follow(url):
        # 'is None' instead of '== None': identity test for the sentinel
        return sites is None or urlparse.urlparse(url).hostname in sites

    class RedirectHandler(urllib2.HTTPRedirectHandler):
        def __init__(self):
            self.last_url = None

        def redirect_request(self, req, fp, code, msg, hdrs, newurl):
            self.last_url = newurl
            if not follow(newurl):
                return None  # stop following off-site redirects
            r = urllib2.HTTPRedirectHandler.redirect_request(
                self, req, fp, code, msg, hdrs, newurl)
            r.get_method = lambda: 'HEAD'
            return r

    if not follow(link):
        return link
    redirect_handler = RedirectHandler()
    opener = urllib2.build_opener(redirect_handler)
    req = urllib2.Request(link)
    req.get_method = lambda: 'HEAD'
    try:
        with contextlib.closing(opener.open(req)) as site:
            return site.url
    except (urllib2.HTTPError, urllib2.URLError):
        return redirect_handler.last_url if redirect_handler.last_url else link
def expand_line(line, sites):
    """Return *line* with every contained link resolved through redirects."""
    stripped = line.strip()
    template, urls = find_links(stripped)
    resolved = tuple(follow_redirects(u, sites) for u in urls)
    return template % resolved
def parse_host_list(list_of_hosts):
    """Parse a comma-separated host list into a set of host names.

    Surrounding whitespace is ignored and empty entries are dropped.
    """
    # raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError in future Python versions)
    return set(
        m.group(1) for m in re.finditer(r"\s*([^,\s]+)\s*,?\s*", list_of_hosts))
|
shepdl/stream-daemon
|
twitter_local/util.py
|
Python
|
mit
| 3,963
|
from .search_products_request import SearchProductsRequest
from .search_products_schema import SearchProductsSchema
|
willrp/willbuyer
|
backend/util/request/store/search_products_request/__init__.py
|
Python
|
mit
| 116
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
class ExternalPlugin(object):
    """Minimal FlexGet input plugin used to test external plugin loading."""

    # config schema: a plain boolean flag
    schema = {'type': 'boolean'}

    def on_task_input(self, task, config):
        # Produce one fixed entry regardless of config.
        return [Entry('test entry', 'fake url')]
@event('plugin.register')
def register_plugin():
    # Register under the name 'external_plugin' with plugin API version 2.
    plugin.register(ExternalPlugin, 'external_plugin', api_ver=2)
|
qvazzler/Flexget
|
tests/external_plugins/external_plugin.py
|
Python
|
mit
| 508
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import unicodedata
from openerp.osv import fields, osv
from openerp.tools import ustr
from openerp.modules.registry import RegistryManager
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
    """Suboptimal-but-better-than-nothing way to replace accented
    latin letters by an ASCII equivalent. Will obviously change the
    meaning of input_str and work only for some cases"""
    # NFKD decomposition splits base letters from combining accent marks;
    # dropping the combining marks leaves the plain base characters.
    decomposed = unicodedata.normalize('NFKD', ustr(input_str))
    return u''.join([ch for ch in decomposed if not unicodedata.combining(ch)])
class mail_alias(osv.Model):
"""A Mail Alias is a mapping of an email address with a given OpenERP Document
model. It is used by OpenERP's mail gateway when processing incoming emails
sent to the system. If the recipient address (To) of the message matches
a Mail Alias, the message will be either processed following the rules
of that alias. If the message is a reply it will be attached to the
existing discussion on the corresponding record, otherwise a new
record of the corresponding model will be created.
This is meant to be used in combination with a catch-all email configuration
on the company's mail server, so that as soon as a new mail.alias is
created, it becomes immediately usable and OpenERP will accept email for it.
"""
_name = 'mail.alias'
_description = "Email Aliases"
_rec_name = 'alias_name'
_order = 'alias_model_id, alias_name'
def _get_alias_domain(self, cr, uid, ids, name, args, context=None):
ir_config_parameter = self.pool.get("ir.config_parameter")
domain = ir_config_parameter.get_param(cr, uid, "mail.catchall.domain", context=context)
return dict.fromkeys(ids, domain or "")
_columns = {
'alias_name': fields.char('Alias', required=True,
help="The name of the email alias, e.g. 'jobs' "
"if you want to catch emails for <jobs@example.my.openerp.com>",),
'alias_model_id': fields.many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade",
help="The model (OpenERP Document Kind) to which this alias "
"corresponds. Any incoming email that does not reply to an "
"existing record will cause the creation of a new record "
"of this model (e.g. a Project Task)",
# hack to only allow selecting mail_thread models (we might
# (have a few false positives, though)
domain="[('field_id.name', '=', 'message_ids')]"),
'alias_user_id': fields.many2one('res.users', 'Owner',
help="The owner of records created upon receiving emails on this alias. "
"If this field is not set the system will attempt to find the right owner "
"based on the sender (From) address, or will use the Administrator account "
"if no system user is found for that address."),
'alias_defaults': fields.text('Default Values', required=True,
help="A Python dictionary that will be evaluated to provide "
"default values when creating new records for this alias."),
'alias_force_thread_id': fields.integer('Record Thread ID',
help="Optional ID of a thread (record) to which all incoming "
"messages will be attached, even if they did not reply to it. "
"If set, this will disable the creation of new records completely."),
'alias_domain': fields.function(_get_alias_domain, string="Alias domain", type='char', size=None),
}
_defaults = {
'alias_defaults': '{}',
'alias_user_id': lambda self,cr,uid,context: uid,
# looks better when creating new aliases - even if the field is informative only
'alias_domain': lambda self,cr,uid,context: self._get_alias_domain(cr, SUPERUSER_ID,[1],None,None)[1]
}
_sql_constraints = [
('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one')
]
def _check_alias_defaults(self, cr, uid, ids, context=None):
try:
for record in self.browse(cr, uid, ids, context=context):
dict(eval(record.alias_defaults))
except Exception:
return False
return True
_constraints = [
(_check_alias_defaults, '''Invalid expression, it must be a literal python dictionary definition e.g. "{'field': 'value'}"''', ['alias_defaults']),
]
def name_get(self, cr, uid, ids, context=None):
"""Return the mail alias display alias_name, inclusing the implicit
mail catchall domain from config.
e.g. `jobs@openerp.my.openerp.com` or `sales@openerp.my.openerp.com`
"""
return [(record['id'], "%s@%s" % (record['alias_name'], record['alias_domain']))
for record in self.read(cr, uid, ids, ['alias_name', 'alias_domain'], context=context)]
def _find_unique(self, cr, uid, name, context=None):
"""Find a unique alias name similar to ``name``. If ``name`` is
already taken, make a variant by adding an integer suffix until
an unused alias is found.
"""
sequence = None
while True:
new_name = "%s%s" % (name, sequence) if sequence is not None else name
if not self.search(cr, uid, [('alias_name', '=', new_name)]):
break
sequence = (sequence + 1) if sequence else 2
return new_name
def migrate_to_alias(self, cr, child_model_name, child_table_name, child_model_auto_init_fct,
alias_id_column, alias_key, alias_prefix='', alias_force_key='', alias_defaults={}, context=None):
""" Installation hook to create aliases for all users and avoid constraint errors.
:param child_model_name: model name of the child class (i.e. res.users)
:param child_table_name: table name of the child class (i.e. res_users)
:param child_model_auto_init_fct: pointer to the _auto_init function
(i.e. super(res_users,self)._auto_init(cr, context=context))
:param alias_id_column: alias_id column (i.e. self._columns['alias_id'])
:param alias_key: name of the column used for the unique name (i.e. 'login')
:param alias_prefix: prefix for the unique name (i.e. 'jobs' + ...)
:param alias_force_key': name of the column for force_thread_id;
if empty string, not taken into account
:param alias_defaults: dict, keys = mail.alias columns, values = child
model column name used for default values (i.e. {'job_id': 'id'})
"""
if context is None:
context = {}
# disable the unique alias_id not null constraint, to avoid spurious warning during
# super.auto_init. We'll reinstall it afterwards.
alias_id_column.required = False
# call _auto_init
child_model_auto_init_fct(cr, context=context)
registry = RegistryManager.get(cr.dbname)
mail_alias = registry.get('mail.alias')
child_class_model = registry.get(child_model_name)
no_alias_ids = child_class_model.search(cr, SUPERUSER_ID, [('alias_id', '=', False)], context={'active_test': False})
# Use read() not browse(), to avoid prefetching uninitialized inherited fields
for obj_data in child_class_model.read(cr, SUPERUSER_ID, no_alias_ids, [alias_key]):
alias_vals = {'alias_name': '%s%s' % (alias_prefix, obj_data[alias_key])}
if alias_force_key:
alias_vals['alias_force_thread_id'] = obj_data[alias_force_key]
alias_vals['alias_defaults'] = dict((k, obj_data[v]) for k, v in alias_defaults.iteritems())
alias_id = mail_alias.create_unique_alias(cr, SUPERUSER_ID, alias_vals, model_name=context.get('alias_model_name', child_model_name))
child_class_model.write(cr, SUPERUSER_ID, obj_data['id'], {'alias_id': alias_id})
_logger.info('Mail alias created for %s %s (uid %s)', child_model_name, obj_data[alias_key], obj_data['id'])
# Finally attempt to reinstate the missing constraint
try:
cr.execute('ALTER TABLE %s ALTER COLUMN alias_id SET NOT NULL' % (child_table_name))
except Exception:
_logger.warning("Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
child_table_name, 'alias_id', child_table_name, 'alias_id')
# set back the unique alias_id constraint
alias_id_column.required = True
def create_unique_alias(self, cr, uid, vals, model_name=None, context=None):
    """Creates an email.alias record according to the values provided in ``vals``,
    with 2 alterations: the ``alias_name`` value may be suffixed in order to
    make it unique (and certain unsafe characters replaced), and
    the ``alias_model_id`` value will be set to the model ID of the
    ``model_name`` value, if provided.

    :param cr: database cursor
    :param uid: current user id
    :param dict vals: field values for the new alias; must contain 'alias_name'
    :param model_name: optional technical model name (e.g. 'crm.lead'); when
        given, the matching ir.model id is stored as ``alias_model_id``
    :param context: OpenERP context dict
    :return: id of the newly created mail.alias record
    """
    # when an alias name appears to already be an email, we keep the local part only
    alias_name = remove_accents(vals['alias_name']).lower().split('@')[0]
    # collapse any run of characters outside [word chars, '+', '.'] into a dash
    alias_name = re.sub(r'[^\w+.]+', '-', alias_name)
    # suffix the name if needed so it does not collide with an existing alias
    alias_name = self._find_unique(cr, uid, alias_name, context=context)
    vals['alias_name'] = alias_name
    if model_name:
        model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)], context=context)[0]
        vals['alias_model_id'] = model_id
    return self.create(cr, uid, vals, context=context)
|
inovtec-solutions/OpenERP
|
openerp/addons/mail/mail_alias.py
|
Python
|
agpl-3.0
| 11,521
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes the HRV stitcher data source class
"""
import pandas as pd
import numpy as np
from scipy.io import loadmat
from scipy.interpolate import UnivariateSpline
from data_source import DataSource
from schema import Schema, Or, Optional
def _val(x, pos, label_bin):
return np.mean(x)
def _std(x, pos, label_bin):
return x.std(axis=0)
def _sem(x, pos, label_bin):
return x.sem(axis=0)
def _var(x, pos, label_bin):
return np.var(x)
class HRVStitcher(DataSource):
    """Data source for HRV "stitcher" output: a tab-separated samples file
    with bpm/rr/twave channels plus a .mat file carrying the Biopac event
    flag channel.  Samples are smoothed, labelled, sliced into time bins,
    and summarized per (channel, statistic) into pandas Panels.

    NOTE(review): this module uses Python-2 / legacy-pandas APIs
    (dict.iteritems, pd.Panel, DataFrame.sort, read_csv(header=False));
    it will not run unmodified on Python 3 / modern pandas — confirm the
    supported environment.
    """

    def __init__(self, config, schedule):
        # Call the parent class init
        super(HRVStitcher, self).__init__(config, schedule)
        # Output panels: one entry per channel, mapping each statistic name
        # to the module-level helper that computes it over a bin of samples.
        self.panels = {'bpm': {'VAL': _val,
                               'SEM': _sem},
                       'rr': {'VAL': _val,
                              'STD': _std},
                       'twave': {'VAL': _val,
                                 'SEM': _sem}}

    def load(self, file_paths):
        """Override for load method to include .mat compatibility.

        :param file_paths: dict with keys 'samples' (tab-separated text file
            of bpm/rr/twave columns, '#' comments allowed) and 'labels'
            (.mat file whose 'events' matrix holds the flag channel).
        """
        self.data['samples'] = pd.read_csv(file_paths['samples'],
                                           comment="#",
                                           delimiter="\t",
                                           skipinitialspace=True,
                                           header=False,
                                           index_col=False,
                                           names=['bpm', 'rr', 'twave'])
        raw_mat = loadmat(file_paths['labels'])
        # First column of the 'events' matrix is the event flag channel.
        events = raw_mat['events'][:, 0]
        self.data['labels'] = pd.DataFrame({'flag': events},
                                           index=np.arange(events.size))

    def merge_data(self):
        """
        Clean and merge the samples and labels data.
        """
        # TODO(janmtl): return an error if the files have not been loaded yet.
        # Clean the samples data frame and the labels data frame
        self.data['samples'] = self._clean_samples(self.data['samples'])
        self.data['labels'] = self._clean_labels(self.data['labels'])
        self.label_config = self._label_config_to_df(self.config)
        # Combine the labels data with the labels configuration
        self.data['labels'] = self._merge_labels_and_config(
            labels=self.data['labels'],
            config=self.label_config)

    def bin_data(self):
        """Makes a dict of dicts of pd.Panels at self.output.

        Output layout: ``self.output[channel][stat_name]`` is a DataFrame of
        per-bin metadata plus a 'stat' column holding the statistic computed
        over all raw samples falling inside the bins of each
        (Condition, Label) pair.
        """
        label_bins = self.create_label_bins(self.data['labels'])
        major_axis = label_bins.index.values
        minor_axis = label_bins.drop(['Start_Time', 'End_Time'], axis=1).columns
        minor_axis = minor_axis.append(pd.Index(['stat']))
        raw = self.data['samples']
        # One panel per channel; the panel items are the statistic names.
        output = {channel: pd.Panel(items=statistics.keys(),
                                    major_axis=major_axis,
                                    minor_axis=minor_axis)
                  for channel, statistics in self.panels.iteritems()}
        for channel, statistics in self.panels.iteritems():
            for stat_name, stat_fun in statistics.iteritems():
                new_panel = label_bins.copy(deep=True)
                new_panel.drop(['Start_Time', 'End_Time'], axis=1, inplace=True)
                new_panel['stat'] = np.nan
                # Unique (Condition, Label) pairs present in the bins.
                cond_lbls = pd.Series(data=zip(label_bins.loc[:, 'Condition'],
                                               label_bins.loc[:, 'Label'])
                                      ).unique()
                for cond_lbl in cond_lbls:
                    sel = (label_bins.loc[:, 'Condition'] == cond_lbl[0]) \
                        & (label_bins.loc[:, 'Label'] == cond_lbl[1])
                    sel_bins = label_bins.loc[sel, :]
                    samples = pd.Series(name=channel)
                    pos = pd.Series(name='pos')
                    # Pool raw samples falling inside every bin of this pair.
                    for _, label_bin in sel_bins.iterrows():
                        selector = (raw.index.values >= label_bin['Start_Time']) \
                            & (raw.index.values < label_bin['End_Time'])
                        samples = samples.append(raw.loc[selector, channel])
                        pos = pos.append(raw.loc[selector, 'pos'])
                    # NOTE(review): the module-level stat helpers are declared
                    # with three parameters (x, pos, label_bin) but are invoked
                    # here with two — confirm the intended signatures.
                    stat = stat_fun(samples, pos)
                    new_panel.loc[sel, 'stat'] = stat
                output[channel][stat_name] = new_panel.sort('Bin_Order')
        self.output = output

    @staticmethod
    def _label_config_to_df(config):
        """Convert the label configuration dictionary to a data frame."""
        labels_list = []
        for event_type, label_config in config.iteritems():
            pattern = label_config['pattern']
            if isinstance(pattern, dict):
                # Dict pattern: one row per (event group -> flag) entry.
                for event_group, flag in label_config['pattern'].iteritems():
                    labels_list.append({
                        'Label': event_type,
                        'Condition': event_group,
                        'Duration': label_config['duration'],
                        'N_Bins': label_config['bins'],
                        'Left_Trim': label_config.get('left_trim', 0),
                        'Right_Trim': label_config.get('right_trim', 0),
                        'flag': flag})
            elif isinstance(pattern, int):
                # Scalar pattern: a single flag with no condition grouping.
                labels_list.append({
                    'Label': event_type,
                    'Condition': np.nan,
                    'Duration': label_config['duration'],
                    'N_Bins': label_config['bins'],
                    'Left_Trim': label_config.get('left_trim', 0),
                    'Right_Trim': label_config.get('right_trim', 0),
                    'flag': pattern})
            else:
                raise Exception('Bad Biopac config flag {}'.format(pattern))
        return pd.DataFrame(labels_list)

    @staticmethod
    def _clean_labels(labels):
        """
        Turn the Biopac flag channel into a data frame of label flags and start
        times.
        """
        # TODO(janmtl): finish this docstring
        flags = labels['flag'].values
        # Pad both ends so the difference below also marks the first sample
        # (padded with -255, which never equals a real flag) and keeps the
        # array lengths aligned.
        low_offset = np.append(-255, flags)
        high_offset = np.append(flags, flags[-1])
        # A nonzero difference between consecutive samples marks a flag change.
        event_flags = flags[(low_offset-high_offset) != 0]
        start_times = np.where((low_offset-high_offset) != 0)[0]
        labels = pd.DataFrame({'flag': event_flags,
                               'Start_Time': start_times})
        # NOTE(review): assumes flag value 255 means "no event" — confirm
        # against the acquisition hardware documentation.
        labels = labels[(labels['flag'] != 255)]
        return labels

    @staticmethod
    def _clean_samples(samples):
        """
        Smooth every channel with a quintic smoothing spline and add a 'pos'
        helper column.

        NOTE(review): the index is multiplied by 100 — presumably converting
        sample counts to the time units used by Start_Time/End_Time; confirm
        the sampling rate assumption.
        """
        scale = 0.55  # smoothing factor passed to UnivariateSpline via s=scale*len(x)
        samples.index = samples.index*100
        for col_name, col in samples.iteritems():
            x = col.index
            y = col.values
            spl = UnivariateSpline(x, y, k=5, s=scale*len(x))
            samples[col_name] = spl(x)
        samples['pos'] = True
        return samples

    @staticmethod
    def _merge_labels_and_config(labels, config):
        """
        Merge together the contents of the labels file with the label
        configuration dictionary.

        Rows are joined on the 'flag' column and sorted chronologically.
        """
        labels = pd.merge(labels, config, on='flag')
        labels.sort('Start_Time', inplace=True)
        return labels

    def create_label_bins(self, labels):
        """Replace the N_Bins column with Bin_Index and the Duration column
        with End_Time. This procedure grows the number of rows in the labels
        data frame (one row per bin instead of one row per label)."""
        total_bins = labels['N_Bins'].sum()
        label_bins = pd.DataFrame(columns=['Order', 'ID', 'Label',
                                           'Condition', 'Bin_Order',
                                           'Start_Time', 'End_Time',
                                           'Bin_Index'],
                                  index=np.arange(0, total_bins))
        idx = 0
        for _, label in labels.iterrows():
            n_bins = label['N_Bins']
            # Evenly spaced bin edges over the (trimmed) label interval.
            cuts = np.linspace(start=label['Start_Time'] + label['Left_Trim'],
                               stop=(label['Start_Time']
                                     + label['Duration']
                                     - label['Right_Trim']),
                               num=n_bins+1)
            label_info = np.tile(label.as_matrix(columns=['Label',
                                                          'Condition']),
                                 (n_bins, 1))
            # Order and ID (Order is filled in below; ID stays NaN here)
            label_bins.iloc[idx:idx+n_bins, 0:2] = np.nan
            # Label, Condition
            label_bins.iloc[idx:idx+n_bins, 2:4] = label_info
            # Bin_Order
            label_bins.iloc[idx:idx+n_bins, 4] = idx+np.arange(0, n_bins, 1)
            # Start_Time
            label_bins.iloc[idx:idx+n_bins, 5] = cuts[0:n_bins]
            # End_Time
            label_bins.iloc[idx:idx+n_bins, 6] = cuts[1:n_bins+1]
            # Bin_Index
            label_bins.iloc[idx:idx+n_bins, 7] = np.arange(0, n_bins, 1)
            idx = idx + n_bins
        # Add the Order by iterating over Labels and Bin indices
        for lc, group in label_bins.groupby(['Label', 'Bin_Index']):
            selector = (label_bins['Label'] == lc[0]) & \
                       (label_bins['Bin_Index'] == lc[1])
            label_bins.loc[selector, 'Order'] = \
                np.arange(0, np.sum(selector), 1)
        return label_bins

    @staticmethod
    def _validate_config(raw):
        """
        Validates the label configuration dict passed to the Data Source.

        Args:
            raw (dict): must match the following schema
                {event_type (str):
                    {
                        duration: (float or int),
                        bins: (int),
                        pattern: dictionary of flags keyed by group
                    }
                }
        """
        # TODO(janmtl): improve this docstring
        schema = Schema({str: {'duration': Or(float, int),
                               'bins': int,
                               'pattern': Or(int, {str: int}),
                               Optional('left_trim'): Or(float, int),
                               Optional('right_trim'): Or(float, int)}})
        return schema.validate(raw)

    @staticmethod
    def _validate_schedule(raw):
        """
        Validates the schedule configuration dict passed to the Data Source.

        Args:
            raw (dict): must match the following schema
                {file_type (str): pattern (str)}
        """
        schema = Schema({str: str})
        return schema.validate(raw)
|
janmtl/pypsych
|
pypsych/data_sources/hrvstitcher.py
|
Python
|
bsd-3-clause
| 10,502
|
from abc import ABC, abstractmethod
from typing import Any, Dict, Type
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import ConfigModel
from datahub.configuration.import_resolver import pydantic_resolve_key
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.transformer.dataset_transformer import DatasetTransformer
from datahub.metadata.schema_classes import (
DatasetPropertiesClass,
DatasetSnapshotClass,
MetadataChangeEventClass,
)
class AddDatasetPropertiesResolverBase(ABC):
    """Interface for computing the custom properties to attach to a dataset."""

    @abstractmethod
    def get_properties_to_add(self, current: DatasetSnapshotClass) -> Dict[str, str]:
        """Return the properties (name -> value) to add for *current*."""
        pass
class AddDatasetPropertiesConfig(ConfigModel):
    """Config for the AddDatasetProperties transformer: the resolver class
    whose instances compute the properties to add."""

    add_properties_resolver_class: Type[AddDatasetPropertiesResolverBase]

    class Config:
        # The resolver class field holds an arbitrary Python type, which
        # pydantic cannot validate natively.
        arbitrary_types_allowed = True

    # Allows the resolver class to be supplied as an importable dotted-path
    # string in the config and resolved to the actual type.
    _resolve_properties_class = pydantic_resolve_key("add_properties_resolver_class")
class AddDatasetProperties(DatasetTransformer):
    """Transformer that adds properties to datasets according to a callback function."""

    ctx: PipelineContext
    config: AddDatasetPropertiesConfig

    def __init__(
        self,
        config: AddDatasetPropertiesConfig,
        ctx: PipelineContext,
        **resolver_args: Dict[str, Any],
    ):
        # NOTE(review): the DatasetTransformer base __init__ is not invoked
        # here — confirm the base class requires no initialization.
        self.ctx = ctx
        self.config = config
        # Extra keyword arguments forwarded to the resolver class constructor.
        self.resolver_args = resolver_args

    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddDatasetProperties":
        """Factory used by the transformer registry: parse config and build."""
        config = AddDatasetPropertiesConfig.parse_obj(config_dict)
        return cls(config, ctx)

    def transform_one(self, mce: MetadataChangeEventClass) -> MetadataChangeEventClass:
        """Attach resolver-computed custom properties to dataset snapshots.

        Non-dataset snapshots pass through unchanged.
        """
        if not isinstance(mce.proposedSnapshot, DatasetSnapshotClass):
            return mce
        # Instantiate the configured resolver and ask it for the properties.
        properties_to_add = self.config.add_properties_resolver_class(  # type: ignore
            **self.resolver_args
        ).get_properties_to_add(mce.proposedSnapshot)
        if properties_to_add:
            # Get (or create) the DatasetProperties aspect, then merge in.
            properties = builder.get_or_add_aspect(
                mce, DatasetPropertiesClass(customProperties={})
            )
            properties.customProperties.update(properties_to_add)
        return mce
class SimpleAddDatasetPropertiesConfig(ConfigModel):
    """Config for SimpleAddDatasetProperties: a fixed name -> value mapping."""

    properties: Dict[str, str]
class SimpleAddDatasetPropertiesResolverClass(AddDatasetPropertiesResolverBase):
    """Resolver that returns the same fixed property mapping for every dataset."""

    def __init__(self, properties: Dict[str, str]):
        self.properties = properties

    def get_properties_to_add(self, current: DatasetSnapshotClass) -> Dict[str, str]:
        # The snapshot is ignored; the configured mapping applies uniformly.
        return self.properties
class SimpleAddDatasetProperties(AddDatasetProperties):
    """Transformer that adds a specified set of properties to each dataset."""

    def __init__(self, config: SimpleAddDatasetPropertiesConfig, ctx: PipelineContext):
        # Delegate to the generic transformer with the fixed-mapping resolver;
        # the configured properties are passed through as resolver kwargs.
        generic_config = AddDatasetPropertiesConfig(
            add_properties_resolver_class=SimpleAddDatasetPropertiesResolverClass
        )
        resolver_args = {"properties": config.properties}
        super().__init__(generic_config, ctx, **resolver_args)

    @classmethod
    def create(
        cls, config_dict: dict, ctx: PipelineContext
    ) -> "SimpleAddDatasetProperties":
        """Factory used by the transformer registry: parse config and build."""
        config = SimpleAddDatasetPropertiesConfig.parse_obj(config_dict)
        return cls(config, ctx)
|
linkedin/WhereHows
|
metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_properties.py
|
Python
|
apache-2.0
| 3,309
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import time
from proton.reactor import Reactor
class Logger:
    """Handler that prints every event dispatched to the reactor.

    ``on_unhandled`` is Proton's catch-all: it receives any event for which
    the handler defines no specifically named callback.
    """

    def on_unhandled(self, name, event):
        print("LOG:", name, event)
class Program:
    """Minimal reactor handler: greets on startup and on shutdown."""

    def on_reactor_init(self, event):
        # Fired once when the reactor starts.
        print("Hello, World!")

    def on_reactor_final(self, event):
        # Fired once when the reactor has no more work and is stopping.
        print("Goodbye, World!")
# You can pass multiple handlers to a reactor when you construct it.
# Each of these handlers will see every event the reactor sees. By
# combining this with on_unhandled, you can log each event that goes
# to the reactor.
r = Reactor(Program(), Logger())
r.run()  # blocks until the reactor runs out of work
# Note that if you wanted to add the logger later, you could also
# write the above as below. All arguments to the reactor are just
# added to the default handler for the reactor.
def logging_enabled():
    # Demo toggle: flip to True to attach the Logger at runtime.
    return False

r = Reactor(Program())
if logging_enabled():
    r.handler.add(Logger())
r.run()
|
Karm/qpid-proton
|
examples/python/reactor/reactor-logger.py
|
Python
|
apache-2.0
| 1,713
|
import collections
import datetime
import json
import logging
import requests
logger = logging.getLogger(__name__)
__all__ = ['WaniKani', 'Radical', 'Kanji', 'Vocabulary']
WANIKANI_BASE = 'https://www.wanikani.com/api/v1.4/user/{0}/{1}'
def split(func):
    """Decorator for level-filtered WaniKani API generator methods.

    When the wrapped method is called with ``levels=None`` (meaning "all
    levels"), the user's level range is split into chunks and the method is
    invoked once per chunk (as a comma-separated level list) to avoid API
    timeouts.  Otherwise the call is passed through unchanged.
    """
    # From http://stackoverflow.com/a/21767522/622650
    def iter_baskets_contiguous(items, maxbaskets=3, item_count=None):
        '''
        generates balanced baskets from iterable, contiguous contents
        provide item_count if providing a iterator that doesn't support len()
        '''
        item_count = int(item_count or len(items))
        baskets = min(item_count, maxbaskets)
        items = iter(items)
        floor = int(item_count // baskets)
        ceiling = floor + 1
        stepdown = item_count % baskets
        for x_i in range(int(baskets)):
            length = ceiling if x_i < stepdown else floor
            yield [next(items) for _ in range(length)]

    def wrapper(self, levels):
        # If levels is None, then we're getting all levels for the user
        # and may need to split it up into multiple queries to avoid timeouts
        if levels is None:
            logger.debug('Splitting levels %s', levels)
            level = self.profile()['level']
            # Bug fix: the original used `level / 10`, which is 0 for users
            # below level 10 (ZeroDivisionError on Python 2; on Python 3 the
            # float chunk count made the generator yield nothing at all).
            # Floor-divide and clamp to at least one chunk.
            step = max(1, level // 10)
            for basket in iter_baskets_contiguous(range(1, level + 1), step):
                logger.debug('Loading chunk %s', basket)
                for item in func(self, ','.join([str(i) for i in basket])):
                    yield item
        else:
            for item in func(self, levels):
                yield item
    return wrapper
class BaseObject(object):
    """Common wrapper around one raw WaniKani API item payload.

    ``raw`` holds the decoded JSON dict; its ``user_specific`` sub-dict is
    None for items the user has not unlocked yet.
    """

    def __init__(self, raw):
        self.raw = raw

    @property
    def next_review(self):
        # None until the item has been unlocked (no user_specific data).
        if self.raw['user_specific'] is None:
            return None
        return datetime.datetime.fromtimestamp(
            self.raw['user_specific']['available_date']
        )

    @property
    def unlocked(self):
        # NOTE(review): unlike next_review, this does not guard against
        # user_specific being None — confirm callers only use it on
        # unlocked items.
        return self.raw['user_specific']['unlocked_date']

    @property
    def burned(self):
        # Burn timestamp, or None when the item has not been burned yet.
        if self.raw['user_specific']['burned']:
            return self.raw['user_specific']['burned_date']
        return None

    @property
    def srs(self):
        # SRS stage name (e.g. apprentice/guru/...), or None when unlearned.
        try:
            return self.raw['user_specific']['srs']
        except TypeError:
            # Likely an object that has not been learned yet
            return None

    @property
    def srs_numeric(self):
        # Numeric SRS stage; 0 for items without user-specific data.
        try:
            return self.raw['user_specific']['srs_numeric']
        except (AttributeError, TypeError):
            return 0

    def __getitem__(self, key):
        # Look the key up in the top-level payload first, then fall back to
        # the user_specific sub-dict (when present).
        if key in self.raw:
            return self.raw[key]
        if self.raw['user_specific'] is not None:
            return self.raw['user_specific'][key]

    def __str__(self):
        return self.raw['character']
class Radical(BaseObject):
    """A WaniKani radical item."""

    def __repr__(self):
        # NOTE(review): .encode('utf8') is Python-2 style; on Python 3 it
        # embeds a bytes repr (b'...') in the string — confirm target version.
        if self.raw['character']:
            return '<Radical: {0}>'.format(self.raw['character'].encode('utf8'))
        # Some characters do not have a unicode representation
        return '<Radical: No Unicode>'
class Kanji(BaseObject):
    """A WaniKani kanji item."""

    def __repr__(self):
        # NOTE(review): Python-2 style .encode('utf8'); see Radical.__repr__.
        return '<Kanji: {0}>'.format(self.raw['character'].encode('utf8'))
class Vocabulary(BaseObject):
    """A WaniKani vocabulary item; str() includes the kana reading."""

    def __str__(self):
        return '{0} [{1}]'.format(self.raw['character'], self.raw['kana'])

    def __repr__(self):
        # NOTE(review): Python-2 style .encode('utf8'); see Radical.__repr__.
        return '<Vocabulary: {0}>'.format(self.raw['character'].encode('utf8'))
class WaniKani(object):
    """Thin client for the WaniKani v1.4 JSON API."""

    def __init__(self, api_key):
        self.api_key = api_key
        self.session = requests.Session()

    def get(self, *args, **kwargs):
        """Perform a GET request and return the decoded JSON payload.

        Raises requests.HTTPError for non-2xx responses.
        """
        result = self.session.get(*args, **kwargs)
        result.raise_for_status()
        # Bug fix: on requests >= 1.0 ``Response.json`` is a *method*; the
        # original returned the bound method object itself, so every caller
        # failed as soon as it subscripted the "data".  Call it.
        return result.json()

    def profile(self):
        """Return the user-information block for this API key."""
        url = WANIKANI_BASE.format(self.api_key, 'user-information')
        data = self.get(url)
        return data['user_information']

    def level_progress(self):
        """Return level-progression info merged with the user information."""
        url = WANIKANI_BASE.format(self.api_key, 'level-progression')
        data = self.get(url)
        merged = data['requested_information']
        merged['user_information'] = data['user_information']
        return merged

    def recent_unlocks(self, limit=10):
        """Yield the most recently unlocked items as typed objects.

        NOTE(review): ``limit`` is accepted but never sent to the API —
        presumably it should be appended to the URL; confirm against the
        v1.4 API documentation before wiring it up.
        """
        url = WANIKANI_BASE.format(self.api_key, 'recent-unlocks')
        data = self.get(url)
        mapping = {
            'vocabulary': Vocabulary,
            'kanji': Kanji,
            'radical': Radical,
        }
        for item in data['requested_information']:
            klass = mapping[item['type']]
            yield klass(item)

    def critical_items(self, percentage=75):
        """Yield items answered correctly below *percentage* of the time."""
        url = WANIKANI_BASE.format(self.api_key, 'critical-items')
        if percentage:
            url += '/{0}'.format(percentage)
        data = self.get(url)
        mapping = {
            'vocabulary': Vocabulary,
            'kanji': Kanji,
            'radical': Radical,
        }
        for item in data['requested_information']:
            klass = mapping[item['type']]
            yield klass(item)

    def radicals(self, levels=None):
        """
        :param levels string: An optional argument of declaring a single or
            comma-delimited list of levels is available, as seen in the example
            as 1. An example of a comma-delimited list of levels is 1,2,5,9.

        http://www.wanikani.com/api/v1.2#radicals-list
        """
        url = WANIKANI_BASE.format(self.api_key, 'radicals')
        if levels:
            url += '/{0}'.format(levels)
        data = self.get(url)
        for item in data['requested_information']:
            yield Radical(item)

    def kanji(self, levels=None):
        """
        :param levels: An optional argument of declaring a single or
            comma-delimited list of levels is available, as seen in the example
            as 1. An example of a comma-delimited list of levels is 1,2,5,9.
        :type levels: str or None

        http://www.wanikani.com/api/v1.2#kanji-list
        """
        url = WANIKANI_BASE.format(self.api_key, 'kanji')
        if levels:
            url += '/{0}'.format(levels)
        data = self.get(url)
        for item in data['requested_information']:
            yield Kanji(item)

    @split
    def vocabulary(self, levels=None):
        """
        :param levels: An optional argument of declaring a single or
            comma-delimited list of levels is available, as seen in the example
            as 1. An example of a comma-delimited list of levels is 1,2,5,9.
        :type levels: str or None

        http://www.wanikani.com/api/v1.2#vocabulary-list
        """
        url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
        if levels:
            url += '/{0}'.format(levels)
        data = self.get(url)
        # The vocabulary endpoint nests its list under 'general' when no
        # levels are requested.
        if 'general' in data['requested_information']:
            for item in data['requested_information']['general']:
                yield Vocabulary(item)
        else:
            for item in data['requested_information']:
                yield Vocabulary(item)

    def upcoming(self, levels=None):
        """
        Return mapping of upcoming items

        :param levels: An optional argument of declaring a single or
            comma-delimited list of levels is available, as seen in the example
            as 1. An example of a comma-delimited list of levels is 1,2,5,9.
        :type levels: str or None
        :return: Returns dictionary of items with datetime as the key
        """
        return self.query(levels, exclude=[u'burned'])

    def burning(self):
        """Return items at the 'enlighten' stage (next burn candidates)."""
        return self.query(include=[u'enlighten'])

    def query(self, levels=None, items=[Radical, Kanji, Vocabulary], exclude=[], include=[]):
        """Collect items of the given types grouped by next review time.

        :param exclude: SRS stage names to drop from the result
        :param include: when non-empty, only these SRS stage names are kept
        :return: dict mapping next-review datetime -> list of items
        """
        mapping = {
            Radical: self.radicals,
            Kanji: self.kanji,
            Vocabulary: self.vocabulary
        }
        queue = collections.defaultdict(list)
        for klass in items:
            for obj in mapping[klass](levels):
                if exclude and obj.srs in exclude:
                    continue
                if include and obj.srs not in include:
                    continue
                if obj.next_review:
                    queue[obj.next_review].append(obj)
        return queue
|
kfdm/wanikani
|
wanikani/core.py
|
Python
|
mit
| 8,225
|
import cgi
import datetime
import urllib
import webapp2
import jinja2
import os
from google.appengine.ext import ndb
from google.appengine.api import users
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
def userproofs_key(email=None):
    """Build the Datastore ancestor key grouping all proofs saved by *email*."""
    return ndb.Key('userproofs', email)
class Proofs(ndb.Model):
    """Datastore model for one saved logic proof (stored under the user's
    ancestor key; none of the fields are indexed)."""

    title = ndb.StringProperty(indexed=False)
    description = ndb.StringProperty(indexed=False)
    serializedProof = ndb.StringProperty(indexed=False)
class IndexHandler(webapp2.RequestHandler):
    """Landing page: greeting plus HTML panels for the user's saved proofs."""

    def get(self):
        user = users.get_current_user()
        proofList = []
        if user:
            # Signed-in greeting with a sign-out link.
            greeting = ('Welcome, <a href="#" class="username">%s!</a> (<a href="%s">Sign out</a>)' %
                        (user.nickname(), users.create_logout_url('/')))
            # Fetch up to 10 proofs stored under this user's ancestor key.
            proof_query = Proofs.query(ancestor=userproofs_key(user.email()))
            proofs = proof_query.fetch(10)
            for proof in proofs:
                # NOTE(review): title/description/serializedProof are
                # interpolated into HTML without escaping (the imported `cgi`
                # module is unused) — stored-XSS risk; confirm and escape.
                proofList.append( """
                    <div class="panel panel-default loadProof shadow">
                        <div class="panel-body">
                            <h3>%s</h3>
                            <blockquote>
                                <p>%s</p>
                            </blockquote>
                            <input type="hidden" id="jsonProof" value='%s' />
                        </div>
                    </div> """ % (proof.title, proof.description, proof.serializedProof))
        else:
            # Anonymous visitors get a sign-in link instead.
            greeting = ('<a href="%s">Sign in or register</a>.' %
                        users.create_login_url('/'))
        template_values = {
            "user": user,
            "greeting": greeting,
            "proofList": proofList
        }
        template = jinja_environment.get_template('templates/index.html')
        self.response.out.write(template.render(template_values))
class SaveTheProof(webapp2.RequestHandler):
    """POST handler persisting a proof for the signed-in user, then
    redirecting back to the index page."""

    def post(self):
        user = users.get_current_user()
        # Anonymous submissions are silently dropped (no proof is stored).
        if(user):
            proofData = Proofs(parent=userproofs_key(users.get_current_user().email()))
            proofData.title = self.request.get('saveFormTitle')
            proofData.description = self.request.get('saveFormDesc')
            proofData.serializedProof = self.request.get('serializedProof')
            proofData.put()
        self.redirect('/')
# WSGI route table: index page and the proof-saving endpoint.
app = webapp2.WSGIApplication([
    ('/', IndexHandler),
    ('/saveproof', SaveTheProof)
], debug=True)
|
rcxking/pierce_logic_reloaded
|
appengine/main.py
|
Python
|
bsd-3-clause
| 2,440
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import unittest
if sys.version_info >= (3, 0):
from unittest.mock import MagicMock, patch
from urllib.error import HTTPError
else:
from mock import MagicMock, patch
from urllib2 import HTTPError
from shellfoundry.exceptions import FatalError
from shellfoundry.utilities.cloudshell_api import (
CloudShellClient,
create_cloudshell_client,
)
# NOTE(review): no-op self-assignment — presumably a leftover from an earlier
# import shim or editor artifact; confirm and remove.
patch.object = patch.object
class TestClientWrapper(unittest.TestCase):
    """Tests for shellfoundry's CloudShell client factory: failure
    propagation, retry behaviour, and successful creation."""

    def test_client_wrapper_raises_an_error_when_create_client_fails(self):
        # Act: a FatalError from create_client must propagate unchanged.
        with patch.object(
            CloudShellClient, "create_client", side_effect=FatalError("failure")
        ):
            with self.assertRaises(FatalError) as context:
                create_cloudshell_client()

        # Assert
        self.assertEqual(context.exception.message, "failure")

    @patch(
        "shellfoundry.utilities.cloudshell_api.client_wrapper.PackagingRestApiClient"
    )
    def test_client_wrapper_raises_an_error_when_create_client_fails_after_retries_regular_exception(  # noqa: E501
        self, api_mock
    ):
        # Arrange: every attempt raises a generic Exception.
        api_mock.side_effect = [Exception(), Exception()]

        # Act
        with self.assertRaises(FatalError) as context:
            create_cloudshell_client(retries=2)

        # Assert: generic failures surface as the connection-failure message.
        self.assertEqual(
            context.exception.message, CloudShellClient.ConnectionFailureMessage
        )

    @patch(
        "shellfoundry.utilities.cloudshell_api.client_wrapper.PackagingRestApiClient",
        new_callable=MagicMock(),
    )
    def test_client_wrapper_raises_an_error_when_create_client_fails_after_retries_http_error(  # noqa: E501
        self, api_mock
    ):
        # Arrange: a 401 on every attempt signals bad credentials.
        error = HTTPError("url", 401, "not found", None, None)
        api_mock.side_effect = [error, error]

        # Act
        with self.assertRaises(FatalError) as context:
            create_cloudshell_client(retries=2)

        # Assert: HTTP auth failures get the credentials-specific message.
        self.assertEqual(
            context.exception.message,
            u"Login to CloudShell failed. Please verify the credentials in the config",
        )

    @patch(
        "shellfoundry.utilities.cloudshell_api.client_wrapper.PackagingRestApiClient"
    )
    def test_client_wrapper_raises_an_error_when_create_client_fails_after_retries(
        self, api_mock
    ):
        # Arrange: first attempt raises, second would succeed, but
        # create_client itself is patched to fail fatally.
        api_mock.side_effect = [Exception(), api_mock]

        # Act
        with patch.object(
            CloudShellClient, "create_client", side_effect=FatalError("failure")
        ):
            with self.assertRaises(FatalError) as context:
                create_cloudshell_client(retries=2)

        # Assert
        self.assertEqual(context.exception.message, "failure")

    @patch(
        "shellfoundry.utilities.cloudshell_api.client_wrapper.PackagingRestApiClient"
    )
    def test_client_wrapper_creates_client_successfully(self, api_mock):
        # Arrange: constructing the API client returns the mock itself.
        api_mock.return_value = api_mock

        # Act
        cs_client = create_cloudshell_client()

        # Assert
        self.assertEqual(cs_client, api_mock)

    @patch(
        "shellfoundry.utilities.cloudshell_api.client_wrapper.PackagingRestApiClient"
    )
    def test_client_wrapper_creates_client_successfully_after_initial_exception(
        self, api_mock
    ):
        # Arrange: first attempt fails, the retry succeeds.
        api_mock.side_effect = [Exception(), api_mock]

        # Act
        cs_client = create_cloudshell_client(retries=2)

        # Assert
        self.assertEqual(cs_client, api_mock)
|
QualiSystems/shellfoundry
|
tests/test_utilities/test_cloudshell_api/test_client_wrapper.py
|
Python
|
apache-2.0
| 3,577
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import UInt16
def recv_buzzer(data):
    """Subscriber callback: log the type and value of each buzzer message."""
    rospy.loginfo(type(data))
    rospy.loginfo(data.data)

if __name__ == '__main__':
    rospy.init_node('buzzer')
    # Listen on the "buzzer" topic for UInt16 values.
    rospy.Subscriber("buzzer", UInt16, recv_buzzer)
    rospy.spin()  # keep the node alive until shutdown
|
Sourpomelo-Y6/pimouse_ros
|
scripts/buzzer2.py
|
Python
|
gpl-3.0
| 262
|
"""Derive a salted SHA-512 password hash (Jupyter-style ``sha512:salt:hash``)
from the plaintext password in config.py, then delete config.py so the
plaintext does not linger on disk."""
import codecs
import os
from hashlib import sha512

import config

# Generate random bytes for the salt
salt_bytes = os.urandom(100)
# Hex-encode and decode to text before slicing.  The original wrapped the
# bytes in str(), which on Python 3 bakes the "b'" prefix into the salt.
hexstr = codecs.encode(salt_bytes, 'hex').decode('ascii')
salt = hexstr[3:14]

# Hash the password with the salt
tohash = config.password + salt
tohash = tohash.encode("utf8")
hashed = sha512(tohash).hexdigest()

# Remove the config.py file which contains the plaintext password
for fn in ("config.py", "config.pyc"):
    try:
        os.remove(fn)
    except OSError as e:
        # `print e` was Python-2-only syntax; print(e) works on 2 and 3.
        print(e)

# Output in correct format
print("sha512:{}:{}".format(salt, hashed))
|
sixhobbits/jupyter-setup
|
hash_password.py
|
Python
|
apache-2.0
| 581
|
import sys
import time
from mpi4py.futures import MPICommExecutor
# Viewing window in the complex plane and output image resolution.
x0 = -2.0   # left edge (real axis)
x1 = +2.0   # right edge (real axis)
y0 = -1.5   # bottom edge (imaginary axis)
y1 = +1.5   # top edge (imaginary axis)
w = 1600    # image width in pixels
h = 1200    # image height in pixels
dx = (x1 - x0) / w  # horizontal step per pixel
dy = (y1 - y0) / h  # vertical step per pixel
def julia(x, y):
    """Escape-time count at (x, y) for the Julia set with c = 0.65i.

    Starting from z = x + yi, iterate z <- z^2 + c while |z| stays below 3,
    counting down from 255.  Returns the remaining count (255 means the
    point escaped immediately; 1 means it never escaped).
    """
    constant = 0.65j
    point = complex(x, y)
    count = 255
    while count > 1 and abs(point) < 3:
        point = point * point + constant
        count -= 1
    return count
def julia_line(k):
    """Render scanline *k* of the image as a bytearray of escape counts.

    Maps row k to its imaginary coordinate and each column to its real
    coordinate using the module-level window constants (x0, y1, dx, dy, w).
    """
    line = bytearray(w)
    y = y1 - k * dy
    for j in range(w):
        x = x0 + j * dx
        line[j] = julia(x, y)
    return line
def plot(image):
    """Best-effort display of *image* with matplotlib.

    Silently returns if matplotlib is not installed; drawing errors (e.g.
    headless backends) are ignored because the plot is only a demo nicety.
    """
    import warnings
    warnings.simplefilter('ignore', UserWarning)
    try:
        from matplotlib import pyplot as plt
    except ImportError:
        return
    plt.figure()
    plt.imshow(image, aspect='equal', cmap='cubehelix')
    plt.axis('off')
    try:
        plt.draw()
        plt.pause(2)
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only ignore ordinary errors.
        pass
def test_julia():
    """Render the Julia set in parallel over MPI and report the timing.

    Worker processes get executor=None from MPICommExecutor and return
    immediately; the master maps scanlines across the pool.
    """
    with MPICommExecutor() as executor:
        if executor is None: return # worker process
        tic = time.time()
        # Each task renders one scanline; chunksize batches rows per message.
        image = list(executor.map(julia_line, range(h), chunksize=10))
        toc = time.time()
        print("%s Set %dx%d in %.2f seconds." % ('Julia', w, h, toc-tic))
        if len(sys.argv) > 1 and sys.argv[1] == '-plot':
            plot(image)

if __name__ == '__main__':
    test_julia()
|
mpi4py/mpi4py
|
demo/futures/run_julia.py
|
Python
|
bsd-2-clause
| 1,252
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# License: MIT. See LICENSE
# import frappe
import unittest
class TestModuleOnboarding(unittest.TestCase):
    """Placeholder TestCase for the Module Onboarding doctype; no behaviour
    is exercised yet."""
    pass
|
frappe/frappe
|
frappe/desk/doctype/module_onboarding/test_module_onboarding.py
|
Python
|
mit
| 197
|
#===============================================================================
#
# ELL.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2021 Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
attribute_decl = {
'local':
"""
// Local %(attr_type)s %(name)s
std::vector< %(type)s > %(name)s;
""",
'semiglobal':
"""
// Semiglobal %(attr_type)s %(name)s
std::vector< %(type)s > %(name)s ;
""",
'global':
"""
// Global %(attr_type)s %(name)s
%(type)s %(name)s ;
"""
}
attribute_cpp_init = {
'local':
"""
// Local %(attr_type)s %(name)s
%(name)s = init_matrix_variable<%(type)s>(static_cast<%(type)s>(%(init)s));
""",
'semiglobal':
"""
// Semiglobal %(attr_type)s %(name)s
%(name)s = init_vector_variable<%(type)s>(static_cast<%(type)s>(%(init)s));
""",
'global':
"""
// Global %(attr_type)s %(name)s
%(name)s = %(init)s;
"""
}
attribute_cpp_size = {
'local': """
// Local %(attr_type)s %(name)s
size_in_bytes += sizeof(std::vector<%(ctype)s>);
size_in_bytes += sizeof(%(ctype)s) * %(name)s.capacity();
""",
'semiglobal': """
// Semiglobal %(attr_type)s %(name)s
size_in_bytes += sizeof(std::vector<%(ctype)s>);
size_in_bytes += sizeof(%(ctype)s) * %(name)s.capacity();
""",
'global': """
// Global
size_in_bytes += sizeof(%(ctype)s);
"""
}
attribute_cpp_delete = {
'local': """
// %(name)s
%(name)s.clear();
%(name)s.shrink_to_fit();
""",
'semiglobal': """
// %(name)s
%(name)s.clear();
%(name)s.shrink_to_fit();
""",
'global': ""
}
#############################################
## Synaptic delay
#############################################
delay = {
'uniform': {
'declare': """
// Uniform delay
int delay ;""",
'pyx_struct':
"""
# Uniform delay
int delay""",
'init': """
delay = delays[0][0];
""",
'pyx_wrapper_init':
"""
proj%(id_proj)s.delay = syn.uniform_delay""",
'pyx_wrapper_accessor':
"""
# Access to non-uniform delay
def get_delay(self):
return proj%(id_proj)s.delay
def get_dendrite_delay(self, idx):
return proj%(id_proj)s.delay
def set_delay(self, value):
proj%(id_proj)s.delay = value
"""},
'nonuniform_rate_coded': {
'declare': """
std::vector<int> delay;
int max_delay;
std::vector<std::vector<int>> get_delay() { return get_matrix_variable_all<int>(delay); }
void set_delay(std::vector<std::vector<int>> value) { update_matrix_variable_all<int>(delay, value); }
std::vector<int> get_dendrite_delay(int lil_idx) { return get_matrix_variable_row<int>(delay, lil_idx); }
""",
'init': """
delay = init_variable<int>(1);
update_variable_all<int>(delay, delays);
""",
'reset': "",
'pyx_struct':
"""
# Non-uniform delay
vector[vector[int]] get_delay()
void set_delay(vector[vector[int]])
vector[int] get_dendrite_delay(int)
int max_delay
void update_max_delay(int)
void reset_ring_buffer()
""",
'pyx_wrapper_init': "",
'pyx_wrapper_accessor':
"""
# Access to non-uniform delay
def get_delay(self):
return proj%(id_proj)s.get_delay()
def get_dendrite_delay(self, idx):
return proj%(id_proj)s.get_dendrite_delay(idx)
def set_delay(self, value):
proj%(id_proj)s.set_delay(value)
def get_max_delay(self):
return proj%(id_proj)s.max_delay
def set_max_delay(self, value):
proj%(id_proj)s.max_delay = value
def update_max_delay(self, value):
proj%(id_proj)s.update_max_delay(value)
def reset_ring_buffer(self):
proj%(id_proj)s.reset_ring_buffer()
"""
},
'nonuniform_spiking': {
'declare': """
std::vector<int> delay;
int max_delay;
int idx_delay;
std::vector< std::vector< std::vector< int > > > _delayed_spikes;
""",
'init': """
delay = init_variable<int>(1);
update_variable_all<int>(delay, delays);
idx_delay = 0;
max_delay = pop%(id_pre)s.max_delay;
""",
'reset': """
while(!_delayed_spikes.empty()) {
auto elem = _delayed_spikes.back();
elem.clear();
_delayed_spikes.pop_back();
}
idx_delay = 0;
max_delay = pop%(id_pre)s.max_delay ;
_delayed_spikes = std::vector< std::vector< std::vector< int > > >(max_delay, std::vector< std::vector< int > >(post_rank.size(), std::vector< int >()) );
""",
'pyx_struct':
"""
# Non-uniform delay
vector[vector[int]] delay
int max_delay
void update_max_delay(int)
void reset_ring_buffer()
""",
'pyx_wrapper_init': "",
'pyx_wrapper_accessor':
"""
# Access to non-uniform delay
def get_delay(self):
return proj%(id_proj)s.delay
def get_dendrite_delay(self, idx):
return proj%(id_proj)s.delay[idx]
def set_delay(self, value):
proj%(id_proj)s.delay = value
def get_max_delay(self):
return proj%(id_proj)s.max_delay
def set_max_delay(self, value):
proj%(id_proj)s.max_delay = value
def update_max_delay(self, value):
proj%(id_proj)s.update_max_delay(value)
def reset_ring_buffer(self):
proj%(id_proj)s.reset_ring_buffer()
"""
}
}
###############################################################
# Rate-coded continuous transmission
###############################################################
ell_summation_operation = {
'sum' : """
%(pre_copy)s
const %(idx_type)s nonvalue_idx = std::numeric_limits<%(idx_type)s>::max();
%(size_type)s ell_row_off, j;
%(idx_type)s nb_post = static_cast<%(idx_type)s>(post_ranks_.size());
for (%(idx_type)s i = 0; i < nb_post; i++) {
%(idx_type)s rk_post = post_ranks_[i]; // Get postsynaptic rank
sum = 0.0;
ell_row_off = i * maxnzr_;
for(j = ell_row_off; j < ell_row_off+maxnzr_; j++) {
%(idx_type)s rk_pre = col_idx_[j];
if (rk_pre == nonvalue_idx)
break;
sum += %(psp)s ;
}
pop%(id_post)s._sum_%(target)s%(post_index)s += sum;
}"""
}
###############################################################
# Rate-coded synaptic plasticity
###############################################################
update_variables = {
'local': """
const %(idx_type)s nonvalue_idx = std::numeric_limits<%(idx_type)s>::max();
// Check periodicity
if(_transmission && _update && pop%(id_post)s._active && ( (t - _update_offset)%%_update_period == 0L) ){
// Global variables
%(global)s
// Local variables
for(%(size_type)s i = 0; i < post_ranks_.size(); i++){
rk_post = post_ranks_[i]; // Get postsynaptic rank
// Semi-global variables
%(semiglobal)s
// Local variables
for(size_t j = i*maxnzr_; j < (i+1)*maxnzr_; j++) {
rk_pre = col_idx_[j]; // Get presynaptic rank
if (rk_pre == nonvalue_idx)
break;
%(local)s
}
}
}
"""
}
# Registry of all code templates for this (single-thread ELL) connectivity
# format, consumed by the code generator.
conn_templates = {
    # accessors
    'attribute_decl': attribute_decl,
    'attribute_cpp_init': attribute_cpp_init,
    'attribute_cpp_size': attribute_cpp_size,
    'attribute_cpp_delete': attribute_cpp_delete,
    'delay': delay,
    # operations
    'rate_coded_sum': ell_summation_operation,
    'vectorized_default_psp': {},
    'update_variables': update_variables
}
# Index placeholders substituted into the C++ templates above: j iterates the
# flattened ELL storage, i the post-synaptic rows.
conn_ids = {
    'local_index': '[j]',
    'semiglobal_index': '[i]',
    'global_index': '',
    'post_index': '[rk_post]',
    'pre_index': '[rk_pre]',
    'delay_u' : '[delay-1]' # uniform delay
}
|
vitay/ANNarchy
|
ANNarchy/generator/Projection/SingleThread/ELL.py
|
Python
|
gpl-2.0
| 8,508
|
import pytest
import audioset.util as util
@pytest.fixture()
def tf_bytestring():
    # Serialized TensorFlow record fixture. Appears to encode labels, a
    # video_id ("rmLnozgTQMY"), start/end times and one 128-byte
    # audio_embedding frame — TODO confirm against the AudioSet schema.
    return (b"\nv\n\x19\n\x06labels\x12\x0f\x1a\r\n\x0b\x00\xac\x02\xb4\x02"
            b"\xb5\x02\xbc\x02\xc7\x02\n\x1b\n\x08video_id\x12\x0f\n\r\n\x0b"
            b"rmLnozgTQMY\n\x1e\n\x12start_time_seconds\x12\x08\x12\x06\n\x04"
            b"\x00\x00\xf0A\n\x1c\n\x10end_time_seconds\x12\x08\x12\x06\n\x04"
            b"\x00\x00 B\x12\xa0\x01\n\x9d\x01\n\x0faudio_embedding\x12\x89"
            b"\x01\n\x86\x01\n\x83\x01\n\x80\x01\x1b\xac\x18\x00\xee\x8e\xa5"
            b"\x9ct\x9b\xb7\xb8\xc1\xe1\xd5ot~iF\x9d\xcc\xab\xf4\xd1\xca\xcf"
            b"\xa0\t\x00\xea\xf8U\xef\xd8A\xa3g\nE\xff\x00\xf5\xeb\xb8\xbcn6"
            b"\x12\x9cX\x0fa\x8b~ng\xa6\xe3D\xb2\x00g\x1a\xea\x00/\xd0\xa5\xcd"
            b"[|\xffR\x14\x00b\xc6\xff8E\x99\x87\xabN\xe2\x9f\xc0\x86\x00\xce"
            b"\xff\x8cm\x93\xaf^\x88k\x07xp\xff\xe0\xa3\xe5\x00\x86C&,\x84\xba"
            b"\x00\xf1\xfby\xb3k\xcfT\xcd\xc4\x81_n\x83\x94")
def test_filebase():
    # Representative path shapes: simple extension, double extension,
    # and degenerate empty inputs.
    cases = [
        ('foo/bar.baz', 'bar'),
        ('foo/bar.baz.whiz', 'bar.baz'),
        ('foo/', ''),
        ('', ''),
    ]
    for path, expected in cases:
        assert util.filebase(path) == expected
def test_safe_makedirs(tmpdir):
    """safe_makedirs must be idempotent and tolerate an empty path.

    Fixed: the original ignored the requested ``tmpdir`` fixture and created
    a ``foo`` directory in the current working directory, leaking state
    between test runs.
    """
    target = str(tmpdir.join('foo'))
    # Creating the same directory twice must not raise.
    util.safe_makedirs(target)
    util.safe_makedirs(target)
    # An empty path is treated as a no-op.
    util.safe_makedirs('')
def test_bytestring_to_record(tf_bytestring):
    # One embedding frame of 128 features and one metadata row are expected
    # from the single-clip fixture above.
    features, meta = util.bytestring_to_record(tf_bytestring)
    assert features.shape == (1, 128)
    assert len(meta) == 1
def test_load_tfrecord(tfrecords):
    # NOTE(review): relies on a `tfrecords` fixture that is not defined in
    # this file — presumably provided by conftest.py; verify.
    features, meta = util.load_tfrecord(tfrecords[0])
    # Loose sanity checks: non-trivial spread and aligned features/metadata.
    assert features.std() > 5
    assert len(features) == len(meta)
    assert len(meta) > 10
|
cosmir/dev-set-builder
|
tests/test_audioset_util.py
|
Python
|
mit
| 1,694
|
from common import * # NOQA
from cattle import ApiError
RESOURCE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/certs')
def test_create_cert_basic(client):
    """Creating a certificate populates state and all parsed metadata."""
    cert = _read_cert("san_domain_com.crt")
    key = _read_cert("san_domain_com.key")
    created = client.create_certificate(name=random_str(),
                                        cert=cert,
                                        key=key)
    created = client.wait_success(created)
    assert created.state == 'active'
    assert created.cert == cert
    # Every field parsed from the certificate must be populated.
    parsed_fields = ('certFingerprint', 'expiresAt', 'CN', 'issuer',
                     'issuedAt', 'algorithm', 'version', 'serialNumber',
                     'subjectAlternativeNames')
    for field in parsed_fields:
        assert getattr(created, field) is not None
    assert created.keySize == 2048
def test_dup_names(super_client, client):
    # Duplicate certificate names are allowed across accounts but rejected
    # (422 NotUnique) within the same account.
    cert_input = _read_cert("san_domain_com.crt")
    key = _read_cert("san_domain_com.key")
    name = random_str()
    cert1 = super_client. \
        create_certificate(name=name,
                           cert=cert_input,
                           key=key)
    super_client.wait_success(cert1)
    assert cert1.name == name
    # Same name, different account: allowed.
    cert2 = client. \
        create_certificate(name=name,
                           cert=cert_input,
                           key=key)
    cert2 = super_client.wait_success(cert2)
    assert cert2.name == name
    assert cert2.accountId != cert1.accountId
    # Same name, same account: rejected.
    with pytest.raises(ApiError) as e:
        super_client. \
            create_certificate(name=name,
                               cert=cert_input,
                               key=key)
    assert e.value.error.status == 422
    assert e.value.error.code == 'NotUnique'
def test_create_cert_invalid_cert(client):
    """An unparseable certificate must be rejected with 422 InvalidFormat."""
    bad_cert = _read_cert("cert_invalid.pem")
    bad_key = _read_cert("key.pem")
    with pytest.raises(ApiError) as excinfo:
        client.create_certificate(name=random_str(),
                                  cert=bad_cert,
                                  key=bad_key)
    err = excinfo.value.error
    assert err.status == 422
    assert err.code == 'InvalidFormat'
def test_create_cert_chain(client):
    # Certificates may be created together with an intermediate chain.
    cert = _read_cert("enduser-example.com.crt")
    key = _read_cert("enduser-example.com.key")
    chain = _read_cert("enduser-example.com.chain")
    cert1 = client. \
        create_certificate(name=random_str(),
                           cert=cert,
                           key=key,
                           certChain=chain)
    cert1 = client.wait_success(cert1)
    assert cert1.state == 'active'
    assert cert1.cert == cert
    # NOTE(review): returning from a pytest test triggers a warning on
    # recent pytest; presumably kept so other code can reuse the created
    # certificate — verify before removing.
    return cert1
def _read_cert(name):
    """Return the contents of certificate fixture *name* from RESOURCE_DIR."""
    fixture_path = os.path.join(RESOURCE_DIR, name)
    with open(fixture_path) as cert_file:
        return cert_file.read()
def test_update_cert(client):
    # Updating a certificate's cert/key must re-parse all derived metadata.
    cert1 = _read_cert("enduser-example.com.crt")
    key1 = _read_cert("enduser-example.com.key")
    c1 = client. \
        create_certificate(name=random_str(),
                           cert=cert1,
                           key=key1)
    c1 = client.wait_success(c1)
    # Replace with a different certificate/key pair.
    cert2 = _read_cert("san_domain_com.crt")
    key2 = _read_cert("san_domain_com.key")
    c2 = client.update(c1, cert=cert2, key=key2)
    c2 = client.wait_success(c2, 120)
    # All parsed fields are populated after the update...
    assert c2.certFingerprint is not None
    assert c2.expiresAt is not None
    assert c2.CN is not None
    assert c2.issuer is not None
    assert c2.issuedAt is not None
    assert c2.algorithm is not None
    assert c2.version is not None
    assert c2.serialNumber is not None
    assert c2.keySize == 2048
    assert c2.subjectAlternativeNames is not None
    assert c2.cert == cert2
    # ...and differ from the original certificate's values.
    assert c2.certFingerprint != c1.certFingerprint
    assert c2.expiresAt != c1.expiresAt
    assert c2.CN != c1.CN
    assert c2.issuer != c1.issuer
    assert c2.issuedAt != c1.issuedAt
    assert c2.serialNumber != c1.serialNumber
|
Cerfoglg/cattle
|
tests/integration-v1/cattletest/core/test_cert.py
|
Python
|
apache-2.0
| 3,961
|
# https://www.w3resource.com/python-exercises/
# 54. Write a Python program to get the current username
import getpass

# getpass.getuser() consults the environment (LOGNAME/USER/...) before
# falling back to the password database.
current_user = getpass.getuser()
print(current_user)
|
dadavidson/Python_Lab
|
Python-w3resource/Python_Basic/ex54.py
|
Python
|
mit
| 145
|
#!/usr/bin/python
from __future__ import division
from __future__ import with_statement
import numpy
import os
import sys
import toolbox_basic
import toolbox_results
import toolbox_schematic_new as toolbox_schematic
pi = numpy.pi
class SimulationDirectory:
    """Wrapper around one iDynoMiCS result directory.

    Locates (unzipping on demand) the agent_Sum, agent_State, env_Sum and
    env_State output folders and ensures figures/movies subdirectories
    exist. Iterate data is read lazily and cached on the instance.
    """
    def __init__(self, path):
        self.path = toolbox_basic.check_path(path)
        # Caches filled lazily by the get_* methods below.
        self.iterate_numbers = []
        self.iterate_information = []
        self.min_max_concns = {}
        # agent_Sum
        try:
            self.agent_Sum = os.path.join(self.path, 'agent_Sum')
            if not os.path.isdir( self.agent_Sum ):
                toolbox_basic.unzip_files(self.agent_Sum + '.zip')
            self.agent_Sum = toolbox_basic.check_path(self.agent_Sum)
        except TypeError:
            print('Could not find agent_Sum info! '+self.path)
        # agent_State
        try:
            self.agent_State = os.path.join(self.path, 'agent_State')
            if not os.path.isdir( self.agent_State ):
                toolbox_basic.unzip_files(self.agent_State + '.zip')
            self.agent_State = toolbox_basic.check_path(self.agent_State)
        except TypeError:
            print('Could not find agent_State info! '+self.path)
        # env_Sum
        try:
            self.env_Sum = os.path.join(self.path, 'env_Sum')
            if not os.path.isdir( self.env_Sum ):
                toolbox_basic.unzip_files(self.env_Sum + '.zip')
            self.env_Sum = toolbox_basic.check_path(self.env_Sum)
        except TypeError:
            print('Could not find env_Sum info! '+self.path)
        # env_State
        try:
            self.env_State = os.path.join(self.path, 'env_State')
            if not os.path.isdir( self.env_State ):
                toolbox_basic.unzip_files(self.env_State + '.zip')
            self.env_State = toolbox_basic.check_path(self.env_State)
        except TypeError:
            print('Could not find env_State info! '+self.path)
        # Figures directory
        self.figures_dir = os.path.join(self.path, 'figures')
        if not os.path.isdir(self.figures_dir):
            toolbox_basic.make_dir(self.figures_dir)
        self.movies_dir = os.path.join(self.path, 'movies')
        if not os.path.isdir(self.movies_dir):
            toolbox_basic.make_dir(self.movies_dir)
    def get_iterate_numbers(self):
        """
        Returns a (sorted) list of the iterate numbers, from agent_Sum
        """
        if not self.iterate_numbers == []:
            return self.iterate_numbers
        for f in toolbox_basic.file_list(self.agent_Sum, filetype='*.xml'):
            output = toolbox_results.Output(path=f)
            self.iterate_numbers.append(output.iterate)
        self.iterate_numbers.sort()
        return self.iterate_numbers
    def get_iterate_information(self):
        """
        Tries to read in all of the iterates for this simulation. Can be
        time-consuming for large or long simulations.
        """
        self.iterate_information = []
        for i in self.get_iterate_numbers():
            self.iterate_information.append(IterateInformation(self, i))
        return self.iterate_information
    def get_last_iterate_number(self):
        """
        Highest iterate number present in agent_Sum.
        """
        return max(self.get_iterate_numbers())
    def get_single_iterate(self, number):
        """
        Tries to get information for a single iteration, first by checking the
        list of iterates already read in, then by reading in the output files.
        """
        for i in self.iterate_information:
            if i.number == number:
                return i
        i = IterateInformation(self, number)
        self.iterate_information.append(i)
        return i
    def get_min_max_concns(self):
        """
        Overall [min, max] concentration per solute across all iterates
        (cached after the first call).
        """
        if self.min_max_concns == {}:
            for solute_name in self.get_solute_names():
                self.min_max_concns[solute_name] = [sys.float_info.max, 0.0]
            for i in self.get_iterate_information():
                iter_min_max = i.get_min_max_concns()
                for solute_name in self.min_max_concns.keys():
                    self.min_max_concns[solute_name] = \
                        [min(self.min_max_concns[solute_name][0],
                             iter_min_max[solute_name][0]),
                         max(self.min_max_concns[solute_name][1],
                             iter_min_max[solute_name][1])]
        return self.min_max_concns
    def get_solute_names(self):
        """
        Solute names, taken from the first iterate's environment output.
        """
        return self.get_iterate_information()[0].env_output.get_solute_names()
    def get_species_names(self):
        """
        Species names, taken from iterate 0's agent output.
        """
        return self.get_single_iterate(0).agent_output.get_species_names()
    def find_protocol_file_xml_tree(self, filename=None):
        """
        Parse the protocol file into an XML tree (auto-located when no
        filename is given); stored on self.protocol_file_xml_tree.
        """
        if filename is None:
            filename = toolbox_basic.find_protocol_file_path(self.path)
        self.protocol_file_xml_tree = toolbox_basic.get_xml_tree(filename)
    def find_domain_dimensions(self):
        """
        TODO Do this via the protocol file.
        """
        # Currently inferred from the first solute grid of iterate 0.
        env0 = self.get_single_iterate(0).env_output
        name = env0.get_solute_names()[0]
        sol0 = toolbox_results.SoluteOutput(env0, name)
        return sol0.grid_nI, sol0.grid_nJ, sol0.grid_nK, sol0.grid_res
    '''
    try:
        pfxt = self.protocol_file_xml_tree
    except Error:
        self.find_protocol_xml_tree()
    '''
    def clean_up(self):
        """
        Deletes all unzipped output folders TODO
        """
        pass
class ProtocolFile:
    """Placeholder for protocol-file handling (not yet implemented)."""
    def __init__(self, path):
        # The path argument is accepted but currently ignored.
        pass
class IterateInformation:
    """Agent and environment output for a single simulation iterate."""
    def __init__(self, simulation_directory, iterate_number):
        self.number = iterate_number
        # Cache for get_min_max_concns(), {solute_name: [min, max]}.
        self.min_max_concns = {}
        agent_path = os.path.join(simulation_directory.agent_State,
                                    'agent_State(%d).xml'%(iterate_number))
        agent_path = toolbox_basic.check_path(agent_path)
        self.agent_output = toolbox_results.AgentOutput(path=agent_path)
        self.time = self.agent_output.time
        env_path = os.path.join(simulation_directory.env_State,
                                    'env_State(%d).xml'%(iterate_number))
        env_path = toolbox_basic.check_path(env_path)
        self.env_output = toolbox_results.EnvOutput(path=env_path)
    def get_min_max_concns(self):
        """Return (caching) the [min, max] concentration of each solute."""
        if self.min_max_concns == {}:
            for solute_name in self.env_output.get_solute_names():
                solute_output = toolbox_results.SoluteOutput(self.env_output,
                                                        name=solute_name)
                self.min_max_concns[solute_name] = [min(solute_output.values),
                                                    max(solute_output.values)]
        return self.min_max_concns
def draw_cell_2d(axis, cell_output, total_radius=True, zorder=0, y_limits=None):
    """Draw one cell as a circle on a 2D matplotlib axis.

    When ``y_limits`` (min, max of the periodic y-domain) is given, a cell
    overlapping a boundary is drawn as two circle segments, one at each side.
    Fixed: Python-2-only ``print`` statement and ``== None`` comparisons
    (``print(...)`` and ``is None`` behave identically on Python 2).
    """
    (x, y, z) = cell_output.get_location()
    rad = cell_output.get_radius(total_radius=total_radius)
    # Fall back to green when the cell has no color assigned.
    if cell_output.color is None:
        print('Cell has no defined color!')
        col = (0, 1, 0)
    else:
        col = cell_output.color
    if (y_limits is not None) and (y - rad < y_limits[0]):
        # Cell sticks out below the lower y-boundary: draw the visible
        # segment plus the wrapped-around remainder at the upper edge.
        segment = toolbox_schematic.CircleSegment()
        segment.set_defaults(edgecolor='none', facecolor=col, zorder=zorder)
        angle = pi - numpy.arccos((y - y_limits[0])/rad)
        segment.set_points((y, x), rad, [angle, -angle])
        segment.draw(axis)
        segment.set_points((y - y_limits[0] + y_limits[1], x), rad, [angle, 2*pi-angle])
        segment.draw(axis)
    elif (y_limits is not None) and (y + rad > y_limits[1]):
        # Cell sticks out above the upper y-boundary: mirror-image case.
        segment = toolbox_schematic.CircleSegment()
        segment.set_defaults(edgecolor='none', facecolor=col, zorder=zorder)
        angle = numpy.arccos((y_limits[1] - y)/rad)
        segment.set_points((y, x), rad, [angle, 2*pi-angle])
        segment.draw(axis)
        segment.set_points((y + y_limits[0] - y_limits[1], x), rad, [-angle, angle])
        segment.draw(axis)
    else:
        # Fully inside the domain (or no wrapping requested): one circle.
        circle = toolbox_schematic.Circle()
        circle.set_defaults(edgecolor='none', facecolor=col, zorder=zorder)
        circle.set_points((y, x), rad)
        circle.draw(axis)
def plot_cells_2d(axis, agent_output, zorder=0):
    """Draw every cell of *agent_output* on a 2D axis, wrapping at the
    periodic y-boundary of the domain."""
    print('Plotting %d cells' % len(agent_output.get_all_cells()))
    domain_width = agent_output.grid_nJ * agent_output.grid_res
    for cell in agent_output.get_all_cells():
        draw_cell_2d(axis, cell, zorder=zorder, y_limits=[0, domain_width])
def draw_cell_3d(axis, cell_output, total_radius=True, zorder=0, y_limits=None):
    """Draw one cell as a sphere on a 3D matplotlib axis.

    ``y_limits`` is accepted for signature parity with draw_cell_2d but is
    unused here (no boundary wrapping in 3D). Fixed: Python-2-only ``print``
    statement and ``== None`` comparison.
    """
    (x, y, z) = cell_output.get_location()
    rad = cell_output.get_radius(total_radius=total_radius)
    # Fall back to green when the cell has no color assigned.
    if cell_output.color is None:
        print('Cell has no defined color!')
        col = (0, 1, 0)
    else:
        col = cell_output.color
    sphere = toolbox_schematic.Sphere()
    sphere.set_defaults(edgecolor='none', facecolor=col, zorder=zorder)
    # NOTE(review): the +4 offset on x looks like a manual plotting
    # adjustment — confirm before relying on absolute positions.
    sphere.set_points((y, z, x+4), rad)
    sphere.draw(axis)
def plot_cells_3d(axis, agent_output, zorder=0):
    """Draw every cell as a sphere on a 3D axis, printing an in-place
    console progress bar, then fix axis limits to the simulation domain."""
    res = agent_output.grid_res
    width = agent_output.grid_nJ * res
    height = agent_output.grid_nI * res
    depth = agent_output.grid_nK * res
    num_cells = len(agent_output.get_all_cells())
    counter = 0
    for cell in agent_output.get_all_cells():
        draw_cell_3d(axis, cell, zorder=zorder)
        counter += 1
        # Progress bar: i runs 0..20, so '='*i fills the bar and 5*i is 0..100%.
        sys.stdout.write('\r')
        i = int(20*counter/num_cells)
        sys.stdout.write("Plotting cells [%-20s] %d%%" % ('='*i, 5*i))
        sys.stdout.flush()
    sys.stdout.write('\n')
    axis.set_xlim(0, width)
    axis.set_ylim(0, depth)
    axis.set_zlim(0, height)
def get_default_species_colors(sim):
    """Map each species name in *sim* to a distinct default color.

    Colors are assigned in a fixed order; if there are more species than
    default colors, a warning is printed and the partial mapping returned.
    Fixed: Python-2-only ``print`` statement (``print(...)`` is valid on
    both Python 2 and 3 for a single argument).
    """
    colors = ['red', 'blue', 'green', 'cyan', 'yellow', 'purple', 'brown']
    out = {}
    for species_name in sim.get_species_names():
        if not colors:
            print("Not enough default colors for so many species!")
            return out
        out[species_name] = colors.pop(0)
    return out
def save_color_dict(color_dict, file_path):
    """Write *color_dict* to *file_path* as a two-column table.

    Format: a 'Item\\t\\tColor' header, then one 'key\\t\\tvalue' line per
    entry (readable back with read_color_dict). Fixed: ``dict.iteritems()``
    is Python-2-only; ``items()`` works on both Python 2 and 3.
    """
    script = 'Item\t\tColor\n'
    for key, value in color_dict.items():
        script += str(key)+'\t\t'+str(value)+'\n'
    with open(file_path, 'w') as f:
        f.write(script)
def read_color_dict(file_path):
    """Read a color table written by save_color_dict back into a dict.

    Skips the header line; each remaining line is split on the double-tab
    separator. Fixed: the 'U' (universal newlines) open-mode flag was
    deprecated and then removed in Python 3.11; plain text mode already
    translates newlines on Python 3.
    """
    out = {}
    file_path = toolbox_basic.check_path(file_path)
    with open(file_path, 'r') as f:
        for line in f.readlines()[1:]:
            line = line.replace('\n', '')
            vals = line.split('\t\t')
            out[vals[0]] = vals[1]
    return out
def color_cells_by_species(agent_output, species_color_dict):
    """Assign to every cell the color mapped to its species name."""
    for species in agent_output.species_outputs:
        chosen_color = species_color_dict[species.name]
        print('Colouring %d %s cells %s'
            % (len(species.members), species.name, chosen_color))
        for member in species.members:
            member.color = chosen_color
# Find a list of standard colormaps (cmap) at
# http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
# It is also possible to define your own
def solute_contour(axis, solute_output, interpolation='nearest', zorder=-10,
                cmap='gray', concn_range=[None]*2, array_multiplier=1):
    """
    Draw the 2D solute concentration field as an image on *axis* and return
    the matplotlib image (useful for attaching a colorbar). concn_range is
    [vmin, vmax]; [None, None] lets matplotlib autoscale.
    """
    width = solute_output.grid_nJ * solute_output.grid_res
    height = solute_output.grid_nI * solute_output.grid_res
    extent = [0, width, 0, height]
    array = solute_output.concentration_array()
    # Optional scaling/unit conversion of the raw concentrations.
    if not array_multiplier == 1:
        array = numpy.multiply(array, array_multiplier)
    cs = axis.imshow(array,
                interpolation=interpolation, origin='lower', cmap=cmap,
                extent=extent, zorder=zorder, vmin=concn_range[0], vmax=concn_range[1])
    return cs
def solute_contour_3d(axis, solute_output, zorder=-10,
                cmap='gray', concn_range=[None]*2, array_multiplier=1):
    """
    Plot solute concentrations on the boundary planes of a 3D axis.

    Filled contours of the (3D) concentration array are projected onto the
    x=0 and y=0 planes, a uniform surface is drawn at z=0, and the domain
    edges are outlined in black. Returns the contour set of the y-plane
    plot (for attaching a colorbar).
    """
    array = solute_output.concentration_array()
    # The array will be in 3D
    if not array_multiplier == 1:
        array = numpy.multiply(array, array_multiplier)
    # Fill the default color scale from the data. BUGFIX: the original check
    # was inverted ('if not concn_range == [None]*2'), which overwrote any
    # caller-supplied range and crashed on linspace(None, None) with the
    # default argument.
    if concn_range == [None]*2:
        concn_range = [numpy.min(array), numpy.max(array)]
    levels = numpy.linspace(concn_range[0], concn_range[1], 128)
    res = solute_output.grid_res
    nI = solute_output.grid_nI
    nJ = solute_output.grid_nJ
    nK = solute_output.grid_nK
    Y, Z = numpy.meshgrid(numpy.linspace(0, res*nK, nK),
                          numpy.linspace(0, res*nI, nI))
    axis.contourf(array[:, :, 0], Y, Z, zdir='x', cmap=cmap, offset=0,
                  zorder=zorder, levels=levels)
    X, Z = numpy.meshgrid(numpy.linspace(0, res*nJ, nJ),
                          numpy.linspace(0, res*nI, nI))
    cs = axis.contourf(X, array[:, 0, :], Z, zdir='y', cmap=cmap, offset=0,
                       zorder=zorder, levels=levels)
    # Plots a black surface at the bottom. Could be done better!
    array = numpy.ones([nJ, nK])*concn_range[0]
    X, Y = numpy.meshgrid(numpy.linspace(0, res*nJ, nJ),
                          numpy.linspace(0, res*nK, nK))
    axis.contourf(X, Y, array, zdir='z', cmap='gray', offset=0,
                  zorder=zorder, levels=levels)
    # Outline the visible domain edges.
    X = [0, 0, 0, res*nJ, res*nJ]
    Y = [res*nK, res*nK, 0, 0, 0]
    Z = [0, res*nI, res*nI, res*nI, 0]
    axis.plot(X, Y, Z, 'k-')
    return cs
|
roughhawkbit/robs-python-scripts
|
toolbox_idynomics.py
|
Python
|
mit
| 13,823
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import api, SUPERUSER_ID
_logger = logging.getLogger(__name__)
def post_init_hook(cr, registry):
    """
    Create a payment group for every existing payment
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    # On v10, reconciling from bank statements may create payments without a
    # partner. Payment groups require a partner, so only payments that have
    # one are migrated (instead of all non-transfer payments).
    payments_with_partner = env['account.payment'].search(
        [('partner_id', '!=', False)])
    for payment in payments_with_partner:
        _logger.info('creating payment group for payment %s' % payment.id)
        # Map the payment state onto the payment-group state machine.
        group_state = 'posted' if payment.state in ['sent', 'reconciled'] else payment.state
        if group_state == 'cancelled':
            group_state = 'cancel'
        env['account.payment.group'].create({
            'company_id': payment.company_id.id,
            'partner_type': payment.partner_type,
            'partner_id': payment.partner_id.id,
            'payment_date': payment.date,
            'communication': payment.ref,
            'payment_ids': [(4, payment.id, False)],
            'state': group_state,
        })
|
ingadhoc/account-payment
|
account_payment_group/hooks.py
|
Python
|
agpl-3.0
| 1,366
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
# Maximum number of seed addresses to emit.
NSEEDS = 600

import re
import sys
from subprocess import check_output


def main():
    """Read one "a.b.c.d:8334" line per seed from stdin and print the
    pnSeed[] initializer rows (eight hex literals per row).

    Fixed: Python-2-only ``print`` statement — ``print(...)`` with a single
    argument behaves identically on Python 2 and 3.
    """
    lines = sys.stdin.readlines()
    ips = []
    pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):8334")
    for line in lines:
        m = pattern.match(line)
        if m is None:
            continue
        # Pack the four dotted-quad octets into one little-endian 32-bit int.
        ip = 0
        for i in range(0, 4):
            ip = ip + (int(m.group(i+1)) << (8*(i)))
        if ip == 0:
            continue
        ips.append(ip)
    # Emit at most NSEEDS addresses, eight per output row.
    for row in range(0, min(NSEEDS, len(ips)), 8):
        print(" " + ", ".join(["0x%08x" % i for i in ips[row:row+8]]) + ",")


if __name__ == '__main__':
    main()
|
shillingcoins/source
|
contrib/seeds/makeseeds.py
|
Python
|
mit
| 708
|
# -*- encoding: utf-8 -*-
import pytest
from abjad import *
def test_scoretools_GraceContainer_01():
    r'''Grace music is a container.
    '''
    # Three sixteenth notes (c', d', e') wrapped in a grace container.
    gracecontainer = scoretools.GraceContainer(
        [Note(0, (1, 16)), Note(2, (1, 16)), Note(4, (1, 16))])
    # Default formatting uses the LilyPond \grace command.
    assert systemtools.TestManager.compare(
        gracecontainer,
        r'''
        \grace {
            c'16
            d'16
            e'16
        }
        '''
        )
    assert isinstance(gracecontainer, Container)
    assert len(gracecontainer) == 3
def test_scoretools_GraceContainer_02():
    r'''GraceContainer.kind is managed attribute.
    GraceContainer.kind knows about "after", "grace",
    "acciaccatura", "appoggiatura".
    '''
    gracecontainer = scoretools.GraceContainer(
        [Note(0, (1, 16)), Note(2, (1, 16)), Note(4, (1, 16))])
    # Each recognised kind round-trips through the setter.
    gracecontainer.kind = 'acciaccatura'
    assert gracecontainer.kind == 'acciaccatura'
    gracecontainer.kind = 'grace'
    assert gracecontainer.kind == 'grace'
    gracecontainer.kind = 'after'
    assert gracecontainer.kind == 'after'
    gracecontainer.kind = 'appoggiatura'
    assert gracecontainer.kind == 'appoggiatura'
    # Unrecognised kinds are rejected with an AssertionError.
    assert pytest.raises(AssertionError, 'gracecontainer.kind = "blah"')
def test_scoretools_GraceContainer_03():
    r'''Grace formats correctly as grace.
    '''
    gracecontainer = scoretools.GraceContainer("c'8 c'8 c'8")
    gracecontainer.kind = 'grace'
    # kind='grace' renders with the LilyPond \grace command.
    assert systemtools.TestManager.compare(
        gracecontainer,
        r'''
        \grace {
            c'8
            c'8
            c'8
        }
        '''
        )
def test_scoretools_GraceContainer_04():
    r'''Grace formats correctly as acciaccatura.
    '''
    gracecontainer = scoretools.GraceContainer("c'8 c'8 c'8")
    gracecontainer.kind = 'acciaccatura'
    # kind='acciaccatura' renders with the LilyPond \acciaccatura command.
    assert systemtools.TestManager.compare(
        gracecontainer,
        r'''
        \acciaccatura {
            c'8
            c'8
            c'8
        }
        '''
        )
def test_scoretools_GraceContainer_05():
    r'''Grace formats correctly as appoggiatura.
    '''
    gracecontainer = scoretools.GraceContainer("c'8 c'8 c'8")
    gracecontainer.kind = 'appoggiatura'
    # kind='appoggiatura' renders with the LilyPond \appoggiatura command.
    assert systemtools.TestManager.compare(
        gracecontainer,
        r'''
        \appoggiatura {
            c'8
            c'8
            c'8
        }
        '''
        )
def test_scoretools_GraceContainer_06():
    r'''Grace formats correctly as after grace.
    '''
    gracecontainer = scoretools.GraceContainer("c'8 c'8 c'8")
    gracecontainer.kind = 'after'
    # kind='after' renders as a plain brace group (no grace command).
    assert systemtools.TestManager.compare(
        gracecontainer,
        r'''
        {
            c'8
            c'8
            c'8
        }
        '''
        )
def test_scoretools_GraceContainer_07():
    r'''Grace containers can be appended.
    '''
    container = scoretools.GraceContainer("c'8 c'8")
    extra_note = Note(1, (1, 4))
    container.append(extra_note)
    # The appended note becomes the final (third) element.
    assert len(container) == 3
    assert container[-1] is extra_note
def test_scoretools_GraceContainer_08():
    r'''Grace containers can be extended.
    '''
    container = scoretools.GraceContainer("c'8 c'8")
    new_notes = Note(1, (1, 4)) * 2
    container.extend(new_notes)
    # Both new notes land at the end, in order.
    assert len(container) == 4
    assert tuple(container[-2:]) == tuple(new_notes)
|
mscuthbert/abjad
|
abjad/tools/scoretools/test/test_scoretools_GraceContainer.py
|
Python
|
gpl-3.0
| 3,332
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 22:03:21 2017
@author: p
"""
import numpy as np

a = np.ones((33))
# NOTE(review): hard-coded, machine-specific output path; appends the
# stringified array on every run — consider parameterizing.
with open('/home/p/ABC/abc/Epics/test', 'a+') as f:
    f.writelines(str(a))
# Strip newlines and the surrounding brackets from the array repr.
A = str(a).replace('\n', '')[1:-1:] + ' '
# Fixed: Python-2-only `print A` statement; also removed the dead
# `if True: pass / elif True: pass` block, which had no effect.
print(A)
|
iABC2XYZ/abc
|
Epics/testWrite.py
|
Python
|
gpl-3.0
| 322
|
'''
Created on Dec 26, 2012
@author: dstrauss
Copyright 2013 David Strauss
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
class for implementing the l1 update via pipelined admm
'''
import numpy as np
import solver
import copy
class lasso(object):
    ''' class to implement l1 minimization update '''
    def __init__(self, m, n, rho, lmb, ch):
        ''' Store problem sizes and ADMM parameters.

        m   -- number of measurements
        n   -- number of unknowns
        rho -- internal ADMM penalty parameter
        lmb -- fixed l1 regularization weight
        ch  -- number of channels of simultaneous data
        '''
        self.m = m
        self.n = n
        self.ch = ch  # number of channels of simultaneous data
        self.rho = rho  # internal algorithm parameter
        self.lmb = lmb  # fixed regularization parameter
        self.alp = 1.5  # over-relaxation parameter
        self.zp = np.zeros(self.n, dtype='complex128')  # primal variable
        self.zd = np.zeros(self.n, dtype='complex128')  # aux variable

    def solveL1(self, y, A):
        ''' solve min ||y-Ax|| + lmb||x||_1 with a warm start for x=zt
        input: x,y,A
        where A is a designerConv/convFourier object with mtx, mtxT routines
        '''
        # make sure that the input data and the transformation is the correct size
        assert(len(y) == self.ch)
        assert(len(A) == self.ch)
        # Fixed: xrange is Python-2-only; range behaves identically here.
        zt = [np.zeros(self.n, dtype='complex128') for ix in range(self.ch)]
        zd = [np.zeros(self.n, dtype='complex128') for ix in range(self.ch)]
        Atb = [Al.mtxT(yl) for Al, yl in zip(A, y)]
        M = [invOp(Al, self.rho, self.m) for Al in A]
        # Per-iteration residual and primal/dual gap histories.
        self.rrz = list()
        self.gap = list()
        for itz in range(20):
            b = [Atbl + self.rho*(zdl-ztl) for Atbl, zdl, ztl in zip(Atb, zd, zt)]
            sout = [solver.cg(Ml, Al.mtx(bl), tol=1e-6, maxiter=20) for Ml, Al, bl in zip(M, A, b)]
            stg = 'l1 iter: ' + repr(itz)
            for ss in sout:
                stg += ' cvg ' + repr(ss[1])
            zold = copy.deepcopy(zd)
            # project into linear constraint
            uux = [bl/self.rho-(1.0/(self.rho**2))*(Al.mtxT(ss[0])) for bl, Al, ss in zip(b, A, sout)]
            # over relax
            zp = [self.alp*uuxl + (1.0 - self.alp)*zoldl for uuxl, zoldl in zip(uux, zold)]
            # soft threshold
            zths = [a+b for a, b in zip(zp, zt)]
            zd = svtspecial(zths, self.lmb/self.rho)
            # update dual variables
            zt = [a + b-c for a, b, c in zip(zt, zp, zd)]
            self.rrz.append(sum([np.linalg.norm(Al.mtx(zpl) - yl) for Al, zpl, yl in zip(A, zp, y)]))
            self.gap.append(sum([np.linalg.norm(zpl-zdl) for zpl, zdl in zip(zp, zd)]))
        return zd
def invOp(fnc, rho, n):
    '''create an object that does a simple algebraic multiplication'''
    def apply_op(x):
        # Computes x + (1/rho) * F F^T x, where F is fnc's operator.
        return x + (1.0/rho)*fnc.mtx(fnc.mtxT(x))
    return apply_op
def svt(z, lmb):
    ''' soft thresholding '''
    # Shrink each entry's magnitude by lmb, clamping at zero.
    shrink_factor = np.maximum(1.0 - lmb/np.abs(z), 0.0)
    return shrink_factor*z
def svtspecial(z, lmb):
    ''' soft thresholding '''
    # Group variant: the shrink factor is computed from the joint
    # (across-channel) magnitude rather than per-entry.
    joint_norm = np.sqrt(sum(np.abs(z)**2))
    return np.maximum(1.0 - lmb/joint_norm, 0.0)*z
def testSvt():
    '''Visual check: plot the soft-thresholding operator over [-5, 5].'''
    import matplotlib.pyplot as plt
    x = np.linspace(-5,5,500)
    y = svt(x,2)
    plt.figure(1)
    plt.plot(x,y)
    plt.show()
    return y
def test():
    ''' test against matlab routine. double check! because there ain't no CVX for this'''
    import scipy.io as spio
    import matplotlib.pyplot as plt
    import designerConv
    import convFourier
    # load data from a fakel1 data set created by 'testL1.m'
    D = spio.loadmat('fakeL1.mat')
    m = D['m'].astype('int64').flatten()
    p = D['p'].astype('int64').flatten()
    q = D['q'].astype('int64').flatten()
    wTrue = D['wTrue']
    y = D['sig'].flatten()
    print 'size of m,p,q ' + repr(m) + ' ' + repr(p) + ' ' + repr(q)
    print 'and thes is mp ' + repr(m*p)
    rho = 1
    lmb = 0.2
    # NOTE(review): lasso.__init__ takes (m, n, rho, lmb, ch) — this call
    # passes only four arguments and would raise TypeError; likely stale.
    W = lasso(m,(p+1)*m,rho,lmb)
    # A = designerConv.convOperator(m,p,q)
    A = convFourier.convFFT(m,p,q)
    A.changeWeights(wTrue)
    print wTrue.shape
    print y.shape
    z = W.solveL1(y, A)
    # Compare against the MATLAB reference solutions stored in the .mat file.
    zTrue = D['zTrue'].flatten()
    zM = D['zd'].flatten()
    print np.linalg.norm(zM-z)
    plt.figure(83)
    plt.plot(range(z.size), np.abs(z), range(zTrue.size), np.abs(zTrue),
             range(zM.size), np.abs(zM))
    plt.show()
if __name__=='__main__':
    test()
|
daStrauss/sparseConv
|
src/lassoUpdate.py
|
Python
|
apache-2.0
| 5,313
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import IntProperty, EnumProperty, FloatProperty, StringProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, list_match_func, list_match_modes
import numpy as np
def range_step_stop(start, stop, step, n_type, out_numpy):
    '''Behaves like range but for floats'''
    # Guard against zero/negative step sizes; direction is taken from the
    # start/stop ordering instead of the step's sign.
    magnitude = max(1e-5, abs(step))
    signed_step = -magnitude if start > stop else magnitude
    values = np.arange(start, stop, signed_step, dtype=n_type)
    return values if out_numpy else values.tolist()
def range_stop_count(start, stop, count, n_type, out_numpy):
    ''' Gives count total values in [start,stop] '''
    # count may arrive as a float from the node socket; linspace needs an int.
    values = np.linspace(start, stop, num=int(count), dtype=n_type)
    return values.tolist() if not out_numpy else values
def range_step_count(start, step, count, n_type, out_numpy):
    ''' Gives count values with step from start'''
    # End point chosen so that consecutive values differ by exactly `step`.
    last = start + step * (count - 1)
    values = np.linspace(start, last, num=int(count), dtype=n_type)
    return values.tolist() if not out_numpy else values
class SvGenNumberRange(bpy.types.Node, SverchCustomTreeNode):
    ''' Generator range list of floats'''
    bl_idname = 'SvGenNumberRange'
    bl_label = 'Number Range'
    bl_icon = 'IPO_LINEAR'

    # Int and float variants of every bound are kept as separate properties so
    # each mode remembers its own values when the user switches back and forth.
    start_float: FloatProperty(
        name='start', description='start',
        default=0, update=updateNode)

    stop_float: FloatProperty(
        name='stop', description='stop',
        default=10, update=updateNode)

    count_: IntProperty(
        name='count', description='number of items',
        default=10, min=1, update=updateNode)

    step_float: FloatProperty(
        name='step', description='step, difference among items',
        default=1.0, update=updateNode)

    start_int: IntProperty(
        name='start', description='start',
        default=0, update=updateNode)

    stop_int: IntProperty(
        name='stop', description='stop',
        default=10, update=updateNode)

    step_int: IntProperty(
        name='step', description='step, difference among items',
        default=1, min=1, update=updateNode)

    list_match: EnumProperty(
        name="List Match",
        description="Behavior on different list lengths",
        items=list_match_modes, default="REPEAT",
        update=updateNode)

    flat_output: BoolProperty(
        name="Flat output",
        description="Flatten output by list-joining level 1",
        default=True,
        update=updateNode)

    output_numpy: BoolProperty(
        name='Output NumPy',
        description='Output NumPy arrays',
        default=False, update=updateNode)

    # Cache of the last applied (number_mode + range_mode) combination,
    # used to avoid rewiring sockets when nothing actually changed.
    current_mode: StringProperty(default="FLOATRANGE")

    main_modes = [
        ("int", "Int", "Integer Series", 1),
        ("float", "Float", "Float Series", 2),
    ]

    range_modes = [
        ("RANGE", "Range", "Define range by setting start, step and stop.", 1),
        ("RANGE_COUNT", "Count", "Define range by setting start, stop and count number (divisions).", 2),
        ("RANGE_STEP", "Step", "Define range by setting start, step and count number", 3),
    ]

    def mode_change(self, context):
        """Rebind socket properties/labels when the number or range mode changes."""
        # just because click doesn't mean we need to change mode
        mode = self.number_mode + self.range_mode
        if mode == self.current_mode:
            return

        mode = self.range_mode
        self.inputs[0].prop_name = 'start_' + self.number_mode
        if mode == 'RANGE':
            self.inputs[1].prop_name = 'stop_' + self.number_mode
            self.inputs[1].label = 'stop'
            self.inputs[2].prop_name = 'step_' + self.number_mode
            self.inputs[2].label = 'step'
        elif mode == 'RANGE_COUNT':
            self.inputs[1].prop_name = 'stop_' + self.number_mode
            self.inputs[1].label = 'stop'
            self.inputs[2].prop_name = 'count_'
            self.inputs[2].label = 'count'
        else:
            self.inputs[1].prop_name = 'step_' + self.number_mode
            self.inputs[1].label = 'step'
            self.inputs[2].prop_name = 'count_'
            self.inputs[2].label = 'count'

        self.current_mode = mode
        updateNode(self, context)

    # NOTE: these EnumProperties must be declared after mode_change so the
    # update callback can be referenced directly.
    number_mode: EnumProperty(
        name='Number Type',
        items=main_modes,
        default='float',
        update=mode_change)

    range_mode: EnumProperty(
        name='Range Mode',
        items=range_modes,
        default='RANGE',
        update=mode_change)

    def sv_init(self, context):
        """Create the three input sockets and the output socket.

        NOTE(review): the socket *names* "Step" and "Stop" are swapped relative
        to the props/labels they carry; kept as-is because renaming sockets
        would break existing node trees that link by name.
        """
        self.inputs.new('SvStringsSocket', "Start").prop_name = 'start_float'
        sock1 = self.inputs.new('SvStringsSocket', "Step")
        sock1.prop_name = 'stop_float'
        sock1.label = 'stop'
        sock2 = self.inputs.new('SvStringsSocket', "Stop")
        sock2.prop_name = 'step_float'
        sock2.label = 'step'
        self.outputs.new('SvStringsSocket', "Range")

    def draw_buttons(self, context, layout):
        """Draw the two mode selectors in the node body."""
        layout.prop(self, "number_mode", expand=True)
        layout.prop(self, "range_mode", expand=True)

    def draw_buttons_ext(self, ctx, layout):
        """Draw the full option set in the sidebar (N-panel)."""
        layout.prop(self, "number_mode", expand=True)
        layout.prop(self, "range_mode", expand=True)
        layout.prop(self, "list_match", expand=False)
        layout.prop(self, "flat_output", expand=False)
        layout.prop(self, "output_numpy", expand=False)

    def rclick_menu(self, context, layout):
        """Expose the options in the right-click context menu."""
        layout.prop_menu_enum(self, "number_mode")
        layout.prop_menu_enum(self, "range_mode")
        layout.prop_menu_enum(self, "list_match", text="List Match")
        layout.prop(self, "flat_output", expand=False)
        layout.prop(self, "output_numpy", expand=False)

    # Dispatch table: the range function depends only on the range mode.
    range_func_dict = {'RANGE': range_step_stop,
                       'RANGE_COUNT': range_stop_count,
                       'RANGE_STEP': range_step_count}

    def migrate_from(self, old_node):
        """Copy settings over from the legacy SvGenFloatRange / int-range nodes."""
        if old_node.bl_idname == 'SvGenFloatRange':
            self.number_mode = 'float'
            self.start_float = old_node.start_
            self.stop_float = old_node.stop_
            self.step_float = old_node.step_
            self.count_ = old_node.count_
        else:
            self.number_mode = 'int'
            self.start_int = old_node.start_
            self.stop_int = old_node.stop_
            self.step_int = old_node.step_
            self.count_ = old_node.count_
        if old_node.mode in ['FRANGE', 'LAZYRANGE']:
            self.range_mode = 'RANGE'
        elif old_node.mode == 'FRANGE_COUNT':
            self.range_mode = 'RANGE_COUNT'
        else:
            self.range_mode = "RANGE_STEP"

    def process(self):
        """Generate the requested range(s) and push them to the output socket."""
        inputs = self.inputs
        outputs = self.outputs
        if not outputs[0].is_linked:
            return
        matching_f = list_match_func[self.list_match]
        params = [s.sv_get() for s in inputs]
        # Fix: the original duplicated the range_func_dict lookup in both
        # branches; only the dtype depends on the number mode.
        dtype = np.int64 if self.number_mode == 'int' else np.float64
        current_func = self.range_func_dict[self.range_mode]
        result = []
        add_f = result.extend if self.flat_output else result.append
        out_numpy = self.output_numpy
        for p in zip(*matching_f(params)):
            out = [current_func(*args, dtype, out_numpy) for args in zip(*matching_f(p))]
            add_f(out)

        outputs['Range'].sv_set(result)
def register():
    """Register the node class with Blender (called when the add-on is enabled)."""
    bpy.utils.register_class(SvGenNumberRange)
def unregister():
    """Unregister the node class (called when the add-on is disabled)."""
    bpy.utils.unregister_class(SvGenNumberRange)
|
DolphinDream/sverchok
|
nodes/number/number_range.py
|
Python
|
gpl-3.0
| 8,373
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Given a swatch name from bootswatch.com and a parent theme, creates a custom theme."""
from __future__ import print_function
import os
import requests
from nikola.plugin_categories import Command
from nikola import utils
LOGGER = utils.get_logger('bootswatch_theme', utils.STDERR_HANDLER)
class CommandBootswatchTheme(Command):
    """Given a swatch name from bootswatch.com and a parent theme, creates a custom theme."""

    name = "bootswatch_theme"
    doc_usage = "[options]"
    doc_purpose = "given a swatch name from bootswatch.com and a parent theme, creates a custom"\
        " theme"
    cmd_options = [
        {
            'name': 'name',
            'short': 'n',
            'long': 'name',
            'default': 'custom',
            'type': str,
            'help': 'New theme name (default: custom)',
        },
        {
            'name': 'swatch',
            'short': 's',
            # Fix: 'long' was missing, so the --swatch long option did not
            # exist even though the other options provide one.
            'long': 'swatch',
            'default': '',
            'type': str,
            'help': 'Name of the swatch from bootswatch.com.'
        },
        {
            'name': 'parent',
            'short': 'p',
            'long': 'parent',
            'default': 'bootstrap3',
            'help': 'Parent theme name (default: bootstrap3)',
        },
    ]

    def _execute(self, options, args):
        """Given a swatch name and a parent theme, creates a custom theme.

        Downloads the swatch's bootstrap CSS files into
        themes/<name>/assets/css and records the parent theme.
        Returns 1 when the mandatory swatch option is missing.
        """
        name = options['name']
        swatch = options['swatch']
        if not swatch:
            LOGGER.error('The -s option is mandatory')
            return 1
        parent = options['parent']
        version = ''

        # See if we need bootswatch for bootstrap v2 or v3
        themes = utils.get_theme_chain(parent)
        if 'bootstrap3' not in themes and 'bootstrap3-jinja' not in themes:
            version = '2'
        elif 'bootstrap' not in themes and 'bootstrap-jinja' not in themes:
            LOGGER.warn('"bootswatch_theme" only makes sense for themes that use bootstrap')
        elif 'bootstrap3-gradients' in themes or 'bootstrap3-gradients-jinja' in themes:
            LOGGER.warn('"bootswatch_theme" doesn\'t work well with the bootstrap3-gradients family')

        LOGGER.info("Creating '{0}' theme from '{1}' and '{2}'".format(name, swatch, parent))
        utils.makedirs(os.path.join('themes', name, 'assets', 'css'))
        for fname in ('bootstrap.min.css', 'bootstrap.css'):
            # v3 swatches live at the site root; v2 swatches under /2/.
            url = 'http://bootswatch.com'
            if version:
                url += '/' + version
            url = '/'.join((url, swatch, fname))
            LOGGER.info("Downloading: " + url)
            data = requests.get(url).text
            # Write bytes explicitly so the CSS is always stored as UTF-8.
            with open(os.path.join('themes', name, 'assets', 'css', fname),
                      'wb+') as output:
                output.write(data.encode('utf-8'))
        # The 'parent' file tells Nikola which theme this one inherits from.
        with open(os.path.join('themes', name, 'parent'), 'wb+') as output:
            output.write(parent.encode('utf-8'))
        LOGGER.notice('Theme created.  Change the THEME setting to "{0}" to use it.'.format(name))
|
agustinhenze/nikola.debian
|
nikola/plugins/command/bootswatch_theme.py
|
Python
|
mit
| 4,155
|
import re
from functools import partial
from urllib.parse import urlencode
from geopy import exc
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("What3Words", "What3WordsV3")

# Matches exactly three dot-separated "words" (letters only, no digits or
# underscores), anchored at the end of the string.
_MULTIPLE_WORD_RE = re.compile(
    r"[^\W\d\_]+\.{1,1}[^\W\d\_]+\.{1,1}[^\W\d\_]+$", re.U
)


def _check_query(query):
    """Return True when *query* looks like a 'word.word.word' address."""
    return bool(_MULTIPLE_WORD_RE.match(query))
class What3Words(Geocoder):
    """What3Words geocoder using the legacy V2 API.

    Documentation at:
        https://docs.what3words.com/api/v2/

    .. attention::
        Consider using :class:`.What3WordsV3` instead.
    """

    geocode_path = '/v2/forward'
    reverse_path = '/v2/reverse'

    def __init__(
            self,
            api_key,
            *,
            timeout=DEFAULT_SENTINEL,
            proxies=DEFAULT_SENTINEL,
            user_agent=None,
            ssl_context=DEFAULT_SENTINEL,
            adapter_factory=None
    ):
        """
        :param str api_key: Key provided by What3Words
            (https://accounts.what3words.com/register).

        :param int timeout:
            See :attr:`geopy.geocoders.options.default_timeout`.

        :param dict proxies:
            See :attr:`geopy.geocoders.options.default_proxies`.

        :param str user_agent:
            See :attr:`geopy.geocoders.options.default_user_agent`.

        :type ssl_context: :class:`ssl.SSLContext`
        :param ssl_context:
            See :attr:`geopy.geocoders.options.default_ssl_context`.

        :param callable adapter_factory:
            See :attr:`geopy.geocoders.options.default_adapter_factory`.

            .. versionadded:: 2.0
        """
        super().__init__(
            scheme='https',
            timeout=timeout,
            proxies=proxies,
            user_agent=user_agent,
            ssl_context=ssl_context,
            adapter_factory=adapter_factory,
        )
        self.api_key = api_key
        domain = 'api.what3words.com'
        # Both endpoints share the host; build the full URLs once.
        self.geocode_api = '%s://%s%s' % (self.scheme, domain, self.geocode_path)
        self.reverse_api = '%s://%s%s' % (self.scheme, domain, self.reverse_path)

    def geocode(
            self,
            query,
            *,
            lang='en',
            exactly_one=True,
            timeout=DEFAULT_SENTINEL
    ):
        """
        Return a location point for a `3 words` query. If the `3 words` address
        doesn't exist, a :class:`geopy.exc.GeocoderQueryError` exception will be
        thrown.

        :param str query: The 3-word address you wish to geocode.

        :param str lang: two character language code as supported by
            the API (https://docs.what3words.com/api/v2/#lang).

        :param bool exactly_one: Return one result or a list of results, if
            available. Due to the address scheme there is always exactly one
            result for each `3 words` address, so this parameter is rather
            useless for this geocoder.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

        :rtype: :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        if not _check_query(query):
            raise exc.GeocoderQueryError(
                "Search string must be 'word.word.word'"
            )

        params = {
            'addr': query,
            'lang': lang.lower(),
            'key': self.api_key,
        }

        url = "?".join((self.geocode_api, urlencode(params)))

        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        callback = partial(self._parse_json, exactly_one=exactly_one)
        return self._call_geocoder(url, callback, timeout=timeout)

    def _parse_json(self, resources, exactly_one=True):
        """
        Parse type, words, latitude, and longitude and language from a
        JSON response.
        """
        code = resources['status'].get('code')

        if code:
            # https://docs.what3words.com/api/v2/#errors
            exc_msg = "Error returned by What3Words: %s" % resources['status']['message']
            if code == 401:
                raise exc.GeocoderAuthenticationFailure(exc_msg)

            raise exc.GeocoderQueryError(exc_msg)

        def parse_resource(resource):
            """
            Parse record.
            """

            if 'geometry' in resource:
                words = resource['words']
                position = resource['geometry']
                latitude, longitude = position['lat'], position['lng']
                if latitude and longitude:
                    latitude = float(latitude)
                    longitude = float(longitude)

                return Location(words, (latitude, longitude), resource)
            else:
                raise exc.GeocoderParseError('Error parsing result.')

        location = parse_resource(resources)
        if exactly_one:
            return location
        else:
            return [location]

    def reverse(
            self,
            query,
            *,
            lang='en',
            exactly_one=True,
            timeout=DEFAULT_SENTINEL
    ):
        """
        Return a `3 words` address by location point. Each point on surface has
        a `3 words` address, so there's always a non-empty response.

        :param query: The coordinates for which you wish to obtain the 3 word
            address.
        :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
            longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.

        :param str lang: two character language code as supported by the
            API (https://docs.what3words.com/api/v2/#lang).

        :param bool exactly_one: Return one result or a list of results, if
            available. Due to the address scheme there is always exactly one
            result for each `3 words` address, so this parameter is rather
            useless for this geocoder.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

        :rtype: :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        # Fix: the original lowercased `lang` twice (once here and once when
        # building the params dict); lowercase exactly once, as geocode() does.
        params = {
            'coords': self._coerce_point_to_string(query),
            'lang': lang.lower(),
            'key': self.api_key,
        }

        url = "?".join((self.reverse_api, urlencode(params)))
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        callback = partial(self._parse_reverse_json, exactly_one=exactly_one)
        return self._call_geocoder(url, callback, timeout=timeout)

    def _parse_reverse_json(self, resources, exactly_one=True):
        """
        Parses a location from a single-result reverse API call.
        """
        return self._parse_json(resources, exactly_one)
class What3WordsV3(Geocoder):
    """What3Words geocoder using the V3 API.

    Documentation at:
        https://developer.what3words.com/public-api/docs

    .. versionadded:: 2.2
    """

    geocode_path = '/v3/convert-to-coordinates'
    reverse_path = '/v3/convert-to-3wa'

    def __init__(
            self,
            api_key,
            *,
            timeout=DEFAULT_SENTINEL,
            proxies=DEFAULT_SENTINEL,
            user_agent=None,
            ssl_context=DEFAULT_SENTINEL,
            adapter_factory=None
    ):
        """
        :param str api_key: Key provided by What3Words
            (https://accounts.what3words.com/register).

        :param int timeout:
            See :attr:`geopy.geocoders.options.default_timeout`.

        :param dict proxies:
            See :attr:`geopy.geocoders.options.default_proxies`.

        :param str user_agent:
            See :attr:`geopy.geocoders.options.default_user_agent`.

        :type ssl_context: :class:`ssl.SSLContext`
        :param ssl_context:
            See :attr:`geopy.geocoders.options.default_ssl_context`.

        :param callable adapter_factory:
            See :attr:`geopy.geocoders.options.default_adapter_factory`.
        """
        super().__init__(
            scheme='https',
            timeout=timeout,
            proxies=proxies,
            user_agent=user_agent,
            ssl_context=ssl_context,
            adapter_factory=adapter_factory,
        )
        self.api_key = api_key
        domain = 'api.what3words.com'
        # Both endpoints share the host; build the full URLs once.
        self.geocode_api = '%s://%s%s' % (self.scheme, domain, self.geocode_path)
        self.reverse_api = '%s://%s%s' % (self.scheme, domain, self.reverse_path)

    def geocode(
            self,
            query,
            *,
            exactly_one=True,
            timeout=DEFAULT_SENTINEL
    ):
        """
        Return a location point for a `3 words` query. If the `3 words` address
        doesn't exist, a :class:`geopy.exc.GeocoderQueryError` exception will be
        thrown.

        :param str query: The 3-word address you wish to geocode.

        :param bool exactly_one: Return one result or a list of results, if
            available. Due to the address scheme there is always exactly one
            result for each `3 words` address, so this parameter is rather
            useless for this geocoder.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

        :rtype: :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        if not _check_query(query):
            raise exc.GeocoderQueryError(
                "Search string must be 'word.word.word'"
            )

        params = {
            'words': query,
            'key': self.api_key,
        }

        url = "?".join((self.geocode_api, urlencode(params)))

        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        callback = partial(self._parse_json, exactly_one=exactly_one)
        return self._call_geocoder(url, callback, timeout=timeout)

    def _parse_json(self, resources, exactly_one=True):
        """
        Parse type, words, latitude, and longitude and language from a
        JSON response.
        """
        error = resources.get('error')

        if error is not None:
            # https://developer.what3words.com/public-api/docs#error-handling
            exc_msg = "Error returned by What3Words: %s" % resources["error"]["message"]
            exc_code = error.get('code')
            if exc_code in ['MissingKey', 'InvalidKey']:
                raise exc.GeocoderAuthenticationFailure(exc_msg)

            raise exc.GeocoderQueryError(exc_msg)

        def parse_resource(resource):
            """
            Parse record.
            """

            if 'coordinates' in resource:
                words = resource['words']
                position = resource['coordinates']
                latitude, longitude = position['lat'], position['lng']
                if latitude and longitude:
                    latitude = float(latitude)
                    longitude = float(longitude)

                return Location(words, (latitude, longitude), resource)
            else:
                raise exc.GeocoderParseError('Error parsing result.')

        location = parse_resource(resources)
        if exactly_one:
            return location
        else:
            return [location]

    def reverse(
            self,
            query,
            *,
            lang='en',
            exactly_one=True,
            timeout=DEFAULT_SENTINEL
    ):
        """
        Return a `3 words` address by location point. Each point on surface has
        a `3 words` address, so there's always a non-empty response.

        :param query: The coordinates for which you wish to obtain the 3 word
            address.
        :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
            longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.

        :param str lang: two character language code as supported by the
            API (https://developer.what3words.com/public-api/docs#available-languages).

        :param bool exactly_one: Return one result or a list of results, if
            available. Due to the address scheme there is always exactly one
            result for each `3 words` address, so this parameter is rather
            useless for this geocoder.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

        :rtype: :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        # Fix: the original lowercased `lang` twice (once here and once when
        # building the params dict); lowercase exactly once.
        params = {
            'coordinates': self._coerce_point_to_string(query),
            'language': lang.lower(),
            'key': self.api_key,
        }

        url = "?".join((self.reverse_api, urlencode(params)))
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        callback = partial(self._parse_reverse_json, exactly_one=exactly_one)
        return self._call_geocoder(url, callback, timeout=timeout)

    def _parse_reverse_json(self, resources, exactly_one=True):
        """
        Parses a location from a single-result reverse API call.
        """
        return self._parse_json(resources, exactly_one)
|
geopy/geopy
|
geopy/geocoders/what3words.py
|
Python
|
mit
| 14,218
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
# NOTE: auto-generated by AutoRest (see header) — edits here will be lost on
# regeneration; code is kept byte-identical, comments only.
class LoadBalancer(Resource):
    """LoadBalancer resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param frontend_ip_configurations: Object representing the frontend IPs to
     be used for the load balancer
    :type frontend_ip_configurations:
     list[~azure.mgmt.network.v2016_09_01.models.FrontendIPConfiguration]
    :param backend_address_pools: Collection of backend address pools used by
     a load balancer
    :type backend_address_pools:
     list[~azure.mgmt.network.v2016_09_01.models.BackendAddressPool]
    :param load_balancing_rules: Object collection representing the load
     balancing rules Gets the provisioning
    :type load_balancing_rules:
     list[~azure.mgmt.network.v2016_09_01.models.LoadBalancingRule]
    :param probes: Collection of probe objects used in the load balancer
    :type probes: list[~azure.mgmt.network.v2016_09_01.models.Probe]
    :param inbound_nat_rules: Collection of inbound NAT Rules used by a load
     balancer. Defining inbound NAT rules on your load balancer is mutually
     exclusive with defining an inbound NAT pool. Inbound NAT pools are
     referenced from virtual machine scale sets. NICs that are associated with
     individual virtual machines cannot reference an Inbound NAT pool. They
     have to reference individual inbound NAT rules.
    :type inbound_nat_rules:
     list[~azure.mgmt.network.v2016_09_01.models.InboundNatRule]
    :param inbound_nat_pools: Defines an external port range for inbound NAT
     to a single backend port on NICs associated with a load balancer. Inbound
     NAT rules are created automatically for each NIC associated with the Load
     Balancer using an external port from this range. Defining an Inbound NAT
     pool on your Load Balancer is mutually exclusive with defining inbound Nat
     rules. Inbound NAT pools are referenced from virtual machine scale sets.
     NICs that are associated with individual virtual machines cannot reference
     an inbound NAT pool. They have to reference individual inbound NAT rules.
    :type inbound_nat_pools:
     list[~azure.mgmt.network.v2016_09_01.models.InboundNatPool]
    :param outbound_nat_rules: The outbound NAT rules.
    :type outbound_nat_rules:
     list[~azure.mgmt.network.v2016_09_01.models.OutboundNatRule]
    :param resource_guid: The resource GUID property of the load balancer
     resource.
    :type resource_guid: str
    :param provisioning_state: Gets the provisioning state of the PublicIP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # name and type are populated by the server and rejected on input.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps constructor attributes to their JSON paths in the REST payload;
    # consumed by the msrest serializer/deserializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[FrontendIPConfiguration]'},
        'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[BackendAddressPool]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[LoadBalancingRule]'},
        'probes': {'key': 'properties.probes', 'type': '[Probe]'},
        'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[InboundNatRule]'},
        'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[InboundNatPool]'},
        'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[OutboundNatRule]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, id: str=None, location: str=None, tags=None, frontend_ip_configurations=None, backend_address_pools=None, load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None, outbound_nat_rules=None, resource_guid: str=None, provisioning_state: str=None, etag: str=None, **kwargs) -> None:
        """Initialize a LoadBalancer; common resource fields go to the base class."""
        super(LoadBalancer, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.frontend_ip_configurations = frontend_ip_configurations
        self.backend_address_pools = backend_address_pools
        self.load_balancing_rules = load_balancing_rules
        self.probes = probes
        self.inbound_nat_rules = inbound_nat_rules
        self.inbound_nat_pools = inbound_nat_pools
        self.outbound_nat_rules = outbound_nat_rules
        self.resource_guid = resource_guid
        self.provisioning_state = provisioning_state
        self.etag = etag
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/load_balancer_py3.py
|
Python
|
mit
| 5,752
|
import csv
import sys
import re
import datetime, time
csv.field_size_limit( 1000000 )
input_date_format = '%Y-%m-%d %H:%M:%S.%f'
input_date_format_alt = '%Y-%m-%d %H:%M:%S'
input_file = sys.argv[1]
output_file = sys.argv[2]
try:
errors_file = sys.argv[3]
except IndexError:
errors_file = 'errors.csv'
print "%s ---> %s" % ( input_file, output_file )
i = open( input_file )
o = open( output_file, 'wb' )
e = open( errors_file, 'wb' )
reader = csv.reader( i, delimiter = '\t' )
writer = csv.writer( o )
error_writer = csv.writer( e )
headers = reader.next()
for line in reader:
id = line[0]
try:
start_date = line[9]
try:
start_date = datetime.datetime.strptime( start_date, input_date_format )
except ValueError:
try:
start_date = datetime.datetime.strptime( start_date, input_date_format_alt )
except ValueError:
print line
error_writer.writerow( line )
continue
start_timestamp = int( time.mktime( start_date.timetuple()))
end_date = line[10]
try:
end_date = datetime.datetime.strptime( end_date, input_date_format )
except ValueError:
try:
end_date = datetime.datetime.strptime( end_date, input_date_format_alt )
except ValueError:
print line
error_writer.writerow( line )
continue
end_timestamp = int( time.mktime( end_date.timetuple()))
new_line = [ id, start_timestamp, end_timestamp ]
writer.writerow( new_line )
except IndexError:
error_writer.writerow( line )
print id
|
zygmuntz/kaggle-jobs
|
test_jobs/job_times.py
|
Python
|
bsd-3-clause
| 1,521
|
#!/usr/bin/env python
# vim: expandtab tabstop=4 shiftwidth=4
class InvalidBlogUrl(Exception):
    """Signals that a blog url could not be accepted."""

    def __str__(self):
        return 'invalid blog url'
class _ValueException(Exception):
    """Shared base for exceptions that carry one value and render
    '<prefix>: repr(value)'.  Fixes the triplicated identical bodies of
    the three subclasses below."""

    # message prefix; overridden by each subclass
    prefix = 'error'

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return '{0}: {1}'.format(self.prefix, repr(self.value))

class FileExists(_ValueException):
    """A file already exists."""
    prefix = 'file exists'

class StaticFileExists(_ValueException):
    """A static file already exists."""
    prefix = 'static file exists'

class UpdateStopped(_ValueException):
    """An update run was stopped."""
    prefix = 'update stopped'
|
Leryan/pytumblder
|
tumblder/exceptions.py
|
Python
|
bsd-2-clause
| 682
|
#!/usr/bin/python
import sqlite3
import os.path
from os import makedirs
import sys
import re
from subprocess import call
import argparse
import textwrap
import csv
import StringIO
def list_tasks(dbname = "results.db"):
    """Return the names of every table (task) in the database."""
    cursor = connect_db(dbname).cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    return [r[0] for r in cursor.fetchall()]
def describe_tasks(tasks, dbname = "results.db"):
    """Return the 'name TYPE' parameter strings shared by all given tasks."""
    db = connect_db(dbname)
    cursor = db.cursor()
    if not isinstance(tasks, list):
        tasks = [tasks]
    # only the columns common to every task survive
    shared_params = []
    for task in tasks:
        cursor.execute("pragma table_info([{}])".format(task))
        cols = [" ".join((row[1], row[2])) for row in cursor.fetchall()]
        shared_params = intersection(shared_params, cols) if shared_params else cols
    return shared_params
def retrieve_data(x_param, y_param, filters, tasks, dbname = "results.db"):
    """
    Return a list of selected parameters and a data structure (list of list of tuples),
    - 1st index corresponds to the task,
    - 2nd index corresponds to the row,
    - 3rd index corresponds to the selected parameter.
    The key parameters that define a benchmark are always selected.
    """
    db = connect_db(dbname)
    data = []
    # x/y arrive as "name TYPE" strings; only the bare column name is selected
    cols_to_select = [x_param.split()[0], y_param.split()[0]]
    # always pass shared primary key information (they define a distinct benchmark)
    primary_keys = []
    for t in range(len(tasks)):
        if t == 0:
            primary_keys = retrieve_primary_keys(tasks[t], db)
        else:
            # keep only keys common to every task, preserving order
            primary_keys = intersection(primary_keys, retrieve_primary_keys(tasks[t], db))
    for key in primary_keys:
        if key not in cols_to_select:
            cols_to_select.append(key)
    # also pass filter parameter value in
    for f in filters:
        if f.param not in cols_to_select:
            cols_to_select.append(f.param)
    sql_val_args = []
    filter_command = ""
    for t in range(len(tasks)):
        select_command = "SELECT DISTINCT {} FROM {} ".format(','.join(cols_to_select), task_name(tasks[t]))
        if filters:
            # first time, still need to populate sql_val_args and make filter_command
            if t == 0:
                filter_command = "WHERE "
                for f in range(len(filters)):
                    # Task_filter.__str__ renders the clause with '?' placeholders;
                    # the bound values are accumulated in sql_val_args
                    filter_command += str(filters[f])
                    sql_val_args.extend(filters[f].args)
                    if f < len(filters) - 1:
                        filter_command += " AND "
            # the same WHERE clause (and bound values) are reused for every task
            select_command += filter_command
        select_command += ';'
        print(select_command)
        cursor = db.cursor()
        cursor.execute(select_command, sql_val_args)
        data.append(tuple(tuple(row) for row in cursor.fetchall()));
    return cols_to_select, data
def export_data_csv(selected_cols, data):
    """
    Yield each task's retrieved data as an in-memory csv file (StringIO);
    naming of each file is left to the caller.
    The first row is the header of selected columns, the rest are values.
    """
    for task_rows in data:
        memfile = StringIO.StringIO()
        out = csv.writer(memfile)
        out.writerow(selected_cols)  # header
        out.writerows(task_rows)
        yield memfile
def export_data_csv_todisk(selected_cols, data, tasks, dir = "benchtracker_data"):
    """
    Write the retrieved data to disk as csv files under `dir`, one file
    per task, named after the full task name with '/' replaced by '.'.
    """
    if not os.path.exists(dir):
        makedirs(dir)
    for t, csvf in enumerate(export_data_csv(selected_cols, data)):
        dest = "".join([dir, '/', tasks[t].replace('/', '.'), '.csv'])
        with open(dest, 'w') as f:
            csvf.seek(0)
            # copy in 1 MB chunks
            chunk = csvf.read(1048576)
            while chunk:
                f.write(chunk)
                chunk = csvf.read(1048576)
def describe_param(param, mode, tasks, dbname = "results.db"):
    """
    Give back metainformation about a parameter to allow for easier filtering.
    Param would be an element of the list returned by describe_tasks - space separated name and type
    Returns a 2-tuple describing the parameter type and values for some tasks of a database.
    - 1st value is either 'range' or 'categorical'
    - 2nd value is either a 2-tuple for range types, or a n-tuple for categorical
    """
    db = connect_db(dbname)
    cursor = db.cursor()
    (param_name, param_type) = param.split()
    # TEXT columns can only be treated categorically, regardless of requested mode
    if param_type == "TEXT":
        mode = 'categorical'
    elif mode not in {'categorical', 'range'}:
        raise ValueError
    subquery = ""
    min_param = "min_p"
    max_param = "max_p"
    if not isinstance(tasks,list):
        # single task: query its table directly, no derived-table aliases needed
        subquery = task_name(tasks)
        min_param = max_param = param_name
    else:
        # multiple tasks: UNION ALL per-task selections into one derived table
        subquery += '('
        for t in range(len(tasks)):
            if mode == "categorical":
                subquery += "SELECT DISTINCT {} FROM {}".format(param_name, task_name(tasks[t]))
            else:
                # per-task min/max, aggregated again by the outer query below
                subquery += "SELECT MIN({0}) as min_p, MAX({0}) as max_p FROM {1}".format(param_name, task_name(tasks[t]))
            if t < len(tasks) - 1:
                subquery += " UNION ALL "
        subquery += ')'
    print(subquery)
    # categorical data, return a list of all distinct values
    if mode == 'categorical':
        cursor.execute("SELECT DISTINCT {} FROM {};".format(param_name, subquery))
        return (mode,tuple(row[0] for row in cursor.fetchall()))
    # ranged data, return (min, max)
    else:
        cursor.execute("SELECT MIN({}), MAX({}) FROM {};".format(min_param, max_param, subquery))
        return (mode,tuple(cursor.fetchone()))
def connect_db(dbname = "results.db"):
    """Open and return a connection to `dbname` with Row access enabled;
    raises IOError when the database file does not exist."""
    if not os.path.isfile(dbname):
        print("{} does not exist".format(dbname))
        raise IOError(dbname)
    conn = sqlite3.connect(dbname)
    conn.row_factory = sqlite3.Row
    return conn
# filter object
valid_filter_methods = {"IN", "BETWEEN", "LIKE", "=", "<>", "!=", ">", "<", ">=", "<="}
class Task_filter:
    """One SQL WHERE clause of the form (param METHOD placeholders)."""
    def __init__(self, param, method, args):
        self.param = param
        if method.upper() not in valid_filter_methods:
            # report the method exactly as the caller spelled it
            print(method, "is not a supported filter method")
            raise ValueError
        self.method = method.upper()
        self.args = args
    def __str__(self):
        if self.method == "BETWEEN":
            placeholders = "? AND ?"
        else:
            placeholders = sql_substitute(self.args)
            if self.method == "IN":
                placeholders = '(' + placeholders + ')'
        return "({} {} {})".format(self.param, self.method, placeholders)
# internal utilities
def task_name(task):
    """Bracket a task name so it is usable as a quoted SQLite identifier."""
    return "".join(("[", task, "]"))
def intersection(first, other):
    """Return the items of `first` that also occur in `other`,
    preserving the order (and duplicates) of `first`."""
    common = set(first) & set(other)
    return [item for item in first if item in common]
def sql_substitute(args):
    """Return a comma-separated ``?`` placeholder string, one per element of *args*."""
    return ",".join("?" * len(args))
def retrieve_primary_keys(task, db):
    """Return the primary-key column names of *task*'s table, in column order.

    :param task: task/table name (bracketed via task_name for the PRAGMA)
    :param db: open sqlite3.Connection
    :return: list of column names whose PRAGMA "pk" field is non-zero
    """
    cursor = db.cursor()
    # table_info rows are (cid, name, type, notnull, dflt_value, pk);
    # pk != 0 marks membership in the primary key.
    cursor.execute("PRAGMA table_info(%s)" % task_name(task))
    # The debug print statements that used to run here polluted stdout on
    # every call and have been removed.
    return [info[1] for info in cursor.fetchall() if info[5] != 0]
|
UGent-HES/ConnectionRouter
|
vtr_flow/scripts/benchtracker/interface_db.py
|
Python
|
mit
| 7,881
|
from django.test import TestCase
from diffanalysis.models import ActionReport
import osmdata
from osmdata.filters import IgnoreUsers, IgnoreElementsCreation, IgnoreElementsModification, AbstractActionFilter
from osmdata.importers import AdiffImporter
from osmdata.exporters import CSVExporter
from osmdata.models import Diff
from osmdata.tests.utils import get_test_file_path
from .models import Step, WorkFlow
class TestWorkFlow(TestCase):
    """Tests for WorkFlow: construction from a settings dict and step execution."""

    fixtures = ['test_filters.json']  # Versailles Chantier

    def test_workflow_from_settings(self):
        """A well-formed settings dict yields the expected step instances."""
        # NOTE: a stale draft of this dict (marked "TO BE DELETED") used to be
        # assigned first and immediately overwritten; it has been removed.
        ok_workflow = {
            'name': 'test-wf',
            'flow': [
                {'type': 'import', 'class': 'osmdata.importers.AdiffImporter'},
                {'type': 'filter', 'class': 'osmdata.filters.IgnoreUsers', 'params': ['jm']},
                {'type': 'filter', 'class': 'osmdata.filters.IgnoreElementsCreation', 'params': ['amenity=wastebasket']},
                {'type': 'filter', 'class': 'osmdata.filters.IgnoreElementsModification', 'params': ['amenity=wastebasket']},
                {'type': 'export', 'class': 'osmdata.exporters.CSVExporter'}
            ]
        }
        wf = WorkFlow.from_settings('gare_standard', ok_workflow)
        self.assertEqual(wf.name, 'gare_standard')
        self.assertEqual(len(wf.steps), 5)
        self.assertIsInstance(wf.steps[0].instance, AdiffImporter)
        self.assertIsInstance(wf.steps[1].instance, IgnoreUsers)
        self.assertIsInstance(wf.steps[2].instance, IgnoreElementsCreation)
        self.assertIsInstance(wf.steps[3].instance, IgnoreElementsModification)
        self.assertIsInstance(wf.steps[4].instance, CSVExporter)

    def test_invalid_workflow_from_settings(self):
        """Malformed settings dicts raise ValueError."""
        with self.assertRaises(ValueError, msg='missing name'):
            WorkFlow.from_settings('a', {'flow': []})
        with self.assertRaises(ValueError, msg='missing flow'):
            WorkFlow.from_settings('a', {'name': 'foo'})
        with self.assertRaises(ValueError, msg='unknown key'):
            WorkFlow.from_settings('a', {
                'name': 'foo', 'flow': [], 'bar': 'zut'
            })
        with self.assertRaises(ValueError, msg='wrong step type'):
            WorkFlow.from_settings('a', {
                'name': 'foo', 'flow': [
                    {'type': 'zut', 'class': 'osmdata.exporters.CSVExporter'},
                ]
            })
        with self.assertRaises(ValueError, msg='nonexistent step class'):
            WorkFlow.from_settings('a', {
                'name': 'foo',
                'flow': [{'type': 'exporter', 'class': 'osmdata.exporters.No'}]
            })

    def test_run_empty(self):
        """Running a workflow with no steps must not crash."""
        wf = WorkFlow(
            name='test',
            steps=[]
        )
        wf.run([get_test_file_path('create_action.osm')], ['/dev/null'])

    def test_run_import(self):
        """An import step creates a Diff and its ActionReports."""
        wf = WorkFlow(
            name='test',
            steps=[
                Step(Step.STEP_IMPORT, osmdata.importers.AdiffImporter, [])
            ]
        )
        self.assertEqual(Diff.objects.count(), 1)
        self.assertEqual(ActionReport.objects.count(), 0)
        wf.run([get_test_file_path('create_action.osm')], ['/dev/null'])
        self.assertEqual(Diff.objects.count(), 2)
        # Check that action reports have been made
        self.assertEqual(ActionReport.objects.count(), 1)

    def test_output(self):
        """last_step_output exposes the return value of the final step."""
        class _CounterExporter:
            def run(self, action_qs):
                return action_qs.count()
        wf = WorkFlow(
            name='test',
            steps=[
                Step(Step.STEP_IMPORT, osmdata.importers.AdiffImporter, []),
                Step(Step.STEP_EXPORT, _CounterExporter, [])
            ]
        )
        wf.run([get_test_file_path('create_action.osm')], ['/dev/null'])
        self.assertEqual(wf.last_step_output, 1)

    def test_filter_filter_in(self):
        """A filter matching no user keeps the action in the output."""
        wf = WorkFlow(
            name='test',
            steps=[
                Step(Step.STEP_IMPORT, osmdata.importers.AdiffImporter, []),
                Step(Step.STEP_FILTER, osmdata.filters.IgnoreUsers,
                     [["DoNotExist"]])
            ]
        )
        wf.run([get_test_file_path('create_action.osm')], ['/dev/null'])
        self.assertEqual(wf.last_step_output.count(), 1)

    def test_filter_filter_out(self):
        """A filter matching the action's user removes it from the output."""
        wf = WorkFlow(
            name='test',
            steps=[
                Step(Step.STEP_IMPORT, osmdata.importers.AdiffImporter, []),
                Step(Step.STEP_FILTER, osmdata.filters.IgnoreUsers, [["Yann_L"]])
            ]
        )
        wf.run([get_test_file_path('create_action.osm')], ['/dev/null'])
        self.assertEqual(wf.last_step_output.count(), 0)
|
Cartocite/osmada
|
workflows/tests.py
|
Python
|
agpl-3.0
| 5,183
|
#!/usr/bin/env python
# encoding: utf-8
class Solution(object):
    def longestValidParentheses(self, s):
        """
        Dynamic programming: dp[i] is the length of the longest valid
        parentheses substring that ends exactly at s[i].

        :type s: str
        :rtype: int
        """
        length = len(s)
        dp = [0] * length
        best = 0
        for i in range(1, length):
            if s[i] != ')':
                continue  # a valid substring can only end on ')'
            if s[i - 1] == '(':
                # "...()" — extend whatever valid run ended just before the pair.
                dp[i] = (dp[i - 2] if i >= 2 else 0) + 2
            else:
                # "...))" — look for the '(' matching this ')' just before the
                # valid run ending at i-1.
                match = i - dp[i - 1] - 1
                if match >= 0 and s[match] == '(':
                    dp[i] = dp[i - 1] + 2
                    if match >= 1:
                        dp[i] += dp[match - 1]
            best = max(best, dp[i])
        return best
# Demo invocations. Parenthesized print with a single argument is valid under
# both Python 2 (expression in parens) and Python 3, unlike the original
# Python-2-only print statements.
s = Solution()
print(s.longestValidParentheses('())'))
print(s.longestValidParentheses("()(())"))
print(s.longestValidParentheses("(()))())("))
|
ShengRang/c4f
|
leetcode/longest-valid-parentheses.py
|
Python
|
gpl-3.0
| 931
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from . import unit_base
from decimal import Decimal
from flask_babel import lazy_gettext
# Unit symbol appended to converted distance values.
KILOMETER_SYMBOL = "km"
def convert(miles, **kwargs):
    """
    Converts from nautical miles to kilometers.

    :param miles: nautical-miles value, or a "-"-separated range, as a string
    :return: tuple of (converted value(s) as a string, kilometre unit symbol)
    """
    # 1.852 km per nautical mile. Construct the Decimal from a *string*:
    # Decimal(1.852) would inherit the binary floating-point representation
    # error of the float literal and carry it into every conversion.
    miles_to_km_rate = Decimal("1.852")
    miles_list = miles.split("-")
    kilometers = [unit_base.format_converted((Decimal(m) * miles_to_km_rate), precision=1) for m in miles_list]
    return "-".join(kilometers), KILOMETER_SYMBOL
def nautical_miles_to_metric(item, **kwargs):
    """Converts distance values from nautical miles to metric"""
    # Matches a number (optionally with '-', ',' or '.' separators), an
    # optional space or dash, then "nmi" or "nautical mile(s)" in common
    # capitalisations; group 1 (value_index=1) captures the numeric value.
    regex = r"(\d+-?,?\.?\d*)((\s*)|(-))((nmi)|([nN]autical [mM]iles?))\b"
    return unit_base.do_conversion(item, convert, unit_base.format_output, regex, match_index=0, value_index=1)
# Macro registration metadata consumed by the Superdesk macros loader.
name = "nautical_miles_to_metric"
label = lazy_gettext("Length nautical miles to kilometres")
callback = nautical_miles_to_metric
access_type = "frontend"
action_type = "interactive"
group = lazy_gettext("length")
|
superdesk/superdesk-core
|
superdesk/macros/imperial/length_nautical_miles_to_metric.py
|
Python
|
agpl-3.0
| 1,339
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.forms import ValidationError
from django.http import Http404
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.models import Document2, Document
from notebook.connectors.base import QueryExpired, QueryError, SessionExpired, AuthenticationRequired
LOG = logging.getLogger(__name__)
def check_document_access_permission():
  """Decorator factory for notebook API views: verify that request.user may
  read the referenced document before the view runs.

  The document id is taken from GET 'notebook' (falling back to GET
  'editor'), or, failing that, from the 'id' of the JSON-encoded 'notebook'
  POST field. Numeric ids are looked up by primary key and checked with
  can_read_or_exception; anything else is treated as a UUID lookup.
  Raises PopupException when the document does not exist.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      notebook_id = request.GET.get('notebook', request.GET.get('editor'))
      if not notebook_id:
        notebook_id = json.loads(request.POST.get('notebook', '{}')).get('id')
      try:
        if notebook_id:
          if str(notebook_id).isdigit():
            document = Document2.objects.get(id=notebook_id)
            document.can_read_or_exception(request.user)
          else:
            Document2.objects.get_by_uuid(user=request.user, uuid=notebook_id)
      except Document2.DoesNotExist:
        raise PopupException(_('Document %(id)s does not exist') % {'id': notebook_id})
      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
def check_document_modify_permission():
  """Decorator factory for notebook API views: verify that request.user may
  modify the document whose id appears in the JSON-encoded 'notebook' POST
  field. Raises PopupException when the document does not exist.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      notebook = json.loads(request.POST.get('notebook', '{}'))
      try:
        if notebook.get('id'):
          doc2 = Document2.objects.get(id=notebook['id'])
          doc2.can_write_or_exception(request.user)
      # BUGFIX: the lookup above is on Document2, whose DoesNotExist is a
      # distinct exception class from Document.DoesNotExist; catching the
      # latter let a missing document escape as an unhandled 500 instead of
      # the intended PopupException.
      except Document2.DoesNotExist:
        raise PopupException(_('Document %(id)s does not exist') % {'id': notebook.get('id')})
      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
def api_error_handler(func):
  """Decorator: translate known notebook/connector exceptions raised by an
  API view into a JSON error payload instead of letting them propagate.

  Status codes used: -2 session expired, -3 query expired, 401 auth
  required, 1 query error (may carry 'handle' and extra fields), -1
  validation or unexpected error.

  NOTE: this file uses Python 2 'except X, e' syntax.
  """
  def decorator(*args, **kwargs):
    response = {}
    try:
      return func(*args, **kwargs)
    except SessionExpired, e:
      response['status'] = -2
    except QueryExpired, e:
      response['status'] = -3
    except AuthenticationRequired, e:
      response['status'] = 401
    except ValidationError, e:
      LOG.exception('Error validation %s' % func)
      response['status'] = -1
      response['message'] = e.message
    except QueryError, e:
      LOG.exception('Error running %s' % func)
      response['status'] = 1
      response['message'] = smart_unicode(e)
      if e.handle:
        response['handle'] = e.handle
      if e.extra:
        response.update(e.extra)
    except Exception, e:
      LOG.exception('Error running %s' % func)
      response['status'] = -1
      response['message'] = smart_unicode(e)
    finally:
      # On success `response` stays empty and the try-block's return wins;
      # when a handler populated it, this return from `finally` overrides
      # and delivers the JSON error body instead.
      if response:
        return JsonResponse(response)
  return decorator
def json_error_handler(view_fn):
  """Decorator: let Http404 propagate unchanged, but turn any other
  exception from the view into a JSON {'error': ...} response with HTTP
  status 500.
  """
  def decorator(*args, **kwargs):
    try:
      return view_fn(*args, **kwargs)
    except Http404, e:
      raise e
    except Exception, e:
      response = {
        'error': str(e)
      }
      return JsonResponse(response, status=500)
  return decorator
|
Peddle/hue
|
desktop/libs/notebook/src/notebook/decorators.py
|
Python
|
apache-2.0
| 3,947
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
### DO NOT EDIT THIS FILE ###
"""Helpers for an Ubuntu application."""
import logging
import os
from . digitalocean_indicatorconfig import get_data_file
from . Builder import Builder
from locale import gettext as _
def get_builder(builder_file_name):
    """Return a fully-instantiated Gtk.Builder instance from specified ui
    file

    :param builder_file_name: The name of the builder file, without extension.
    Assumed to be in the 'ui' directory under the data path.
    """
    # Look for the ui file that describes the user interface.
    ui_filename = get_data_file('ui', '%s.ui' % (builder_file_name,))
    if not os.path.exists(ui_filename):
        # NOTE(review): None is then passed to add_from_file() below, which
        # will fail; presumably the .ui file always exists in an installed
        # tree — confirm before relying on this fallback.
        ui_filename = None
    builder = Builder()
    builder.set_translation_domain('digitalocean-indicator')
    builder.add_from_file(ui_filename)
    return builder
# Owais Lone : To get quick access to icons and stuff.
def get_media_file(media_file_name):
    """Return a file:// URI for a media file under the data path.

    NOTE(review): when the file is missing, media_filename becomes None and
    the string concatenation below raises TypeError — presumably the asset
    is expected to exist; confirm before relying on the fallback.
    """
    media_filename = get_data_file('media', '%s' % (media_file_name,))
    if not os.path.exists(media_filename):
        media_filename = None
    return "file:///"+media_filename
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally a no-op: records attached here go nowhere.
        return None
def set_up_logging(opts):
    """Attach formatted stream handlers to the app and lib loggers.

    :param opts: parsed options; ``opts.verbose`` enables DEBUG on the app
        logger, and a value > 1 also on the lib logger.
    """
    # add a handler to prevent basicConfig
    root = logging.getLogger()
    null_handler = NullHandler()
    root.addHandler(null_handler)
    formatter = logging.Formatter("%(levelname)s:%(name)s: %(funcName)s() '%(message)s'")
    logger = logging.getLogger('digitalocean_indicator')
    logger_sh = logging.StreamHandler()
    logger_sh.setFormatter(formatter)
    logger.addHandler(logger_sh)
    lib_logger = logging.getLogger('digitalocean_indicator_lib')
    lib_logger_sh = logging.StreamHandler()
    lib_logger_sh.setFormatter(formatter)
    lib_logger.addHandler(lib_logger_sh)
    # Set the logging level to show debug messages.
    if opts.verbose:
        logger.setLevel(logging.DEBUG)
        logger.debug('logging enabled')
    # NOTE(review): compared with > 1, so opts.verbose is presumably an int
    # count (e.g. repeated -v flags) — confirm against the option parser.
    if opts.verbose > 1:
        lib_logger.setLevel(logging.DEBUG)
def get_help_uri(page=None):
    """Return the help URI, preferring the source-tree docs and falling back
    to the installed GNOME help tree; append '#page' when *page* is given."""
    # help_uri from source tree - default language
    source_tree_help = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'help', 'C'))
    if os.path.exists(source_tree_help):
        help_uri = source_tree_help
    else:
        # installed so use gnome help tree - user's language
        help_uri = 'digitalocean-indicator'
    # unspecified page is the index.page
    return help_uri if page is None else '%s#%s' % (help_uri, page)
def show_uri(parent, link):
    """Open *link* with the default handler on *parent*'s screen via Gtk."""
    from gi.repository import Gtk # pylint: disable=E0611
    screen = parent.get_screen()
    Gtk.show_uri(screen, link, Gtk.get_current_event_time())
def alias(alternative_function_name):
    """Decorator factory: append *alternative_function_name* to the decorated
    function's ``aliases`` list, creating the list on first use.

    See http://www.drdobbs.com/web-development/184406073#l9
    """
    def decorator(function):
        # EAFP: append to an existing list, or create it on AttributeError.
        try:
            function.aliases.append(alternative_function_name)
        except AttributeError:
            function.aliases = [alternative_function_name]
        return function
    return decorator
|
andrewsomething/digitalocean-indicator
|
digitalocean_indicator_lib/helpers.py
|
Python
|
gpl-3.0
| 3,770
|
# coding: utf-8
import matplotlib.pyplot as plt
def generate_boxplot(data, title, datasets_small_name):
    """Draw a black-and-white boxplot of *data* and save it to result/<title>.png.

    :param data: sequence of datasets, one box per dataset
    :param title: figure title, also used as the output file name
    :param datasets_small_name: x-axis tick labels, one per dataset
    """
    fig = plt.figure(figsize=(8, 6))
    bplot = plt.boxplot(data,
                        notch=False,  # box instead of notch shape
                        # sym='rs',  # red squares for outliers
                        vert=True)  # vertical box alignment
    plt.xticks([pos + 1 for pos in range(len(data))], datasets_small_name)
    plt.xlabel('Dataset')
    # Recolor every artist (boxes, whiskers, medians, ...) black.
    for lines in bplot.values():
        for line in lines:
            line.set_color('black')
    plt.title(title)
    plt.savefig('result/' + title + '.png')
    # Close the figure so repeated calls do not accumulate open figures
    # (matplotlib keeps figures alive until explicitly closed).
    plt.close(fig)
|
owen198/kslab-atrisk-prediction
|
box_plot/func.py
|
Python
|
apache-2.0
| 660
|
#!/usr/bin/env python
import sys
from glob import glob
def ArgvToDict(argv_list=sys.argv, required=[], optional={}, verbose=True):
assert type(argv_list) == list and type(required) == list and type(optional) == dict
argv_dict = {}
if '-h' not in argv_list and '--help' not in argv_list:
if len(argv_list) > 1:
argv_list = ' '.join(argv_list).replace(' --', ' -').split(' -')[1:] # already deleted sys.argv[0]
for opt_list in argv_list:
opt_list = opt_list.split(' ', 1)
if ',' in opt_list[-1]:
opt_list = [opt_list[0],] + opt_list[-1].split(',')
elif '*' in opt_list[-1] or '?' in opt_list[-1]:
opt_list = [opt_list[0],] + glob(opt_list)
try:
opt_list.remove('')
except ValueError:
pass
opt_list_len = len(opt_list)
# "-option value":
# -option
if len(opt_list[0]) == 1:
opt_list[0] = '-' + opt_list[0]
elif len(opt_list[0]) > 1:
opt_list[0] = '--' + opt_list[0]
# value
if opt_list_len == 1:
argv_dict.update({opt_list[0]: True})
elif opt_list_len == 2:
argv_dict.update({opt_list[0]: opt_list[-1]})
elif opt_list_len >= 3:
argv_dict.update({opt_list[0]: opt_list[1:]})
# To add optional arguments:
for opt_argv in optional.iteritems():
if opt_argv[0] not in argv_dict.keys():
argv_dict.update({opt_argv[0]: opt_argv[-1]})
else:
pass
if verbose:
# # To check the arguments:
all_argv = required + optional.keys()
unknown_argv_len, lack_argv_len = 0, 0
# 1. unrecognized
if all_argv != []:
unknown_argv = []
for key in argv_dict.iterkeys():
if key not in all_argv:
unknown_argv.append('\"%s\"' % key)
unknown_argv_len = len(unknown_argv)
if unknown_argv_len > 0:
print 'Unrecognized argument' + 's'*int(round((unknown_argv_len-1.0)/unknown_argv_len)) + ': '\
+ ', '.join(unknown_argv) + '.'
# 2. required
if required != []:
lack_argv = []
for key in required:
if key not in argv_dict.keys():
lack_argv.append('\"%s\"' % key)
lack_argv_len = len(lack_argv)
if lack_argv_len > 0:
print 'Require the argument' + 's'*int(round((lack_argv_len-1.0)/lack_argv_len)) + ': '\
+ ', '.join(lack_argv) + '.'
if unknown_argv_len + lack_argv_len > 0:
print 'Please try again.'
argv_dict = {}
# 3. check all args roughly
else:
assert len(argv_dict.keys()) == len(all_argv)
else:
assert len(argv_dict.keys()) == len(all_argv)
else:
pass
else:
argv_dict = {'-h': True}
return argv_dict
|
yangwu91/fastx_subseq
|
Argv.py
|
Python
|
mit
| 2,581
|
# Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation
class Committer:
    """A WebKit committer: a full name, one or more e-mail addresses, and an
    optional IRC nickname. Committers cannot review by default."""

    def __init__(self, name, email_or_emails, irc_nickname=None):
        self.full_name = name
        # Accept a single address or a list; store uniformly as a list.
        self.emails = [email_or_emails] if isinstance(email_or_emails, str) else email_or_emails
        self.irc_nickname = irc_nickname
        self.can_review = False

    def bugzilla_email(self):
        # FIXME: We're assuming the first email is a valid bugzilla email,
        # which might not be right.
        return self.emails[0]

    def __str__(self):
        return '"%s" <%s>' % (self.full_name, self.emails[0])
class Reviewer(Committer):
    """A committer who is also allowed to review patches."""
    def __init__(self, name, email_or_emails, irc_nickname=None):
        Committer.__init__(self, name, email_or_emails, irc_nickname)
        self.can_review = True
# This is intended as a canonical, machine-readable list of all non-reviewer
# committers for WebKit. If your name is missing here and you are a committer,
# please add it. No review needed. All reviewers are committers, so this list
# is only of committers who are not reviewers.
committers_unable_to_review = [
    Committer("Aaron Boodman", "aa@chromium.org", "aboodman"),
    Committer("Abhishek Arya", "inferno@chromium.org", "inferno-sec"),
    Committer("Adam Langley", "agl@chromium.org", "agl"),
    Committer("Adrienne Walker", ["enne@google.com", "enne@chromium.org"], "enne"),
    Committer("Albert J. Wong", "ajwong@chromium.org"),
    Committer("Alejandro G. Castro", ["alex@igalia.com", "alex@webkit.org"]),
    Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"], "lypanov"),
    Committer("Alexander Pavlov", "apavlov@chromium.org", "apavlov"),
    Committer("Alexis Menard", ["alexis.menard@openbossa.org", "menard@kde.org"], "darktears"),
    Committer("Andre Boule", "aboule@apple.com"),
    Committer("Andrei Popescu", "andreip@google.com", "andreip"),
    Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"], "proton"),
    Committer("Andrew Scherkus", "scherkus@chromium.org", "scherkus"),
    Committer("Andrey Kosyakov", "caseq@chromium.org", "caseq"),
    Committer("Andras Becsi", ["abecsi@webkit.org", "abecsi@inf.u-szeged.hu"], "bbandix"),
    Committer("Andy Estes", "aestes@apple.com", "estes"),
    Committer("Anthony Ricaud", "rik@webkit.org", "rik"),
    Committer("Anton Muhin", "antonm@chromium.org", "antonm"),
    Committer("Balazs Kelemen", "kbalazs@webkit.org", "kbalazs"),
    Committer("Ben Murdoch", "benm@google.com", "benm"),
    Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org"], "icefox"),
    Committer("Benjamin Kalman", ["kalman@chromium.org", "kalman@google.com"], "kalman"),
    Committer("Benjamin Otte", ["otte@gnome.org", "otte@webkit.org"], "otte"),
    Committer("Brent Fulgham", "bfulgham@webkit.org", "bfulgham"),
    Committer("Brett Wilson", "brettw@chromium.org", "brettx"),
    Committer("Cameron McCormack", "cam@webkit.org", "heycam"),
    Committer("Carlos Garcia Campos", ["cgarcia@igalia.com", "carlosgc@gnome.org", "carlosgc@webkit.org"], "KaL"),
    Committer("Carol Szabo", "carol@webkit.org", "cszabo1"),
    Committer("Chang Shu", ["cshu@webkit.org", "Chang.Shu@nokia.com"], "cshu"),
    Committer("Chris Evans", "cevans@google.com"),
    Committer("Chris Petersen", "cpetersen@apple.com", "cpetersen"),
    Committer("Chris Rogers", "crogers@google.com", "crogers"),
    Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org"]),
    Committer("Collin Jackson", "collinj@webkit.org"),
    Committer("Daniel Cheng", "dcheng@chromium.org", "dcheng"),
    Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"], "catfishman"),
    Committer("Dean Jackson", "dino@apple.com", "dino"),
    Committer("Diego Gonzalez", ["diegohcg@webkit.org", "diego.gonzalez@openbossa.org"], "diegohcg"),
    Committer("Dirk Pranke", "dpranke@chromium.org", "dpranke"),
    Committer("Drew Wilson", "atwilson@chromium.org", "atwilson"),
    Committer("Eli Fidler", "eli@staikos.net", "QBin"),
    Committer("Emil A Eklund", "eae@chromium.org", "eae"),
    Committer("Enrica Casucci", "enrica@apple.com"),
    Committer("Erik Arvidsson", "arv@chromium.org", "arv"),
    Committer("Eric Roman", "eroman@chromium.org", "eroman"),
    Committer("Eric Uhrhane", "ericu@chromium.org", "ericu"),
    Committer("Evan Martin", "evan@chromium.org", "evmar"),
    Committer("Evan Stade", "estade@chromium.org", "estade"),
    Committer("Fady Samuel", "fsamuel@chromium.org", "fsamuel"),
    Committer("Feng Qian", "feng@chromium.org"),
    Committer("Fumitoshi Ukai", "ukai@chromium.org", "ukai"),
    Committer("Gabor Loki", "loki@webkit.org", "loki04"),
    Committer("Gabor Rapcsanyi", ["rgabor@webkit.org", "rgabor@inf.u-szeged.hu"], "rgabor"),
    Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"]),
    Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]),
    Committer("Greg Bolsinga", "bolsinga@apple.com"),
    Committer("Gyuyoung Kim", ["gyuyoung.kim@samsung.com", "gyuyoung.kim@webkit.org"], "gyuyoung"),
    Committer("Hans Wennborg", "hans@chromium.org", "hwennborg"),
    Committer("Hayato Ito", "hayato@chromium.org", "hayato"),
    Committer("Helder Correia", "helder@sencha.com", "helder"),
    Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]),
    Committer("Ilya Tikhonovsky", "loislo@chromium.org", "loislo"),
    # BUGFIX: without the u prefix, "\u0107" in a Python 2 byte string is a
    # literal backslash sequence, not the intended c-with-acute (compare the
    # u"..." Reviewer entries below).
    Committer(u"Ivan Krsti\u0107", "ike@apple.com"),
    Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"], "jpetso"),
    Committer("Jakub Wieczorek", "jwieczorek@webkit.org", "fawek"),
    Committer("James Hawkins", ["jhawkins@chromium.org", "jhawkins@google.com"], "jhawkins"),
    Committer("James Kozianski", ["koz@chromium.org", "koz@google.com"], "koz"),
    Committer("James Simonsen", "simonjam@chromium.org", "simonjam"),
    Committer("Jay Civelli", "jcivelli@chromium.org", "jcivelli"),
    Committer("Jeff Miller", "jeffm@apple.com", "jeffm"),
    Committer("Jenn Braithwaite", "jennb@chromium.org", "jennb"),
    Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]),
    Committer("Jer Noble", "jer.noble@apple.com", "jernoble"),
    Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"], "jeremymos"),
    Committer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"]),
    Committer("Jesus Sanchez-Palencia", ["jesus@webkit.org", "jesus.palencia@openbossa.org"], "jeez_"),
    Committer("Jocelyn Turcotte", "jocelyn.turcotte@nokia.com", "jturcotte"),
    Committer("Jochen Eisinger", "jochen@chromium.org", "jochen__"),
    Committer("John Abd-El-Malek", "jam@chromium.org", "jam"),
    Committer("John Gregg", ["johnnyg@google.com", "johnnyg@chromium.org"], "johnnyg"),
    Committer("John Knottenbelt", "jknotten@chromium.org", "jknotten"),
    Committer("Johnny Ding", ["jnd@chromium.org", "johnnyding.webkit@gmail.com"], "johnnyding"),
    Committer("Joone Hur", ["joone.hur@collabora.co.uk", "joone@kldp.org", "joone@webkit.org"], "joone"),
    Committer("Joost de Valk", ["joost@webkit.org", "webkit-dev@joostdevalk.nl"], "Altha"),
    Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"], "jparent"),
    Committer("Julien Chaffraix", ["jchaffraix@webkit.org", "julien.chaffraix@gmail.com"]),
    Committer("Jungshik Shin", "jshin@chromium.org"),
    Committer("Justin Schuh", "jschuh@chromium.org", "jschuh"),
    Committer("Keishi Hattori", "keishi@webkit.org", "keishi"),
    Committer("Kelly Norton", "knorton@google.com"),
    Committer("Kenji Imasaki", "imasaki@chromium.org", "imasaki"),
    Committer("Kent Hansen", "kent.hansen@nokia.com", "khansen"),
    Committer("Kimmo Kinnunen", ["kimmo.t.kinnunen@nokia.com", "kimmok@iki.fi", "ktkinnun@webkit.org"], "kimmok"),
    Committer("Kinuko Yasuda", "kinuko@chromium.org", "kinuko"),
    Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"),
    Committer("Kwang Yul Seo", ["kwangyul.seo@gmail.com", "skyul@company100.net", "kseo@webkit.org"], "kwangseo"),
    Committer("Leandro Pereira", ["leandro@profusion.mobi", "leandro@webkit.org"], "acidx"),
    Committer("Levi Weintraub", ["leviw@chromium.org", "leviw@google.com", "lweintraub@apple.com"], "leviw"),
    Committer("Lucas De Marchi", ["lucas.demarchi@profusion.mobi", "demarchi@webkit.org"], "demarchi"),
    Committer("Lucas Forschler", ["lforschler@apple.com"], "lforschler"),
    Committer("Luiz Agostini", ["luiz@webkit.org", "luiz.agostini@openbossa.org"], "lca"),
    Committer("Mads Ager", "ager@chromium.org"),
    Committer("Marcus Voltis Bulach", "bulach@chromium.org"),
    Committer("Mario Sanchez Prada", ["msanchez@igalia.com", "mario@webkit.org"], "msanchez"),
    Committer("Matt Delaney", "mdelaney@apple.com"),
    Committer("Matt Lilek", ["webkit@mattlilek.com", "pewtermoose@webkit.org"]),
    Committer("Matt Perry", "mpcomplete@chromium.org"),
    Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]),
    Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"], "maxime.simon"),
    Committer("Michael Nordman", "michaeln@google.com", "michaeln"),
    Committer("Michael Saboff", "msaboff@apple.com"),
    Committer("Michelangelo De Simone", "michelangelo@webkit.org", "michelangelo"),
    Committer("Mike Belshe", ["mbelshe@chromium.org", "mike@belshe.com"]),
    Committer("Mike Fenton", ["mifenton@rim.com", "mike.fenton@torchmobile.com"], "mfenton"),
    Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]),
    Committer("Mikhail Naganov", "mnaganov@chromium.org"),
    Committer("MORITA Hajime", "morrita@google.com", "morrita"),
    Committer("Nico Weber", ["thakis@chromium.org", "thakis@google.com"], "thakis"),
    Committer("Noam Rosenthal", "noam.rosenthal@nokia.com", "noamr"),
    Committer("Pam Greene", "pam@chromium.org", "pamg"),
    Committer("Patrick Gansterer", ["paroga@paroga.com", "paroga@webkit.org"], "paroga"),
    Committer("Pavel Podivilov", "podivilov@chromium.org", "podivilov"),
    Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"], "pkasting"),
    Committer("Peter Varga", ["pvarga@webkit.org", "pvarga@inf.u-szeged.hu"], "stampho"),
    Committer("Philippe Normand", ["pnormand@igalia.com", "philn@webkit.org"], "philn-tp"),
    Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"], "pdherbemont"),
    Committer("Pierre-Olivier Latour", "pol@apple.com", "pol"),
    Committer("Pratik Solanki", "psolanki@apple.com", "psolanki"),
    Committer("Qi Zhang", ["qi.2.zhang@nokia.com", "qi.zhang02180@gmail.com"], "qi"),
    Committer("Renata Hodovan", "reni@webkit.org", "reni"),
    Committer("Robert Hogan", ["robert@webkit.org", "robert@roberthogan.net", "lists@roberthogan.net"], "mwenge"),
    Committer("Roland Steiner", "rolandsteiner@chromium.org"),
    Committer("Satish Sampath", "satish@chromium.org"),
    Committer("Scott Violet", "sky@chromium.org", "sky"),
    Committer("Sergio Villar Senin", ["svillar@igalia.com", "sergio@webkit.org"], "svillar"),
    Committer("Stephen White", "senorblanco@chromium.org", "senorblanco"),
    Committer("Trey Matteson", "trey@usa.net", "trey"),
    Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]),
    Committer("Vangelis Kokkevis", "vangelis@chromium.org", "vangelis"),
    Committer("Victor Wang", "victorw@chromium.org", "victorw"),
    Committer("Vitaly Repeshko", "vitalyr@chromium.org"),
    Committer("William Siegrist", "wsiegrist@apple.com", "wms"),
    Committer("W. James MacLean", "wjmaclean@chromium.org", "wjmaclean"),
    Committer("Xiaomei Ji", "xji@chromium.org", "xji"),
    Committer("Yael Aharon", "yael.aharon@nokia.com", "yael"),
    Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]),
    Committer("Yong Li", ["yong.li.webkit@gmail.com", "yong.li@torchmobile.com"], "yong"),
    Committer("Yongjun Zhang", "yongjun.zhang@nokia.com"),
    Committer("Yi Shen", ["yi.4.shen@nokia.com", "shenyi2006@gmail.com"]),
    Committer("Yuta Kitamura", "yutak@chromium.org", "yutak"),
    Committer("Yuzo Fujishima", "yuzo@google.com", "yuzo"),
    Committer("Zhenyao Mo", "zmo@google.com", "zhenyao"),
    Committer("Zoltan Herczeg", "zherczeg@webkit.org", "zherczeg"),
    Committer("Zoltan Horvath", ["zoltan@webkit.org", "hzoltan@inf.u-szeged.hu", "horvath.zoltan.6@stud.u-szeged.hu"], "zoltan"),
]
# This is intended as a canonical, machine-readable list of all reviewers for
# WebKit. If your name is missing here and you are a reviewer, please add it.
# No review needed.
# The list is kept sorted alphabetically by full name; the "Chris" entries
# were previously out of order (Marrin before Fleizach/Jerdonek) and have
# been re-sorted.
reviewers_list = [
    Reviewer("Ada Chan", "adachan@apple.com", "chanada"),
    Reviewer("Adam Barth", "abarth@webkit.org", "abarth"),
    Reviewer("Adam Roben", "aroben@apple.com", "aroben"),
    Reviewer("Adam Treat", ["treat@kde.org", "treat@webkit.org", "atreat@rim.com"], "manyoso"),
    Reviewer("Adele Peterson", "adele@apple.com", "adele"),
    Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"], "ap"),
    Reviewer("Alice Liu", "alice.liu@apple.com", "aliu"),
    Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"], "alp"),
    Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"], "andersca"),
    Reviewer("Andreas Kling", ["kling@webkit.org", "andreas.kling@nokia.com"], "kling"),
    Reviewer("Antonio Gomes", ["tonikitoo@webkit.org", "agomes@rim.com"], "tonikitoo"),
    Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com", "antti.j.koivisto@nokia.com"], "anttik"),
    Reviewer("Ariya Hidayat", ["ariya.hidayat@gmail.com", "ariya@sencha.com", "ariya@webkit.org"], "ariya"),
    Reviewer("Benjamin Poulain", ["benjamin@webkit.org", "benjamin.poulain@nokia.com", "ikipou@gmail.com"], "benjaminp"),
    Reviewer("Beth Dakin", "bdakin@apple.com", "dethbakin"),
    Reviewer("Brady Eidson", "beidson@apple.com", "bradee-oh"),
    Reviewer("Brian Weinstein", "bweinstein@apple.com", "bweinstein"),
    Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]),
    Reviewer("Chris Blumenberg", "cblu@apple.com", "cblu"),
    Reviewer("Chris Fleizach", "cfleizach@apple.com", "cfleizach"),
    Reviewer("Chris Jerdonek", "cjerdonek@webkit.org", "cjerdonek"),
    Reviewer("Chris Marrin", "cmarrin@apple.com", "cmarrin"),
    Reviewer(u"Csaba Osztrogon\u00e1c", "ossy@webkit.org", "ossy"),
    Reviewer("Dan Bernstein", ["mitz@webkit.org", "mitz@apple.com"], "mitzpettel"),
    Reviewer("Daniel Bates", "dbates@webkit.org", "dydz"),
    Reviewer("Darin Adler", "darin@apple.com", "darin"),
    Reviewer("Darin Fisher", ["fishd@chromium.org", "darin@chromium.org"], "fishd"),
    Reviewer("David Harrison", "harrison@apple.com", "harrison"),
    Reviewer("David Hyatt", "hyatt@apple.com", "hyatt"),
    Reviewer("David Kilzer", ["ddkilzer@webkit.org", "ddkilzer@apple.com"], "ddkilzer"),
    Reviewer("David Levin", "levin@chromium.org", "dave_levin"),
    Reviewer("Dimitri Glazkov", "dglazkov@chromium.org", "dglazkov"),
    Reviewer("Dirk Schulze", "krit@webkit.org", "krit"),
    Reviewer("Dmitry Titov", "dimich@chromium.org", "dimich"),
    Reviewer("Don Melton", "gramps@apple.com", "gramps"),
    Reviewer("Dumitru Daniliuc", "dumi@chromium.org", "dumi"),
    Reviewer("Eric Carlson", "eric.carlson@apple.com"),
    Reviewer("Eric Seidel", "eric@webkit.org", "eseidel"),
    Reviewer("Gavin Barraclough", "barraclough@apple.com", "gbarra"),
    Reviewer("Geoffrey Garen", "ggaren@apple.com", "ggaren"),
    Reviewer("George Staikos", ["staikos@kde.org", "staikos@webkit.org"]),
    Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org", "gustavo.noronha@collabora.co.uk"], "kov"),
    Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"], "zecke"),
    Reviewer("James Robinson", ["jamesr@chromium.org", "jamesr@google.com"], "jamesr"),
    Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"], "janm"),
    Reviewer("Jeremy Orlow", "jorlow@chromium.org", "jorlow"),
    Reviewer("Jian Li", "jianli@chromium.org", "jianli"),
    Reviewer("John Sullivan", "sullivan@apple.com", "sullivan"),
    Reviewer("Jon Honeycutt", "jhoneycutt@apple.com", "jhoneycutt"),
    Reviewer("Joseph Pecoraro", ["joepeck@webkit.org", "pecoraro@apple.com"], "JoePeck"),
    Reviewer("Justin Garcia", "justin.garcia@apple.com", "justing"),
    Reviewer("Ken Kocienda", "kocienda@apple.com"),
    Reviewer("Kenneth Rohde Christiansen", ["kenneth@webkit.org", "kenneth.christiansen@openbossa.org", "kenneth.christiansen@gmail.com"], "kenne"),
    Reviewer("Kenneth Russell", "kbr@google.com", "kbr_google"),
    Reviewer("Kent Tamura", "tkent@chromium.org", "tkent"),
    Reviewer("Kevin Decker", "kdecker@apple.com", "superkevin"),
    Reviewer("Kevin McCullough", "kmccullough@apple.com", "maculloch"),
    Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"], "kollivier"),
    Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org", "lars.knoll@nokia.com"], "lars"),
    Reviewer("Laszlo Gombos", "laszlo.1.gombos@nokia.com", "lgombos"),
    Reviewer("Maciej Stachowiak", "mjs@apple.com", "othermaciej"),
    Reviewer("Mark Rowe", "mrowe@apple.com", "bdash"),
    Reviewer("Martin Robinson", ["mrobinson@webkit.org", "mrobinson@igalia.com", "martin.james.robinson@gmail.com"], "mrobinson"),
    Reviewer("Mihai Parparita", "mihaip@chromium.org", "mihaip"),
    Reviewer("Nate Chapin", "japhet@chromium.org", "japhet"),
    Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org"], "wildfox"),
    Reviewer("Ojan Vafai", "ojan@chromium.org", "ojan"),
    Reviewer("Oliver Hunt", "oliver@apple.com", "olliej"),
    Reviewer("Pavel Feldman", "pfeldman@chromium.org", "pfeldman"),
    Reviewer("Richard Williamson", "rjw@apple.com", "rjw"),
    Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org"], "rwlbuis"),
    Reviewer("Ryosuke Niwa", "rniwa@webkit.org", "rniwa"),
    Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"], "weinig"),
    Reviewer("Shinichiro Hamaji", "hamaji@chromium.org", "hamaji"),
    Reviewer("Simon Fraser", "simon.fraser@apple.com", "smfr"),
    Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org", "simon.hausmann@nokia.com"], "tronical"),
    Reviewer("Stephanie Lewis", "slewis@apple.com", "sundiamonde"),
    Reviewer("Steve Block", "steveblock@google.com", "steveblock"),
    Reviewer("Steve Falkenburg", "sfalken@apple.com", "sfalken"),
    Reviewer("Tim Omernick", "timo@apple.com"),
    Reviewer("Timothy Hatcher", ["timothy@apple.com", "timothy@hatcher.name"], "xenon"),
    Reviewer("Tony Chang", "tony@chromium.org", "tony^work"),
    Reviewer("Tony Gentilcore", "tonyg@chromium.org", "tonyg-cr"),
    Reviewer(u"Tor Arne Vestb\u00f8", ["vestbo@webkit.org", "tor.arne.vestbo@nokia.com"], "torarne"),
    Reviewer("Vicki Murley", "vicki@apple.com"),
    Reviewer("Xan Lopez", ["xan.lopez@gmail.com", "xan@gnome.org", "xan@webkit.org"], "xan"),
    Reviewer("Yury Semikhatsky", "yurys@chromium.org", "yurys"),
    Reviewer("Zack Rusin", "zack@kde.org", "zackr"),
]
class CommitterList:
    """Directory of WebKit contributors, supporting lookup by full name or
    by any of a contributor's e-mail addresses."""

    # Committers and reviewers are passed in to allow easy testing
    def __init__(self,
                 committers=committers_unable_to_review,
                 reviewers=reviewers_list):
        # Every reviewer is also a committer, so the committer pool is the union.
        self._committers = committers + reviewers
        self._reviewers = reviewers
        self._committers_by_email = {}  # lazily-built lookup cache

    def committers(self):
        return self._committers

    def reviewers(self):
        return self._reviewers

    def _email_to_committer_map(self):
        # Populate the cache on first use only.
        if not self._committers_by_email:
            for contributor in self._committers:
                for address in contributor.emails:
                    self._committers_by_email[address] = contributor
        return self._committers_by_email

    def committer_by_name(self, name):
        # This could be made into a hash lookup if callers need it to be fast.
        matches = (c for c in self.committers() if c.full_name == name)
        return next(matches, None)

    def committer_by_email(self, email):
        return self._email_to_committer_map().get(email)

    def reviewer_by_email(self, email):
        committer = self.committer_by_email(email)
        if committer and not committer.can_review:
            return None
        return committer
|
danialbehzadi/Nokia-RM-1013-2.0.0.11
|
webkit/Tools/Scripts/webkitpy/common/config/committers.py
|
Python
|
gpl-3.0
| 22,137
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-01-20 18:43
from __future__ import unicode_literals
import api.models
from django.db import migrations, models
import django.db.models.deletion
def fix_status(apps, schema_editor):
    """Create the 'Government' (id=2) and 'Private' (id=3) astronaut types and
    backfill ``Astronaut.type`` from each astronaut's status.

    Astronauts with ``status.id <= 8`` are classified as government; all
    others as private.
    """
    db_alias = schema_editor.connection.alias
    AstronautType = apps.get_model("configurations", "AstronautType")
    # Keep references to the created rows so the loop below does not re-query
    # them on every iteration (the original issued a .get() per astronaut).
    government = AstronautType.objects.using(db_alias).create(id=2, name="Government")
    private = AstronautType.objects.using(db_alias).create(id=3, name="Private")
    Astronaut = apps.get_model('api', 'Astronaut')
    # Route ALL queries through db_alias — the original only aliased the
    # create() calls, so on a multi-database setup the reads/writes below
    # would have hit the default database instead.
    for astronaut in Astronaut.objects.using(db_alias).all():
        if astronaut.status.id <= 8:
            astronaut.type = government
        else:
            astronaut.type = private
        astronaut.save(using=db_alias)
class Migration(migrations.Migration):
    """Populate AstronautType rows and backfill Astronaut.type (see fix_status)."""

    dependencies = [
        ('api', '0060_astronaut_type'),
    ]

    operations = [
        # NOTE(review): no reverse_code is supplied, so this migration cannot
        # be unapplied; consider migrations.RunPython.noop if rollback matters.
        migrations.RunPython(fix_status),
    ]
|
ItsCalebJones/SpaceLaunchNow-Server
|
api/migrations/0061_astronaut_type.py
|
Python
|
apache-2.0
| 980
|
from django.core.management import call_command
from django.test import TestCase
from councils.tests.factories import CouncilFactory
class HomeViewTestCase(TestCase):
    """The home page renders, and posting a postcode redirects to an address
    page for that postcode."""

    @classmethod
    def setUpTestData(cls):
        CouncilFactory(
            council_id="X01",
            identifiers=["X01"],
            geography__geography=None,
        )
        for fixture in [
            "test_routing.json",
            "test_multiple_addresses_single_polling_station.json",
        ]:
            call_command(  # Hack to avoid converting all fixtures to factories
                "loaddata",
                fixture,
                verbosity=0,
            )

    def test_get(self):
        # Home page renders successfully.
        response = self.client.get("/")
        self.assertEqual(200, response.status_code)

    def test_redirect(self):
        response = self.client.post(
            r"/?utm_source=foo&something=other", {"postcode": "CC1 1AA"}, follow=False
        )
        self.assertEqual(302, response.status_code)
        # The query string isn't preserved, because they've come from the home page, and it could be either uprn
        # for the postcode given (hence the [23])
        self.assertRegex(response["Location"], r"/address/10[23]/")
class PostCodeViewTestCase(TestCase):
    """Postcode lookup when the postcode spans multiple polling stations."""

    @classmethod
    def setUpTestData(cls):
        CouncilFactory(
            council_id="X01",
            identifiers=["X01"],
            geography__geography=None,
        )
        for fixture in ["test_routing.json", "test_multiple_polling_stations.json"]:
            call_command(  # Hack to avoid converting all fixtures to factories
                "loaddata",
                fixture,
                verbosity=0,
            )

    def test_redirect_if_should_be_other_view(self):
        # This should go to the address picker, because it's split over multiple polling districts
        response = self.client.get(
            "/postcode/DD11DD/?utm_source=foo&something=other", follow=False
        )
        self.assertEqual(302, response.status_code)
        # Only the utm_source parameter survives the redirect.
        self.assertEqual(response["Location"], "/address_select/DD11DD/?utm_source=foo")
class PostCodeViewNoStationTestCase(TestCase):
    """When no polling station is known for a postcode, the page shows the
    council's contact details instead."""

    @classmethod
    def setUpTestData(cls):
        CouncilFactory(
            council_id="X01",
            name="Foo Council",
            electoral_services_phone_numbers=["01314 159265"],
            identifiers=["X01"],
            geography__geography=None,
        )
        for fixture in [
            "test_single_address_blank_polling_station.json",
            "test_postcode_not_in_addressbase.json",
        ]:
            call_command(  # Hack to avoid converting all fixtures to factories
                "loaddata",
                fixture,
                verbosity=0,
            )

    def test_polling_station_is_blank(self):
        response = self.client.get(
            "/postcode/BB11BB/?utm_source=foo&something=other", follow=False
        )
        self.assertEqual(200, response.status_code)
        # NOTE(review): the leading letter is omitted, presumably so this
        # matches both "Contact ..." and "contact ..." — confirm template
        # casing before "fixing" the string.
        self.assertContains(response, "ontact Foo Council")
        self.assertContains(response, "tel:01314 159265")

    def test_post_code_not_in_addressbase(self):
        response = self.client.get(
            "/postcode/HJ67KL/?utm_source=foo&something=other", follow=False
        )
        self.assertEqual(200, response.status_code)
        self.assertContains(response, "Contact Foo Council")
        self.assertContains(response, "tel:01314 159265")
class WeDontknowViewTestCase(TestCase):
    """
    'FF22FF' is a postcode with uprns in multiple councils and some polling stations.
    'GG22GG' is a postcode with uprns in multiple councils and no polling stations.
    'HH22HH' is a postcode with uprns in a single council and no polling stations.
    """

    @classmethod
    def setUpTestData(cls):
        CouncilFactory(
            council_id="FOO",
            name="Foo Council",
            identifiers=["X01"],
            geography__geography=None,
        )
        CouncilFactory(
            council_id="BAR",
            name="Bar Borough",
            identifiers=["X02"],
            geography__geography=None,
        )
        call_command(  # Hack to avoid converting all fixtures to factories
            "loaddata",
            "test_uprns_in_multiple_councils",
            verbosity=0,
        )

    def test_not_multiple_redirect(self):
        # Single-council postcode: redirect straight to the postcode view.
        response = self.client.post(
            r"/?utm_source=foo&something=other", {"postcode": "HH2 2HH"}, follow=False
        )
        self.assertEqual(302, response.status_code)
        self.assertEqual(response["Location"], r"/postcode/HH22HH/")

    def test_home_redirect(self):
        # Multi-council postcode with stations: go to the address picker.
        response = self.client.post(
            r"/?utm_source=foo&something=other", {"postcode": "FF2 2FF"}, follow=False
        )
        self.assertEqual(302, response.status_code)
        self.assertEqual(response["Location"], r"/address_select/FF22FF/")

    def test_we_dont_know_redirect(self):
        # The legacy /we_dont_know/ URL redirects to /multiple_councils/.
        response = self.client.get("/we_dont_know/FF22FF/", follow=False)
        self.assertEqual(302, response.status_code)
        self.assertEqual(response["Location"], r"/multiple_councils/FF22FF/")

    def test_multiple_councils_view(self):
        # The page lists every council the postcode might fall in.
        response = self.client.get("/multiple_councils/FF22FF/", follow=False)
        self.assertContains(
            response,
            "Residents in FF22FF may be in one of the following council areas:",
        )
        self.assertContains(response, "Foo Council")
        self.assertContains(response, "Bar Borough")

    def test_home_redirect_no_stations(self):
        # Multi-council postcode without stations still goes to the picker.
        response = self.client.post(
            r"/?utm_source=foo&something=other", {"postcode": "GG2 2GG"}, follow=False
        )
        self.assertEqual(302, response.status_code)
        self.assertEqual(response["Location"], r"/address_select/GG22GG/")

    def test_postcode_redirect_no_stations(self):
        response = self.client.get("/postcode/GG22GG/", follow=False)
        self.assertEqual(302, response.status_code)
        self.assertEqual(response["Location"], r"/address_select/GG22GG/")

    def test_multiple_councils_no_stations(self):
        response = self.client.get("/multiple_councils/GG22GG/", follow=False)
        self.assertContains(
            response,
            "Residents in GG22GG may be in one of the following council areas:",
        )
        self.assertContains(response, "Foo Council")
        self.assertContains(response, "Bar Borough")
|
DemocracyClub/UK-Polling-Stations
|
polling_stations/apps/data_finder/tests/test_views.py
|
Python
|
bsd-3-clause
| 6,487
|
import os

from sqlalchemy import create_engine

# The connection string can be overridden from the environment so that
# credentials and host are not hard-coded into the source; the original
# local-development URL remains the default, and echo=True (SQL statement
# logging) is preserved.
engine = create_engine(
    os.environ.get('DATABASE_URL',
                   'mysql+pymysql://root:@127.0.0.1:3306/unrealknights'),
    echo=True)
|
HueyPark/Unreal-Knights
|
Server/Code/database/engine.py
|
Python
|
mit
| 126
|
import numpy as np
import copy
def main(cl_reg_fit, max_mag):
    """
    Reject stars beyond the maximum magnitude given.

    Each star is a sequence whose element [3][0] is its main magnitude.
    Returns the (possibly trimmed) star list and the magnitude limit to use
    for synthetic clusters.
    """
    # Maximum observed (main) magnitude across all stars.
    observed_mags = list(zip(*list(zip(*cl_reg_fit))[1:][2]))[0]
    max_mag_obs = np.max(observed_mags)

    if max_mag == 'max':
        # No magnitude cut applied.
        return copy.deepcopy(cl_reg_fit), max_mag_obs

    # Keep only stars brighter than (or at) the magnitude limit.
    bright_stars = [st for st in cl_reg_fit if st[3][0] <= max_mag]

    if len(bright_stars) > 10:
        # For the synthetic clusters, use the minimum value between the
        # selected 'max_mag' value and the maximum observed magnitude.
        # This prevents large 'max_mag' values from generating synthetic
        # clusters with low mass stars in the not-observed region.
        max_mag_syn = min(max_mag, max_mag_obs)
        print("Maximum magnitude cut applied ({:.1f} mag)".format(
            max_mag_syn))
        return bright_stars, max_mag_syn

    # Too few stars would survive the cut: keep the full list instead.
    print(" WARNING: less than 10 stars left after removing\n"
          " stars by magnitude limit. No removal applied.")
    return copy.deepcopy(cl_reg_fit), max_mag_obs
|
asteca/ASteCA
|
packages/best_fit/max_mag_cut.py
|
Python
|
gpl-3.0
| 1,440
|
# -*- coding: utf-8 -*-
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2011 Krzysztof Tarnowski (krzysztof.tarnowski@ymail.com)
# Copyright (C) 2009, 2010, 2011 OpenHatch, Inc.
# Copyright (C) 2011 Jairo E. Lopez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, HttpResponseBadRequest
from mysite.base.view_helpers import render_response
import json
from django.template import loader, Context
import mysite.account
import mysite.profile.view_helpers
import mysite.account.forms
from mysite.base.decorators import view
import mysite.customs.feed
import mysite.search.view_helpers
import mysite.search.models
import mysite.missions.models
import random
import datetime
import logging
from django.contrib.auth.decorators import login_required
from django.conf import settings
logger = logging.getLogger(__name__)
def front_page_data():
    """Build the context shared by the front/landing pages: the latest blog
    entry and the five most recently modified feed items (answers and
    wanna-help notes)."""
    recent_answers = list(
        mysite.search.models.Answer.objects.order_by('-created_date')[:5])
    recent_notes = list(
        mysite.search.models.WannaHelperNote.objects.order_by('-created_date')[:5])
    combined = recent_answers + recent_notes
    combined.sort(key=lambda item: item.modified_date, reverse=True)
    return {
        'entries': mysite.customs.feed.cached_blog_entries()[:1],
        'recent_feed_items': combined[:5],
    }
@view
def home(request):
    """Front page. Anonymous visitors get the plain front page; logged-in
    users also get personalized nudges, mission progress and project data."""
    data = front_page_data()
    # Pick five random people who have at least one tag on their profile.
    everybody = list(
        mysite.profile.models.Person.objects.exclude(link_person_tag=None))
    random.shuffle(everybody)
    data['random_profiles'] = everybody[0:5]
    if request.user.is_authenticated():
        template_path = 'base/index.html'
        # figure out which nudges we want to show them
        person = request.user.get_profile()
        data['nudge_location'] = person.should_be_nudged_about_location()
        data['nudge_tags'] = not person.get_tags_for_recommendations()
        data['nudge_missions'] = not mysite.missions.models.StepCompletion.objects.filter(
            person=person)
        if person.get_published_portfolio_entries():
            data['nudge_importer_when_user_has_some_projects'
                 ] = True  # just nudge about the importer...
        else:
            # the person has entered zero projects and hasn't touched the importer
            # so introduce him or her to use the importer!
            data['nudge_importer_when_user_has_no_projects'
                 ] = True  # give the general project editing nudge
        data['show_nudge_box'] = (data['nudge_location'] or
                                  'nudge_importer_when_user_has_no_projects' in data or data['nudge_tags'] or
                                  'nudge_importer_when_user_has_some_projects' in data)
        # For performance reasons, we do not send bug recommendations here.
        completed_missions = dict((c.step.name, True)
                                  for c in mysite.missions.models.StepCompletion.objects.filter(person=request.user.get_profile()))
        data[u'completed_missions'] = completed_missions
        data[u'projects_i_wanna_help'] = person.projects_i_wanna_help.all()
        data[u'projects_i_helped'] = person.get_published_portfolio_entries()
        # These are for project maintainers
        data[u'projects_with_wannahelpers'] = [
            pfe.project for pfe in person.get_published_portfolio_entries()
            if pfe.project.wannahelpernote_set.all().count()]
        data[u'maintainer_nudges'] = maintainer_nudges = {}
        # Nudge maintainers with published projects but no answers yet.
        maintainer_nudges['show_project_page'] = (
            person.get_published_portfolio_entries() and
            not person.user.answer_set.all())
        # Nudge when none of the maintainer's projects has any tracked bugs.
        maintainer_nudges[u'add_bug_tracker'] = (
            person.get_published_portfolio_entries() and
            (not any([pfe.project.bug_set.all()
                      for pfe in person.get_published_portfolio_entries()])))
    else:  # no user logged in. Show front-page
        template_path = 'base/index.html'
    return (request, template_path, data)
def page_to_js(request):
    """Serve a JavaScript snippet that embeds a small HTML fragment."""
    # FIXME: In the future, use:
    # from django.template.loader import render_to_string
    # to generate html_doc
    html_doc = "<strong>zomg</strong>"
    # Note: using application/javascript as suggested by
    # http://www.ietf.org/rfc/rfc4329.txt
    return render_response(
        request, 'base/append_ourselves.js',
        {'in_string': json.dumps(html_doc)},
        mimetype='application/javascript')
def page_not_found(request):
    """Render the custom 404 page with the current user in context."""
    template = loader.get_template('404.html')
    context = Context({'user': request.user})
    return HttpResponse(template.render(context), status=404)
def geocode(request):
    """Resolve the 'address' query parameter to JSON coordinates.

    Responds 400 when the address is missing or cannot be geocoded.
    """
    address = request.GET.get('address')
    if address:
        coordinates_as_json = mysite.base.view_helpers.cached_geocoding_in_json(
            address)
        # The helper returns the string 'null' when geocoding fails.
        if coordinates_as_json != 'null':
            return HttpResponse(coordinates_as_json,
                                mimetype='application/json')
    return HttpResponseBadRequest()  # no address :-(
# Obtains meta data for request return
def meta_data():
    """Collect bug-poller health statistics for the meta page and for the
    Nagios check in meta_exit_code()."""
    data = {}
    data['bug_diagnostics'] = {}
    # local name for shortness
    my = data['bug_diagnostics']
    # Take the current time once so both thresholds are measured from the
    # same instant (the original called now() twice).
    now = datetime.datetime.now()
    one_hour_and_two_days_ago = now - datetime.timedelta(days=2, hours=1)
    three_days_ago = now - datetime.timedelta(days=3)
    # NOTE: these key strings (including the doubled "than than") are looked
    # up verbatim by meta_exit_code(); rename them in both places or not at all.
    my['Bugs last polled more than than two days + one hour ago'] = mysite.search.models.Bug.open_ones.filter(
        last_polled__lt=one_hour_and_two_days_ago).count()
    # Compute this count once and reuse it — the original issued the same
    # COUNT query twice (once for the key, once for the percentage).
    stale_count = mysite.search.models.Bug.open_ones.filter(
        last_polled__lt=three_days_ago).count()
    my['Bugs last polled more than three days ago'] = stale_count
    # Test for 0 division
    allbug = mysite.search.models.Bug.open_ones.count()
    perbug = 0.0
    if allbug:
        perbug = stale_count * 100.0 / allbug
    my['Bugs last polled more than three days ago (in percent)'] = perbug
    return data
def meta_exit_code(data=None):
    """Map bug-poller statistics onto a Nagios-style exit code.

    Returns 2 (CRITICAL) when any open bug was last polled more than three
    days ago, 1 (WARNING) when any was last polled more than two days and one
    hour ago, and 0 (OK) otherwise. Also logs the status line for Nagios.
    """
    if data is None:
        data = meta_data()
    diagnostics = data['bug_diagnostics']
    bug1 = diagnostics['Bugs last polled more than than two days + one hour ago']
    bug2 = diagnostics['Bugs last polled more than three days ago']
    perbug = diagnostics['Bugs last polled more than three days ago (in percent)']
    # Exit codes and stdout for Nagios integration
    status_line = "{0} - Polled 2+: {1} Polled 3+: {2} ({3}%)"
    if bug2:
        logger.error(status_line.format("CRITICAL", bug1, bug2, perbug))
        return 2
    if bug1:
        logger.warning(status_line.format("WARNING", bug1, bug2, perbug))
        return 1
    logger.info(status_line.format("OK", bug1, bug2, perbug))
    return 0
@view
def meta(request):
    """Render the diagnostics page showing bug-poller statistics."""
    return (request, 'meta.html', meta_data())
@login_required
def save_portfolio_entry_ordering_do(request):
    """Persist the drag-and-drop ordering of the user's portfolio entries.

    The POSTed list contains PortfolioEntry ids in display order; the special
    'FOLD' marker separates the visible entries from the archived ones.
    """
    from mysite.profile.models import PortfolioEntry
    list_of_ids = request.POST.getlist('sortable_portfolio_entry[]')
    are_we_archiving_yet = False
    # Renamed the loop variable from `id` to avoid shadowing the builtin.
    for n, entry_id in enumerate(list_of_ids):
        if entry_id == 'FOLD':  # ha not an id
            are_we_archiving_yet = True
            continue
        # person__user filter ensures users can only reorder their own entries.
        pfe = PortfolioEntry.objects.get(id=int(entry_id),
                                         person__user=request.user)
        pfe.sort_order = -n  # negated so we can sort descending
        pfe.is_archived = are_we_archiving_yet
        pfe.save()
    return HttpResponse('1')
@view
def landing_for_opp_hunters(request):
    """Landing page aimed at contributors hunting for opportunities."""
    return (request, 'landing_for_opp_hunters.html',
            front_page_data())
@view
def landing_for_project_maintainers(request):
    """Landing page aimed at project maintainers."""
    return (request, 'landing_for_project_maintainers.html',
            front_page_data())
@view
def landing_for_documenters(request):
    """Landing page aimed at documentation writers."""
    return (request, 'landing_for_documenters.html',
            front_page_data())
@login_required
def test_email_re_projects(request):
    """Debug view: render the project-activity email as it would be sent to
    the logged-in user."""
    from mysite.profile.management.commands import send_emails
    from mysite.profile.models import Person
    command = send_emails.Command()
    # Fixed start date — presumably chosen so there is plenty of history to
    # render; confirm before changing.
    command.this_run_covers_things_since = datetime.datetime(2009, 5, 28)
    command.this_run_covers_things_up_until = datetime.datetime.utcnow()
    context = command.get_context_for_email_to(
        request.user.get_profile()) or {}
    if context:
        return mysite.base.decorators.as_view(request, 'email_re_projects.html', context, "test_email_re_projects")
    else:
        return HttpResponse("(We couldn't find any recent project activity for you, so you wouldn't get an email updating you about it.)")
# The following view(s) generate stub pages that get converted
# into themes for other system(s).
#
# Right now, there's only one for a single page in a single
# other theming system. Enjoy!
@view
def wordpress_index(request):
    """Stub page rendered for conversion into a WordPress theme."""
    template_path = 'base/wordpress_index.html'
    data = {}
    return (request, template_path, data)
def render_robots_txt(request):
    """Serve robots.txt, choosing the dev or live variant based on DEBUG."""
    template_path = ("robots_for_dev_env.txt" if settings.DEBUG
                     else "robots_for_live_site.txt")
    return render_response(request, template_path, mimetype='text/plain')
|
campbe13/openhatch
|
mysite/base/views.py
|
Python
|
agpl-3.0
| 10,109
|
from sympy.core import Add, S, C, sympify
from sympy.core.function import Function, ArgumentIndexError
from zeta_functions import zeta
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.miscellaneous import sqrt
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
    """The complete gamma function Gamma(x)."""
    nargs = 1
    def fdiff(self, argindex=1):
        # d/dx Gamma(x) = Gamma(x) * psi(x), where psi = polygamma(0, .).
        if argindex == 1:
            return gamma(self.args[0])*polygamma(0, self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, arg):
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity:
                return S.Infinity
            elif arg.is_Integer:
                if arg.is_positive:
                    # Gamma(n) = (n-1)! for positive integers.
                    return C.Factorial(arg-1)
                else:
                    # Poles at zero and at every negative integer.
                    return S.ComplexInfinity
            elif arg.is_Rational:
                if arg.q == 2:
                    # Half-integer arguments have a closed form built from
                    # sqrt(pi) and a double-factorial-style coefficient.
                    n = abs(arg.p) // arg.q
                    if arg.is_positive:
                        k, coeff = n, S.One
                    else:
                        n = k = n + 1
                        # Sign alternates for negative half-integers.
                        if n & 1 == 0:
                            coeff = S.One
                        else:
                            coeff = S.NegativeOne
                    # Accumulate the odd-number product 3*5*...*(2k-1).
                    for i in range(3, 2*k, 2):
                        coeff *= i
                    if arg.is_positive:
                        return coeff*sqrt(S.Pi) / 2**n
                    else:
                        return 2**n*sqrt(S.Pi) / coeff
    def _eval_expand_func(self, deep=True, **hints):
        # Pull an integer (or rational) shift out of a sum argument using
        # Gamma(x + m) = Gamma(x) * RisingFactorial(x, m).
        if deep:
            arg = self.args[0].expand(deep, **hints)
        else:
            arg = self.args[0]
        if arg.is_Add:
            for i, coeff in enumerate(arg.args):
                if arg.args[i].is_Number:
                    # Remaining terms after removing the numeric summand.
                    terms = C.Add(*(arg.args[:i] + arg.args[i+1:]))
                    if coeff.is_Rational:
                        if coeff.q != 1:
                            terms += C.Rational(1, coeff.q)
                            coeff = C.Integer(int(coeff))
                    else:
                        continue
                    return gamma(terms)*C.RisingFactorial(terms, coeff)
        return self.func(*self.args)
    def _eval_is_real(self):
        return self.args[0].is_real
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
    """Lower incomplete gamma function"""
    nargs = 2
    @classmethod
    def eval(cls, a, x):
        if a.is_Number:
            if a is S.One:
                # lowergamma(1, x) = 1 - exp(-x)
                return S.One - C.exp(-x)
            elif a.is_Integer:
                # Recurrence for positive integer a:
                # lowergamma(a, x) = (a-1)*lowergamma(a-1, x) - x**(a-1)*exp(-x)
                b = a - 1
                if b.is_positive:
                    return b*cls(b, x) - x**b * C.exp(-x)
class uppergamma(Function):
    """Upper incomplete gamma function"""
    nargs = 2
    def fdiff(self, argindex=2):
        # d/dz uppergamma(a, z) = -exp(-z) * z**(a-1)
        if argindex == 2:
            # Use self.args for consistency with gamma.fdiff and
            # polygamma.fdiff; slicing the expression object itself
            # (self[0:2]) relied on deprecated Basic indexing.
            a, z = self.args[0:2]
            return -C.exp(-z)*z**(a-1)
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, a, z):
        if z.is_Number:
            if z is S.NaN:
                return S.NaN
            elif z is S.Infinity:
                return S.Zero
            elif z is S.Zero:
                # uppergamma(a, 0) = Gamma(a)
                return gamma(a)
        if a.is_Number:
            if a is S.One:
                return C.exp(-z)
            elif a.is_Integer:
                # Recurrence for positive integer a:
                # uppergamma(a, z) = (a-1)*uppergamma(a-1, z) + z**(a-1)*exp(-z)
                b = a - 1
                if b.is_positive:
                    return b*cls(b, z) + z**b * C.exp(-z)
###############################################################################
########################### GAMMA RELATED FUNCTIONS ###########################
###############################################################################
class polygamma(Function):
    """The polygamma function psi^(n)(z): the (n+1)-th logarithmic
    derivative of the gamma function."""
    nargs = 2
    def fdiff(self, argindex=2):
        # d/dz polygamma(n, z) = polygamma(n+1, z)
        if argindex == 2:
            n, z = self.args[0:2]
            return polygamma(n+1, z)
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, n, z):
        n, z = map(sympify, (n, z))
        if n.is_integer:
            if n.is_negative:
                return loggamma(z)
            else:
                if z.is_Number:
                    if z is S.NaN:
                        return S.NaN
                    elif z is S.Infinity:
                        if n.is_Number:
                            # psi(z) -> oo as z -> oo; higher derivatives -> 0.
                            if n is S.Zero:
                                return S.Infinity
                            else:
                                return S.Zero
                    elif z.is_Integer:
                        if z.is_nonpositive:
                            # Poles at zero and the negative integers.
                            return S.ComplexInfinity
                        else:
                            if n is S.Zero:
                                # psi(z) = -EulerGamma + harmonic(z-1)
                                return -S.EulerGamma + C.harmonic(z-1, 1)
                            elif n.is_odd:
                                return (-1)**(n+1)*C.Factorial(n)*zeta(n+1, z)
    def _eval_expand_func(self, deep=True, **hints):
        if deep:
            hints['func'] = False
            n = self.args[0].expand(deep, **hints)
            z = self.args[1].expand(deep, **hints)
        else:
            n, z = self.args[0], self.args[1].expand(deep, func=True)
        if n.is_Integer and n.is_nonnegative:
            if z.is_Add:
                # Shift formula: pull an integer summand out of z.
                coeff, factors = z.as_coeff_factors()
                if coeff.is_Integer:
                    tail = Add(*[ z + i for i in xrange(0, int(coeff)) ])
                    return polygamma(n, z-coeff) + (-1)**n*C.Factorial(n)*tail
            elif z.is_Mul:
                # Multiplication formula for a positive integer coefficient.
                coeff, terms = z.as_coeff_terms()
                if coeff.is_Integer and coeff.is_positive:
                    tail = [ polygamma(n, z + i//coeff) for i in xrange(0, int(coeff)) ]
                    if n is S.Zero:
                        return log(coeff) + Add(*tail)/coeff**(n+1)
                    else:
                        return Add(*tail)/coeff**(n+1)
        return polygamma(n, z)
    def _eval_rewrite_as_zeta(self, n, z):
        return (-1)**(n+1)*C.Factorial(n)*zeta(n+1, z-1)
class loggamma(Function):
nargs = 1
|
tovrstra/sympy
|
sympy/functions/special/gamma_functions.py
|
Python
|
bsd-3-clause
| 6,564
|
import pytest
import sqlparse
from sqlparse import sql, tokens as T
def test_grouping_parenthesis():
    """Nested parentheses are grouped into sql.Parenthesis nodes."""
    statement = 'select (select (x3) x2) and (y2) bar'
    stmt = sqlparse.parse(statement)[0]
    assert str(stmt) == statement
    assert len(stmt.tokens) == 7
    outer = stmt.tokens[2]
    assert isinstance(outer, sql.Parenthesis)
    assert isinstance(stmt.tokens[-1], sql.Identifier)
    assert len(outer.tokens) == 5
    inner_identifier = outer.tokens[3]
    assert isinstance(inner_identifier, sql.Identifier)
    assert isinstance(inner_identifier.tokens[0], sql.Parenthesis)
    assert len(inner_identifier.tokens) == 3
def test_grouping_comments():
    """A block comment plus trailing text round-trips into two tokens."""
    statement = '/*\n * foo\n */ \n bar'
    stmt = sqlparse.parse(statement)[0]
    assert str(stmt) == statement
    assert len(stmt.tokens) == 2
@pytest.mark.parametrize('s', ['foo := 1;', 'foo := 1'])
def test_grouping_assignment(s):
    """An assignment groups into a single sql.Assignment node."""
    stmt = sqlparse.parse(s)[0]
    assert len(stmt.tokens) == 1
    assert isinstance(stmt.tokens[0], sql.Assignment)
@pytest.mark.parametrize('s', ["x > DATE '2020-01-01'", "x > TIMESTAMP '2020-01-01 00:00:00'"])
def test_grouping_typed_literal(s):
    """DATE/TIMESTAMP literals group into sql.TypedLiteral."""
    stmt = sqlparse.parse(s)[0]
    assert isinstance(stmt[0][4], sql.TypedLiteral)
@pytest.mark.parametrize('s, a, b', [
    ('select a from b where c < d + e', sql.Identifier, sql.Identifier),
    ('select a from b where c < d + interval \'1 day\'', sql.Identifier, sql.TypedLiteral),
    ('select a from b where c < d + interval \'6\' month', sql.Identifier, sql.TypedLiteral),
    ('select a from b where c < current_timestamp - interval \'1 day\'', sql.Token, sql.TypedLiteral),
])
def test_compare_expr(s, a, b):
    """A comparison inside WHERE groups into Comparison -> Operation, with
    the operation's operand types given by the parametrized a and b."""
    parsed = sqlparse.parse(s)[0]
    assert str(parsed) == s
    assert isinstance(parsed.tokens[2], sql.Identifier)
    assert isinstance(parsed.tokens[6], sql.Identifier)
    assert isinstance(parsed.tokens[8], sql.Where)
    assert len(parsed.tokens) == 9
    where = parsed.tokens[8]
    assert isinstance(where.tokens[2], sql.Comparison)
    assert len(where.tokens) == 3
    comparison = where.tokens[2]
    assert isinstance(comparison.tokens[0], sql.Identifier)
    assert comparison.tokens[2].ttype is T.Operator.Comparison
    assert isinstance(comparison.tokens[4], sql.Operation)
    assert len(comparison.tokens) == 5
    operation = comparison.tokens[4]
    # Operand types vary per parametrized case (a = left, b = right).
    assert isinstance(operation.tokens[0], a)
    assert operation.tokens[2].ttype is T.Operator
    assert isinstance(operation.tokens[4], b)
    assert len(operation.tokens) == 5
def test_grouping_identifiers():
    """Assorted identifier-grouping cases: dotted names, quoted schemas,
    INSERT ... VALUES, and aliased expression lists."""
    s = 'select foo.bar from "myscheme"."table" where fail. order'
    parsed = sqlparse.parse(s)[0]
    assert str(parsed) == s
    assert isinstance(parsed.tokens[2], sql.Identifier)
    assert isinstance(parsed.tokens[6], sql.Identifier)
    assert isinstance(parsed.tokens[8], sql.Where)
    s = 'select * from foo where foo.id = 1'
    parsed = sqlparse.parse(s)[0]
    assert str(parsed) == s
    assert isinstance(parsed.tokens[-1].tokens[-1].tokens[0], sql.Identifier)
    s = 'select * from (select "foo"."id" from foo)'
    parsed = sqlparse.parse(s)[0]
    assert str(parsed) == s
    assert isinstance(parsed.tokens[-1].tokens[3], sql.Identifier)
    # VALUES clauses group into sql.Values regardless of row count.
    for s in ["INSERT INTO `test` VALUES('foo', 'bar');",
              "INSERT INTO `test` VALUES(1, 2), (3, 4), (5, 6);",
              "INSERT INTO `test(a, b)` VALUES(1, 2), (3, 4), (5, 6);"]:
        parsed = sqlparse.parse(s)[0]
        types = [l.ttype for l in parsed.tokens if not l.is_whitespace]
        assert types == [T.DML, T.Keyword, None, None, T.Punctuation]
        assert isinstance(parsed.tokens[6], sql.Values)
    # Aliased expressions in a select list form an IdentifierList.
    s = "select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable"
    parsed = sqlparse.parse(s)[0]
    assert len(parsed.tokens) == 7
    assert isinstance(parsed.tokens[2], sql.IdentifierList)
    assert len(parsed.tokens[2].tokens) == 4
    identifiers = list(parsed.tokens[2].get_identifiers())
    assert len(identifiers) == 2
    assert identifiers[0].get_alias() == "col"
@pytest.mark.parametrize('s', [
    '1 as f',
    'foo as f',
    'foo f',
    '1/2 as f',
    '1/2 f',
    '1<2 as f',  # issue327
    '1<2 f',
])
def test_simple_identifiers(s):
    """Aliased expressions (with or without AS) group into sql.Identifier."""
    parsed = sqlparse.parse(s)[0]
    assert isinstance(parsed.tokens[0], sql.Identifier)
@pytest.mark.parametrize('s', [
    'foo, bar',
    'sum(a), sum(b)',
    'sum(a) as x, b as y',
    'sum(a)::integer, b',
    'sum(a)/count(b) as x, y',
    'sum(a)::integer as x, y',
    'sum(a)::integer/count(b) as x, y',  # issue297
])
def test_group_identifier_list(s):
    """Comma-separated expressions group into sql.IdentifierList."""
    parsed = sqlparse.parse(s)[0]
    assert isinstance(parsed.tokens[0], sql.IdentifierList)
def test_grouping_identifier_wildcard():
    """Dotted wildcards (a.*) are still identifiers inside a list."""
    p = sqlparse.parse('a.*, b.id')[0]
    assert isinstance(p.tokens[0], sql.IdentifierList)
    assert isinstance(p.tokens[0].tokens[0], sql.Identifier)
    assert isinstance(p.tokens[0].tokens[-1], sql.Identifier)
def test_grouping_identifier_name_wildcard():
    """get_name()/is_wildcard() work for a dotted wildcard identifier."""
    p = sqlparse.parse('a.*')[0]
    t = p.tokens[0]
    assert t.get_name() == '*'
    assert t.is_wildcard() is True
def test_grouping_identifier_invalid():
    """A trailing dot ('a.') still groups, with only the parent name known."""
    p = sqlparse.parse('a.')[0]
    assert isinstance(p.tokens[0], sql.Identifier)
    assert p.tokens[0].has_alias() is False
    assert p.tokens[0].get_name() is None
    assert p.tokens[0].get_real_name() is None
    assert p.tokens[0].get_parent_name() == 'a'
def test_grouping_identifier_invalid_in_middle():
    # issue261
    s = 'SELECT foo. FROM foo'
    p = sqlparse.parse(s)[0]
    # The dangling dot stays inside the identifier; FROM is not swallowed.
    assert isinstance(p[2], sql.Identifier)
    assert p[2][1].ttype == T.Punctuation
    assert p[3].ttype == T.Whitespace
    assert str(p[2]) == 'foo.'
@pytest.mark.parametrize('s', ['foo as (select *)', 'foo as(select *)'])
def test_grouping_identifer_as(s):
    # issue507 — (function name keeps the historical "identifer" typo; it is
    # public test API, so it is not renamed here.)
    p = sqlparse.parse(s)[0]
    assert isinstance(p.tokens[0], sql.Identifier)
    token = p.tokens[0].tokens[2]
    assert token.ttype == T.Keyword
    assert token.normalized == 'AS'
def test_grouping_identifier_as_invalid():
    """'AS' followed by a DML keyword must not be grouped as an alias (issue8)."""
    p = sqlparse.parse('foo as select *')[0]
    # The original `assert len(p.tokens), 5` was vacuous: `5` was parsed as
    # the assert *message*, so only truthiness was checked.  Assert a
    # meaningful lower bound instead (the structural checks below index up
    # to token 2, and the statement has at least identifier/ws/as/ws/select).
    assert len(p.tokens) >= 5
    assert isinstance(p.tokens[0], sql.Identifier)
    assert len(p.tokens[0].tokens) == 1
    assert p.tokens[2].ttype == T.Keyword
def test_grouping_identifier_function():
    """Function calls (and operations on them) group inside Identifiers."""
    ident = sqlparse.parse('foo() as bar')[0].tokens[0]
    assert isinstance(ident, sql.Identifier)
    assert isinstance(ident.tokens[0], sql.Function)
    ident = sqlparse.parse('foo()||col2 bar')[0].tokens[0]
    assert isinstance(ident, sql.Identifier)
    operation = ident.tokens[0]
    assert isinstance(operation, sql.Operation)
    assert isinstance(operation.tokens[0], sql.Function)
@pytest.mark.parametrize('s', ['foo+100', 'foo + 100', 'foo*100'])
def test_grouping_operation(s):
    """Arithmetic on an identifier groups as an Operation."""
    first_token = sqlparse.parse(s)[0].tokens[0]
    assert isinstance(first_token, sql.Operation)
def test_grouping_identifier_list():
    """Comma lists group both at top level and inside parentheses."""
    stmt = sqlparse.parse('a, b, c')[0]
    assert isinstance(stmt.tokens[0], sql.IdentifierList)
    stmt = sqlparse.parse('(a, b, c)')[0]
    assert isinstance(stmt.tokens[0].tokens[1], sql.IdentifierList)
def test_grouping_identifier_list_subquery():
    """identifier lists should still work in subqueries with aliases"""
    parsed = sqlparse.parse("select * from ("
                            "select a, b + c as d from table) sub")[0]
    subquery = parsed.tokens[-1].tokens[0]
    idx, grouped_list = subquery.token_next_by(i=sql.IdentifierList)
    assert grouped_list is not None
    # no stray Identifier may remain outside the grouped IdentifierList
    _, stray = subquery.token_next_by(i=sql.Identifier, idx=idx)
    assert stray is None
def test_grouping_identifier_list_case():
    """CASE expressions participate in identifier lists."""
    stmt = sqlparse.parse('a, case when 1 then 2 else 3 end as b, c')[0]
    assert isinstance(stmt.tokens[0], sql.IdentifierList)
    stmt = sqlparse.parse('(a, case when 1 then 2 else 3 end as b, c)')[0]
    assert isinstance(stmt.tokens[0].tokens[1], sql.IdentifierList)
def test_grouping_identifier_list_other():
    """Wildcards, NULL and literals may be IdentifierList members (issue2)."""
    id_list = sqlparse.parse("select *, null, 1, 'foo', bar from mytable, x")[0].tokens[2]
    assert isinstance(id_list, sql.IdentifierList)
    assert len(id_list.tokens) == 13
def test_grouping_identifier_list_with_inline_comments():
    """Inline comments do not break identifier-list grouping (issue163)."""
    id_list = sqlparse.parse('foo /* a comment */, bar')[0].tokens[0]
    assert isinstance(id_list, sql.IdentifierList)
    assert isinstance(id_list.tokens[0], sql.Identifier)
    assert isinstance(id_list.tokens[3], sql.Identifier)
def test_grouping_identifiers_with_operators():
    """All five names survive grouping of operator expressions."""
    parsed = sqlparse.parse('a+b as c from table where (d-e)%2= 1')[0]
    name_tokens = [tok for tok in parsed.flatten() if tok.ttype == T.Name]
    assert len(name_tokens) == 5
def test_grouping_identifier_list_with_order():
    """Ordering keywords stay attached to list members (issue101)."""
    id_list = sqlparse.parse('1, 2 desc, 3')[0].tokens[0]
    assert isinstance(id_list, sql.IdentifierList)
    ordered = id_list.tokens[3]
    assert isinstance(ordered, sql.Identifier)
    assert str(ordered) == '2 desc'
def test_grouping_where():
    """WHERE grouping round-trips and nests inside parenthesized subqueries."""
    simple = 'select * from foo where bar = 1 order by id desc'
    parsed = sqlparse.parse(simple)[0]
    assert str(parsed) == simple
    assert len(parsed.tokens) == 12
    nested = 'select x from (select y from foo where bar = 1) z'
    parsed = sqlparse.parse(nested)[0]
    assert str(parsed) == nested
    assert isinstance(parsed.tokens[-1].tokens[0].tokens[-2], sql.Where)
@pytest.mark.parametrize('s', (
    'select 1 where 1 = 2 union select 2',
    'select 1 where 1 = 2 union all select 2',
))
def test_grouping_where_union(s):
    """UNION / UNION ALL terminates the preceding WHERE clause."""
    union_token = sqlparse.parse(s)[0].tokens[5]
    assert union_token.value.startswith('union')
def test_returning_kw_ends_where_clause():
    """RETURNING is not swallowed by the WHERE group."""
    parsed = sqlparse.parse('delete from foo where x > y returning z')[0]
    assert isinstance(parsed.tokens[6], sql.Where)
    returning = parsed.tokens[7]
    assert returning.ttype == T.Keyword
    assert returning.value == 'returning'
def test_into_kw_ends_where_clause():  # issue324
    """INTO is not swallowed by the WHERE group."""
    parsed = sqlparse.parse('select * from foo where a = 1 into baz')[0]
    assert isinstance(parsed.tokens[8], sql.Where)
    into = parsed.tokens[9]
    assert into.ttype == T.Keyword
    assert into.value == 'into'
@pytest.mark.parametrize('sql, expected', [
    # note: typecast needs to be 2nd token for this test
    ('select foo::integer from bar', 'integer'),
    ('select (current_database())::information_schema.sql_identifier',
     'information_schema.sql_identifier'),
])
def test_grouping_typecast(sql, expected):
    """get_typecast() reports the type after a '::' cast.

    NOTE(review): the 'sql' parameter shadows the imported sql module inside
    this function; the parameter name is fixed by the parametrize ids.
    """
    parsed = sqlparse.parse(sql)[0]
    assert parsed.tokens[2].get_typecast() == expected
def test_grouping_alias():
    """Aliases (with and without AS, with typecasts) are exposed via get_alias."""
    stmt = 'select foo as bar from mytable'
    parsed = sqlparse.parse(stmt)[0]
    assert str(parsed) == stmt
    assert parsed.tokens[2].get_real_name() == 'foo'
    assert parsed.tokens[2].get_alias() == 'bar'
    stmt = 'select foo from mytable t1'
    parsed = sqlparse.parse(stmt)[0]
    assert str(parsed) == stmt
    assert parsed.tokens[6].get_real_name() == 'mytable'
    assert parsed.tokens[6].get_alias() == 't1'
    stmt = 'select foo::integer as bar from mytable'
    parsed = sqlparse.parse(stmt)[0]
    assert str(parsed) == stmt
    assert parsed.tokens[2].get_alias() == 'bar'
    stmt = ('SELECT DISTINCT '
            '(current_database())::information_schema.sql_identifier AS view')
    parsed = sqlparse.parse(stmt)[0]
    assert str(parsed) == stmt
    assert parsed.tokens[4].get_alias() == 'view'
def test_grouping_alias_case():
    """A CASE expression can carry a bare alias (issue46)."""
    parsed = sqlparse.parse('CASE WHEN 1 THEN 2 ELSE 3 END foo')[0]
    assert len(parsed.tokens) == 1
    assert parsed.tokens[0].get_alias() == 'foo'
def test_grouping_subquery_no_parens():
    """A parenless subquery inside THEN must still group as a Case.

    Not totally sure if this is the right approach: when a THEN clause
    contains a subquery w/o parentheses around it *and* a WHERE condition,
    the WHERE grouper consumes END too.  This test makes sure parsing does
    not fail on that input.
    """
    parsed = sqlparse.parse('CASE WHEN 1 THEN select 2 where foo = 1 end')[0]
    assert len(parsed.tokens) == 1
    assert isinstance(parsed.tokens[0], sql.Case)
@pytest.mark.parametrize('s', ['foo.bar', 'x, y', 'x > y', 'x / y'])
def test_grouping_alias_returns_none(s):
    """Constructs without an alias return None (issue185, issue445)."""
    parsed = sqlparse.parse(s)[0]
    assert len(parsed.tokens) == 1
    assert parsed.tokens[0].get_alias() is None
def test_grouping_idlist_function():
    """A function with a bare alias still joins an IdentifierList (issue10)."""
    first_token = sqlparse.parse('foo(1) x, bar')[0].tokens[0]
    assert isinstance(first_token, sql.IdentifierList)
def test_grouping_comparison_exclude():
    """A lone '=' is not a Comparison; real comparisons still group."""
    paren = sqlparse.parse('(=)')[0].tokens[0]
    assert isinstance(paren, sql.Parenthesis)
    assert not isinstance(paren.tokens[1], sql.Comparison)
    paren = sqlparse.parse('(a=1)')[0].tokens[0]
    assert isinstance(paren.tokens[1], sql.Comparison)
    paren = sqlparse.parse('(a>=1)')[0].tokens[0]
    assert isinstance(paren.tokens[1], sql.Comparison)
def test_grouping_function():
    """Calls group as Function and expose their parameters."""
    func = sqlparse.parse('foo()')[0].tokens[0]
    assert isinstance(func, sql.Function)
    func = sqlparse.parse('foo(null, bar)')[0].tokens[0]
    assert isinstance(func, sql.Function)
    assert len(list(func.get_parameters())) == 2
def test_grouping_function_not_in():
    """'in(...)' is the IN comparison, not a function call (issue183)."""
    parsed = sqlparse.parse('in(1, 2)')[0]
    assert len(parsed.tokens) == 2
    assert parsed.tokens[0].ttype == T.Comparison
    assert isinstance(parsed.tokens[1], sql.Parenthesis)
def test_in_comparison():
    """'a in (1, 2)' groups as a Comparison with the list on the right (issue566)."""
    parsed = sqlparse.parse('a in (1, 2)')[0]
    assert len(parsed.tokens) == 1
    comparison = parsed.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert len(comparison.tokens) == 5
    assert comparison.left.value == 'a'
    assert comparison.right.value == '(1, 2)'
def test_grouping_varchar():
    """A type with a length, e.g. Varchar(50), groups like a Function."""
    parsed = sqlparse.parse('"text" Varchar(50) NOT NULL')[0]
    assert isinstance(parsed.tokens[2], sql.Function)
def test_statement_get_type():
    """get_type() classifies statements, ignoring leading whitespace."""
    def first_statement(sql_text):
        return sqlparse.parse(sql_text)[0]
    assert first_statement('select * from foo').get_type() == 'SELECT'
    assert first_statement('update foo').get_type() == 'UPDATE'
    assert first_statement(' update foo').get_type() == 'UPDATE'
    assert first_statement('\nupdate foo').get_type() == 'UPDATE'
    assert first_statement('foo').get_type() == 'UNKNOWN'
def test_identifier_with_operators():
    """'||' expressions group as a single Operation (issue 53)."""
    # with and without surrounding whitespace
    for text in ('foo||bar', 'foo || bar'):
        parsed = sqlparse.parse(text)[0]
        assert len(parsed.tokens) == 1
        assert isinstance(parsed.tokens[0], sql.Operation)
def test_identifier_with_op_trailing_ws():
    """Trailing whitespace is not grouped into the Operation."""
    parsed = sqlparse.parse('foo || bar ')[0]
    assert len(parsed.tokens) == 2
    assert isinstance(parsed.tokens[0], sql.Operation)
    assert parsed.tokens[1].ttype is T.Whitespace
def test_identifier_with_string_literals():
    """An identifier combined with a string literal groups as an Operation."""
    parsed = sqlparse.parse("foo + 'bar'")[0]
    assert len(parsed.tokens) == 1
    assert isinstance(parsed.tokens[0], sql.Operation)
# This test seems to be wrong. It was introduced when fixing #53, but #111
# showed that this shouldn't be an identifier at all. I'm leaving this
# commented in the source for a while.
# def test_identifier_string_concat():
#     p = sqlparse.parse("'foo' || bar")[0]
#     assert len(p.tokens) == 1
#     assert isinstance(p.tokens[0], sql.Identifier)
def test_identifier_consumes_ordering():
    """ORDER BY items keep their ordering direction (issue89)."""
    parsed = sqlparse.parse('select * from foo order by c1 desc, c2, c3')[0]
    order_list = parsed.tokens[-1]
    assert isinstance(order_list, sql.IdentifierList)
    idents = list(order_list.get_identifiers())
    assert len(idents) == 3
    assert idents[0].get_name() == 'c1'
    assert idents[0].get_ordering() == 'DESC'
    assert idents[1].get_name() == 'c2'
    assert idents[1].get_ordering() is None
def test_comparison_with_keywords():
    """Comparisons against keywords like NULL group, case-insensitively (issue90).

    In fact these are assignments, but for now we don't distinguish them.
    """
    parsed = sqlparse.parse('foo = NULL')[0]
    assert len(parsed.tokens) == 1
    comparison = parsed.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert len(comparison.tokens) == 5
    assert comparison.left.value == 'foo'
    assert comparison.right.value == 'NULL'
    # the lowercase variant must group the same way
    parsed = sqlparse.parse('foo = null')[0]
    assert len(parsed.tokens) == 1
    assert isinstance(parsed.tokens[0], sql.Comparison)
def test_comparison_with_floats():
    """Float literals are valid comparison operands (issue145)."""
    parsed = sqlparse.parse('foo = 25.5')[0]
    assert len(parsed.tokens) == 1
    comparison = parsed.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert len(comparison.tokens) == 5
    assert comparison.left.value == 'foo'
    assert comparison.right.value == '25.5'
def test_comparison_with_parenthesis():
    """A parenthesized expression can be the left side of a Comparison (issue23)."""
    parsed = sqlparse.parse('(3 + 4) = 7')[0]
    assert len(parsed.tokens) == 1
    comparison = parsed.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert isinstance(comparison.left, sql.Parenthesis)
    assert comparison.right.ttype is T.Number.Integer
@pytest.mark.parametrize('operator', (
    '=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
    'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
    """String literals group on the right of every comparison operator (issue148)."""
    parsed = sqlparse.parse("foo {} 'bar'".format(operator))[0]
    assert len(parsed.tokens) == 1
    comparison = parsed.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert comparison.right.value == "'bar'"
    assert comparison.right.ttype == T.String.Single
def test_like_and_ilike_comparison():
    """LIKE / NOT ILIKE conditions inside a WHERE clause group as Comparison
    tokens whose values match the expected regular expressions."""
    def validate_where_clause(where_clause, expected_tokens):
        # where_clause must match expected_tokens pairwise: each entry is
        # (ttype-or-class, regex-or-None)
        assert len(where_clause.tokens) == len(expected_tokens)
        for where_token, expected_token in zip(where_clause, expected_tokens):
            expected_ttype, expected_value = expected_token
            if where_token.ttype is not None:
                assert where_token.match(expected_ttype, expected_value, regex=True)
            else:
                # Certain tokens, such as comparison tokens, do not define a ttype that can be
                # matched against. For these tokens, we ensure that the token instance is of
                # the expected type and has a value conforming to specified regular expression
                # NOTE(review): this function-local import could live at module scope.
                import re
                assert (isinstance(where_token, expected_ttype)
                        and re.match(expected_value, where_token.value))
    [p1] = sqlparse.parse("select * from mytable where mytable.mycolumn LIKE 'expr%' limit 5;")
    [p1_where] = [token for token in p1 if isinstance(token, sql.Where)]
    validate_where_clause(p1_where, [
        (T.Keyword, "where"),
        (T.Whitespace, None),
        (sql.Comparison, r"mytable.mycolumn LIKE.*"),
        (T.Whitespace, None),
    ])
    [p2] = sqlparse.parse(
        "select * from mytable where mycolumn NOT ILIKE '-expr' group by othercolumn;")
    [p2_where] = [token for token in p2 if isinstance(token, sql.Where)]
    validate_where_clause(p2_where, [
        (T.Keyword, "where"),
        (T.Whitespace, None),
        (sql.Comparison, r"mycolumn NOT ILIKE.*"),
        (T.Whitespace, None),
    ])
def test_comparison_with_functions():
    """Function calls may appear on either side of a Comparison (issue230)."""
    def check(sql_text, left_value, right_value):
        # every variant must group into exactly one 5-token Comparison
        parsed = sqlparse.parse(sql_text)[0]
        assert len(parsed.tokens) == 1
        comparison = parsed.tokens[0]
        assert isinstance(comparison, sql.Comparison)
        assert len(comparison.tokens) == 5
        assert comparison.left.value == left_value
        assert comparison.right.value == right_value
    check('foo = DATE(bar.baz)', 'foo', 'DATE(bar.baz)')
    check('DATE(foo.bar) = DATE(bar.baz)', 'DATE(foo.bar)', 'DATE(bar.baz)')
    check('DATE(foo.bar) = bar.baz', 'DATE(foo.bar)', 'bar.baz')
def test_comparison_with_typed_literal():
    """DATE 'x' on the right side groups as a TypedLiteral."""
    parsed = sqlparse.parse("foo = DATE 'bar.baz'")[0]
    assert len(parsed.tokens) == 1
    comparison = parsed.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert len(comparison.tokens) == 5
    assert comparison.left.value == 'foo'
    assert isinstance(comparison.right, sql.TypedLiteral)
    assert comparison.right.value == "DATE 'bar.baz'"
@pytest.mark.parametrize('start', ['FOR', 'FOREACH'])
def test_forloops(start):
    """FOR/FOREACH ... END LOOP groups into a single For token."""
    parsed = sqlparse.parse('{} foo in bar LOOP foobar END LOOP'.format(start))[0]
    assert len(parsed.tokens) == 1
    assert isinstance(parsed.tokens[0], sql.For)
def test_nested_for():
    """Nested FOR loops group recursively."""
    parsed = sqlparse.parse('FOR foo LOOP FOR bar LOOP END LOOP END LOOP')[0]
    assert len(parsed.tokens) == 1
    outer_loop = parsed.tokens[0]
    assert outer_loop.tokens[0].value == 'FOR'
    assert outer_loop.tokens[-1].value == 'END LOOP'
    inner_loop = outer_loop.tokens[6]
    assert isinstance(inner_loop, sql.For)
    assert inner_loop.tokens[0].value == 'FOR'
    assert inner_loop.tokens[-1].value == 'END LOOP'
def test_begin():
    """BEGIN ... END groups into a single Begin token."""
    parsed = sqlparse.parse('BEGIN foo END')[0]
    assert len(parsed.tokens) == 1
    assert isinstance(parsed.tokens[0], sql.Begin)
def test_keyword_followed_by_parenthesis():
    """USING( stays a Keyword followed by Punctuation, not a Function."""
    parsed = sqlparse.parse('USING(somecol')[0]
    assert len(parsed.tokens) == 3
    assert parsed.tokens[0].ttype == T.Keyword
    assert parsed.tokens[1].ttype == T.Punctuation
def test_nested_begin():
    """Nested BEGIN blocks group recursively."""
    parsed = sqlparse.parse('BEGIN foo BEGIN bar END END')[0]
    assert len(parsed.tokens) == 1
    outer = parsed.tokens[0]
    assert outer.tokens[0].value == 'BEGIN'
    assert outer.tokens[-1].value == 'END'
    inner = outer.tokens[4]
    assert isinstance(inner, sql.Begin)
    assert inner.tokens[0].value == 'BEGIN'
    assert inner.tokens[-1].value == 'END'
def test_aliased_column_without_as():
    """A bare trailing name acts as a column alias."""
    tokens = sqlparse.parse('foo bar')[0].tokens
    assert len(tokens) == 1
    assert tokens[0].get_real_name() == 'foo'
    assert tokens[0].get_alias() == 'bar'
    ident = sqlparse.parse('foo.bar baz')[0].tokens[0]
    assert ident.get_parent_name() == 'foo'
    assert ident.get_real_name() == 'bar'
    assert ident.get_alias() == 'baz'
def test_qualified_function():
    """get_parent_name()/get_real_name() split a qualified call."""
    func = sqlparse.parse('foo()')[0].tokens[0]
    assert func.get_parent_name() is None
    assert func.get_real_name() == 'foo'
    func = sqlparse.parse('foo.bar()')[0].tokens[0]
    assert func.get_parent_name() == 'foo'
    assert func.get_real_name() == 'bar'
def test_aliased_function_without_as():
    """A bare trailing name also aliases a (qualified) function call."""
    func = sqlparse.parse('foo() bar')[0].tokens[0]
    assert func.get_parent_name() is None
    assert func.get_real_name() == 'foo'
    assert func.get_alias() == 'bar'
    func = sqlparse.parse('foo.bar() baz')[0].tokens[0]
    assert func.get_parent_name() == 'foo'
    assert func.get_real_name() == 'bar'
    assert func.get_alias() == 'baz'
def test_aliased_literal_without_as():
    """A literal can take a bare alias too."""
    tokens = sqlparse.parse('1 foo')[0].tokens
    assert len(tokens) == 1
    assert tokens[0].get_alias() == 'foo'
def test_grouping_as_cte():
    """'AS WITH ...' must not be treated as a plain alias."""
    tokens = sqlparse.parse('foo AS WITH apple AS 1, banana AS 2')[0].tokens
    assert len(tokens) > 4
    assert tokens[0].get_alias() is None
    assert tokens[2].value == 'AS'
    assert tokens[4].value == 'WITH'
|
andialbrecht/sqlparse
|
tests/test_grouping.py
|
Python
|
bsd-3-clause
| 22,563
|
# -*- coding: utf-8 -*-
import datetime as dt
from flask.ext.login import UserMixin
from fpage.database import db, CRUDMixin
from fpage.extensions import bcrypt
class User(UserMixin, CRUDMixin, db.Model):
    """Application user account backed by the ``users`` table.

    Mixes in Flask-Login session support (``UserMixin``) and the project's
    CRUD helpers (``CRUDMixin``).
    """
    __tablename__ = 'users'
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(80), unique=True, nullable=False)
    password = db.Column(db.String, nullable=False)  # The hashed password
    created_at = db.Column(db.DateTime(), nullable=False)
    first_name = db.Column(db.String(30), nullable=True)
    last_name = db.Column(db.String(30), nullable=True)
    active = db.Column(db.Boolean())
    is_admin = db.Column(db.Boolean())
    # unread counter; nullable and not set in __init__ -- presumably
    # maintained elsewhere (TODO confirm against callers)
    unread_count = db.Column(db.Integer, nullable=True)
    def __init__(self, username=None, email=None, password=None,
                 first_name=None, last_name=None,
                 active=True, is_admin=False):
        """Create a user; ``password`` (if given) is hashed, never stored raw.

        ``created_at`` is set to the current UTC time (naive datetime).
        """
        self.username = username
        self.email = email
        if password:
            self.set_password(password)
        self.active = active
        self.is_admin = is_admin
        self.created_at = dt.datetime.utcnow()
        self.first_name = first_name
        self.last_name = last_name
    def set_password(self, password):
        """Hash ``password`` with bcrypt and store the digest."""
        self.password = bcrypt.generate_password_hash(password)
    def check_password(self, password):
        """Return True if ``password`` matches the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password, password)
    @property
    def iso_time(self):
        """Creation timestamp in ISO-8601 format."""
        return self.created_at.isoformat()
    @property
    def full_name(self):
        """'First Last' display name (either part may be None)."""
        return "{0} {1}".format(self.first_name, self.last_name)
    def __repr__(self):
        """Debug representation showing the username."""
        return '<User "{username}">'.format(username=self.username)
|
Nikola-K/fpage
|
fpage/user/models.py
|
Python
|
apache-2.0
| 1,704
|
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import pktverify
from pktverify import packet_verifier, packet_filter, consts
from pktverify.consts import MA1
import config
import thread_cert
# Test description:
# The purpose of this test case is to verify that a Secondary BBR can take over
# forwarding of outbound multicast transmissions from the Thread Network when
# the Primary BBR disconnects. The Secondary in that case becomes Primary.
#
# Topology:
# ----------------(eth)------------------
# | |
# BR_1 (Leader) ---- BR_2
# | |
# | |
# ROUTER_1 -----------+
#
from pktverify.null_field import nullField
BR_1 = 1
BR_2 = 2
ROUTER_1 = 3
class MATN_09_FailureOfPrimaryBBROutboundMulticast(thread_cert.TestCase):
    """MATN-09: when the Primary BBR (BR_1) disconnects, the Secondary
    (BR_2) becomes Primary and takes over outbound multicast forwarding.

    See the topology comment above for the node layout.
    """
    USE_MESSAGE_FACTORY = False
    TOPOLOGY = {
        BR_1: {
            'name': 'BR_1',
            'is_otbr': True,
            'allowlist': [BR_2, ROUTER_1],
            'version': '1.2',
        },
        BR_2: {
            'name': 'BR_2',
            'is_otbr': True,
            'allowlist': [BR_1, ROUTER_1],
            'version': '1.2',
        },
        ROUTER_1: {
            'name': 'Router_1',
            'allowlist': [BR_1, BR_2],
            'version': '1.2',
            'partition_id': 1,
            # shortened so Router_1 detects the lost leader quickly
            'network_id_timeout': 150,
        },
    }
    def test(self):
        """Drive the failover: ping MA1 via BR_1, stop BR_1, ping via BR_2."""
        br1 = self.nodes[BR_1]
        br2 = self.nodes[BR_2]
        router1 = self.nodes[ROUTER_1]
        br1.start()
        self.simulator.go(5)
        self.assertEqual('leader', br1.get_state())
        self.assertTrue(br1.is_primary_backbone_router)
        router1.start()
        self.simulator.go(10)
        self.assertEqual('router', router1.get_state())
        br2.start()
        self.simulator.go(5)
        self.assertEqual('router', br2.get_state())
        self.assertFalse(br2.is_primary_backbone_router)
        # 1. Router sends a ping packet to the multicast address, MA1,
        # encapsulated in an MPL packet
        self.assertFalse(router1.ping(MA1))
        self.simulator.go(5)
        # 4a. Switch off BR_1
        br1.disable_backbone_router()
        br1.thread_stop()
        br1.interface_down()
        self.simulator.go(180)
        # 4b. BR_2 detects the missing Primary BBR and becomes the the Leader of
        # the Thread Network.
        self.assertEqual('disabled', br1.get_state())
        self.assertEqual('leader', br2.get_state())
        self.assertEqual('router', router1.get_state())
        self.assertFalse(br1.is_primary_backbone_router)
        self.assertTrue(br2.is_primary_backbone_router)
        # 5. Router_1 sends a ping packet to the multicast address, MA1,
        # encapsulated in an MPL packet.
        self.assertFalse(router1.ping(MA1))
        self.collect_ipaddrs()
        self.collect_rloc16s()
        self.collect_rlocs()
        self.collect_leader_aloc(BR_2)
        self.collect_extra_vars()
    def verify(self, pv: pktverify.packet_verifier.PacketVerifier):
        """Check the captured packets against the MATN-09 pass criteria."""
        pkts = pv.pkts
        vars = pv.vars
        pv.summary.show()
        logging.info(f'vars = {vars}')
        # Ensure the topology is formed correctly
        pv.verify_attached('Router_1', 'BR_1')
        pv.verify_attached('BR_2')
        # 1. Router_1 sends a ping packet to the multicast address, MA1,
        # encapsulated in an MPL packet.
        _pkt = pkts.filter_wpan_src64(vars['Router_1']) \
            .filter_AMPLFMA(mpl_seed_id=vars['Router_1_RLOC']) \
            .filter_ping_request() \
            .must_next()
        initial_identifier = _pkt.icmpv6.echo.identifier
        # 2. BR_1 forwards the multicast ping packet with multicast address,
        # MA1, to the LAN.
        pkts.filter_eth_src(vars['BR_1_ETH']) \
            .filter_ipv6_dst(MA1) \
            .filter_ping_request(identifier=_pkt.icmpv6.echo.identifier) \
            .must_next()
        with pkts.save_index():
            pv.verify_attached('Router_1', 'BR_2')
        # 4b. BR_2 detects the missing Primary BBR and becomes the Leader of the
        # Thread Network, distributing its BBR dataset.
        # Verify that Router_1 has received the new BBR Dataset from BR_2,
        # where:
        # • RLOC16 in Server TLV == The RLOC16 of BR_2
        # All fields in the Service TLV contain valid values.
        pkts.filter_wpan_src64(vars['BR_2']) \
            .filter_mle() \
            .filter(
                lambda p: p.thread_nwd.tlv.server_16 is not nullField and
                vars['BR_2_RLOC16'] in p.thread_nwd.tlv.server_16) \
            .must_next()
        # 5.Router_1 sends a ping packet to the multicast address, MA1,
        # encapsulated in an MPL packet.
        _pkt = pkts.filter_wpan_src64(vars['Router_1']) \
            .filter_AMPLFMA(mpl_seed_id=vars['Router_1_RLOC']) \
            .filter_ping_request() \
            .filter(lambda p: p.icmpv6.echo.identifier != initial_identifier) \
            .must_next()
        # 6. BR_2 forwards the multicast ping packet with multicast address,
        # MA1, to the LAN.
        pkts.filter_eth_src(vars['BR_2_ETH']) \
            .filter_ipv6_dst(MA1) \
            .filter_ping_request(identifier=_pkt.icmpv6.echo.identifier) \
            .must_next()
# Allow running this scenario directly via unittest's CLI.
if __name__ == '__main__':
    unittest.main()
|
openthread/openthread
|
tests/scripts/thread-cert/border_router/MATN/MATN_09_DefaultBRMulticastForwarding.py
|
Python
|
bsd-3-clause
| 6,962
|
from model import *
import parse
import tensorflow as tf
import numpy as np
import os
import random
import time
# NOTE(review): Python 2 code (print statements, xrange) using the TF 1.x
# session API throughout.
data, vocab = parse.get_vocab('essay')
onehot = parse.embed_to_onehot(data, vocab)
# hyperparameters
## learning rate for both discriminator and generator
learning_rate = 0.0001
## sequence length for RNN models
length_sentence = 64
## batch size for RNN models
batch_size = 20
## how many iteration for discriminator and generator training session
epoch = 100
## how many iteration for sequence model to train
nested_epoch = 15
## hidden layers of RNN models
num_layers = 2
## size layers of RNN models
size_layer = 512
len_noise = 100
## initial tag length for generator part
tag_length = 5
possible_batch_id = range(len(data) - batch_size)
sess = tf.InteractiveSession()
model = Model(num_layers, size_layer, len(vocab), len_noise, length_sentence, learning_rate)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# fixed noise vector reused at every checkpoint so samples are comparable
sample_z = np.random.uniform(-1, 1, size = (1, length_sentence, len_noise))
start_tag = random.randint(0, len(data) - tag_length)
tag = data[start_tag: start_tag + tag_length]
DISC_LOSS, GEN_LOSS = [], []
# adversarial loop: one discriminator step + one generator step per epoch,
# followed by `nested_epoch` sequence-model steps on the same batch
for i in xrange(epoch):
    last_time = time.time()
    random_sample = np.random.uniform(-1, 1, size = (batch_size, length_sentence, len_noise))
    batch_fake = np.zeros((batch_size, length_sentence, len(vocab)))
    batch_true = np.zeros((batch_size, length_sentence, len(vocab)))
    # NOTE(review): batch_id is `length_sentence` random start offsets, shifted
    # by the row index n below, so "fake" rows are non-contiguous character
    # picks while "true" rows are contiguous slices -- confirm intended.
    batch_id = random.sample(possible_batch_id, length_sentence)
    for n in xrange(batch_size):
        id1 = [k + n for k in batch_id]
        batch_fake[n, :, :] = onehot[id1, :]
        start_random = random.randint(0, len(data) - length_sentence)
        batch_true[n, :, :] = onehot[start_random: start_random + length_sentence, :]
    disc_loss, _ = sess.run([model.d_loss, model.d_train_opt], feed_dict = {model.noise: random_sample, model.fake_input: batch_fake, model.true_sentence: batch_true})
    gen_loss, _ = sess.run([model.g_loss, model.g_train_opt], feed_dict = {model.noise: random_sample, model.fake_input: batch_fake, model.true_sentence: batch_true})
    print 'epoch: ' + str(i + 1) + ', discriminator loss: ' + str(disc_loss) + ', generator loss: ' + str(gen_loss) + ', s/epoch: ' + str(time.time() - last_time)
    for nested in xrange(nested_epoch):
        seq_loss, _ = sess.run([model.seq_loss, model.seq_opt], feed_dict = {model.noise: random_sample, model.fake_input: batch_fake, model.true_sentence: batch_true})
        print 'epoch: ' + str(nested + 1) + ', sequence loss: ' + str(seq_loss)
    DISC_LOSS.append(disc_loss)
    GEN_LOSS.append(gen_loss)
    # every 5 epochs, sample a sentence continuation from the generator
    if (i + 1) % 5 == 0:
        print 'checkpoint: ' + str(i + 1)
        print 'generated sentence: '
        tag = generate(sess, length_sentence, sample_z, model, tag, length_sentence, vocab)
        print ' '.join(tag)
|
huseinzol05/Deep-Learning-Tensorflow
|
hybrid/GAN-Sentence/main.py
|
Python
|
mit
| 2,758
|
import os
import sys
import glob
import time
from subprocess import check_output
from collections import OrderedDict
from googleapiclient.discovery import build
from leviathan_config import GOOGLE_API_KEY, GOOGLE_CSE_ID, USER_AGENT, BASE_DIR
from utils import id_generator, timeout, printProgressBar
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run a Google Custom Search query and return the raw result items."""
    cse_service = build("customsearch", "v1", developerKey=api_key)
    response = cse_service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
    return response['items']
def link_extract(query, number):
    """Query Google CSE for a dork and append the result URLs to an
    assets/discovered/google_web_<id>.txt file named with a generated id."""
    try:
        print "Extracting URLs from Google for following dork: " + query
        discovery_id = id_generator()
        results = google_search(query, GOOGLE_API_KEY, GOOGLE_CSE_ID, num=number)
        i = 0
        l = len(results)
        for result in results:
            printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50)
            filename = os.path.join(BASE_DIR, 'assets', 'discovered', 'google_web_' + str(discovery_id) + '.txt')
            with open(filename, "a") as links:
                links.write(result['link'])
                links.write("\n")
            i += 1
            # NOTE(review): printProgressBar's return value is written to
            # stdout here; if it returns None this raises a TypeError --
            # verify utils.printProgressBar's contract.
            sys.stdout.write(printProgressBar(i, l, prefix='Progress:', suffix='Complete', length=50))
            time.sleep(0.1)
            sys.stdout.flush()
        print "\nFinished"
    except Exception as e:
        # best-effort: the most common failure here is an exhausted API quota
        print "Link extraction failed! Probably your API limit exceeded"
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e)
def sqli_scan(discovery_id):
    """Scan every URL recorded under *discovery_id* for SQL injection.

    Reads assets/discovered/google_web_<id>.txt and appends findings to
    assets/compromised/google_web_<id>.txt via sqli_scan_by_url().
    """
    discovered_file = os.path.join(BASE_DIR, 'assets', 'discovered', 'google_web_' + str(discovery_id) + '.txt')
    output_file = os.path.join(BASE_DIR, 'assets', 'compromised', 'google_web_%s.txt' % discovery_id)
    try:
        with open(discovered_file, "r") as urls_file:
            url_list = urls_file.readlines()
            for url in url_list:
                try:
                    sqli_scan_by_url(url, output_file)
                except KeyboardInterrupt:
                    # let the operator abort the remainder of this batch
                    break
                except:
                    # deliberate best-effort: skip URLs that error or time out
                    continue
    except IOError:
        print "There is no such file: %s" % discovered_file
    print "\n"
    print "Finished. Returning back.."
    time.sleep(5)
@timeout()
def sqli_scan_by_url(url, output_file):
    """Run the bundled DSSS scanner against one URL and log its output lines.

    Wrapped in @timeout() so a hanging scan cannot stall the whole batch.
    """
    print "Trying: " + url
    output = check_output(['python', 'dsss.py', '-u', url.rstrip(), '--user-agent', USER_AGENT],
                          cwd=BASE_DIR + '/lib')
    lines = output.split('\n')
    # de-duplicate the report lines while preserving their order
    unique_lines = OrderedDict.fromkeys((line for line in lines if line))
    for line in unique_lines:
        try:
            # NOTE(review): every non-empty output line is reported as a
            # finding -- confirm dsss.py prints only vulnerable URLs.
            print "**SQLi Found: " + line
            print "\n"
            with open(output_file, "a") as cracked:
                cracked.write(line)
                cracked.write("\n")
        except IOError:
            print "There is no such file: %s" % output_file
def sqli_scan_all():
    """Run the SQLi scan over every previously discovered URL list.

    Finds each assets/discovered/*_web_*.txt file, extracts its discovery id
    from the file name, and delegates to sqli_scan().
    """
    discovered_files_reg = os.path.join(BASE_DIR, 'assets', 'discovered', '*_web_*.txt')
    discovered_files = glob.glob(discovered_files_reg)
    for df in discovered_files:
        # Parse the id from the basename ("<engine>_web_<id>.txt").  The old
        # code split the *full path* on "_", which broke whenever BASE_DIR
        # itself contained an underscore.
        discoveryid = os.path.splitext(os.path.basename(df))[0].split("_")[-1]
        sqli_scan(discoveryid)
# sqli_scan_all()
# link_extract("inurl:.php?id=",11)
# sqli_scan("3646276")
|
leviathan-framework/leviathan
|
lib/sqli_scanner.py
|
Python
|
gpl-3.0
| 3,321
|
from __future__ import division
import abc
import numpy as np
import operator
from parameters import Parameter
"""Geometry classes."""
class Primitive(object):
    """Abstract class representing a primitive shape."""
    __metaclass__ = abc.ABCMeta
    points = None
    """Points associated with this primitive."""
    name = None
    """Name of this primitive."""
    def __init__(self, points, name):
        """Constructs a new primitive.
        A list of points making up this primitive and a textual description
        must be specified.
        :param points: list of :class:`pygeosolve.geometry.Point` objects \
        associated with this primitive
        :param name: name of this primitive
        """
        self.points = points
        self.name = name
    def __str__(self):
        """String representation of this primitive.
        Returns a description of the :class:`~pygeosolve.geometry.Primitive` and
        a list of its associated :class:`~pygeosolve.geometry.Point` objects.
        :return: description of this :class:`~pygeosolve.geometry.Primitive` and
        its :class:`~pygeosolve.geometry.Point` objects
        """
        return "{0} with points {1}".format(self.name, ", ".join([str(point) for point in self.points]))
    @property
    def fixed(self):
        """Whether every point of this primitive is fixed.

        :return: True if all associated points are fixed
        """
        # all() replaces reduce(operator.and_, ...): identical for non-empty
        # point lists, well-defined (True) for an empty one, and also works
        # on Python 3 where reduce is no longer a builtin.
        return all(point.fixed for point in self.points)
    @fixed.setter
    def fixed(self, fixed):
        # propagate with a plain loop rather than a side-effecting
        # list comprehension
        for point in self.points:
            point.fixed = fixed
class Point(Primitive):
    """Represents a two-dimensional point in Euclidean space."""
    x = None
    """The x-coordinate of this point."""
    y = None
    """The y-coordinate of this point."""
    def __init__(self, x, y):
        """Constructs a new point.
        :param x: x-position
        :param y: y-position
        """
        # set positions
        self.x = x
        self.y = y
        # call parent with self
        super(Point, self).__init__([self], "Point")
    def params(self):
        """Parameters associated with this problem.
        :return: list of :class:`~pygeosolve.parameters.Parameter` objects
        """
        # list of parameters
        return [self.x, self.y]
    def abs(self):
        """Distance of this point from the origin.

        NOTE(review): relies on np.power/np.sqrt accepting the coordinate
        objects (Parameter instances when built via Line) -- confirm that
        Parameter implements the required numeric protocol.
        """
        return np.sqrt(np.power(self.x, 2) + np.power(self.y, 2))
    def __sub__(self, obj):
        # NOTE(review): the difference is a Point holding raw floats
        # (.value), unlike Parameter-based points -- confirm intended.
        return Point(self.x.value - obj.x.value, self.y.value - obj.y.value)
    @property
    def fixed(self):
        # a point is fixed when both of its coordinates are fixed
        return self.x.fixed and self.y.fixed
    @fixed.setter
    def fixed(self, fixed):
        self.x.fixed = fixed
        self.y.fixed = fixed
    def __str__(self):
        """String representation of this point.
        :return: string representing (x, y) coordinates
        """
        return "({0}, {1})".format(self.x, self.y)
class Line(Primitive):
    """A straight line segment between two points in Euclidean space."""
    def __init__(self, x1, y1, x2, y2):
        """Create a line running from (x1, y1) to (x2, y2).

        Each coordinate is wrapped in a
        :class:`~pygeosolve.parameters.Parameter`.

        :param x1: start x-coordinate
        :param y1: start y-coordinate
        :param x2: end x-coordinate
        :param y2: end y-coordinate
        """
        start_point = Point(Parameter(x1), Parameter(y1))
        end_point = Point(Parameter(x2), Parameter(y2))
        super(Line, self).__init__([start_point, end_point], "Line")
    def start(self):
        """Return the start :class:`~pygeosolve.geometry.Point` of this line."""
        return self.points[0]
    def end(self):
        """Return the end :class:`~pygeosolve.geometry.Point` of this line."""
        return self.points[1]
    def dx(self):
        """Return the end-minus-start difference in x-values."""
        return self.end().x.value - self.start().x.value
    def dy(self):
        """Return the end-minus-start difference in y-values."""
        return self.end().y.value - self.start().y.value
    def hypot(self):
        """Return the actual length of this line.

        Computed by Pythagoras' theorem from the coordinate differences.
        """
        return np.sqrt(self.dx() * self.dx() + self.dy() * self.dy())
|
SeanDS/pygeosolve
|
pygeosolve/geometry.py
|
Python
|
gpl-3.0
| 4,739
|
# -*- coding: utf-8; -*-
"""
Copyright (C) 2013 - Arnaud SOURIOUX <six.dsn@gmail.com>
Copyright (C) 2012 - Ozcan ESEN <ozcanesen~gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
from gi.repository import Gtk
from terra.ConfigManager import ConfigManager
class VteObjectContainer(Gtk.HBox):
    """Horizontal GTK box holding the VTE terminal(s) of one page."""
    # class-level default; shadowed by a per-instance counter in __init__
    counter = 0
    def __init__(self, parent, bare=False, progname=None, pwd=None):
        super(VteObjectContainer, self).__init__()
        # a "bare" container is left empty for the caller to populate
        if bare:
            return
        self.counter = 0
        self.parent = parent
        self.vte_list = []
        self.active_terminal = None
        if not progname:
            # fall back to the configured default shell program
            progname = ConfigManager.get_conf('general', 'start_shell_program')
        # deferred import — presumably avoids a circular import; confirm
        import terra.VteObject
        self.append_terminal(terra.VteObject.VteObject(), progname, pwd=pwd)
        self.pack_start(self.active_terminal, True, True, 0)
        self.show_all()
    def close_page(self):
        # find the active (non-leader) page button and ask the window to close it
        terminalwin = self.get_toplevel()
        for button in terminalwin.buttonbox:
            if button != terminalwin.radio_group_leader and button.get_active():
                return terminalwin.page_close(None, button)
    def append_terminal(self, term, progname, pwd=None, term_id=0):
        # assign an id, spawn the child process and make this the active terminal
        term.id = self.handle_id(term_id)
        term.set_pwd(self.active_terminal, pwd)
        term.fork_process(progname)
        self.active_terminal = term
        self.vte_list.append(self.active_terminal)
    def handle_id(self, setter=0):
        # Use the explicit id when given (non-zero), otherwise the running
        # counter; keep the counter strictly ahead of any id handed out.
        if setter != 0:
            ret_id = setter
        else:
            ret_id = self.counter
        self.counter = max(self.counter, setter) + 1
        return ret_id
|
Sixdsn/terra-terminal
|
terra/VteObjectContainer.py
|
Python
|
gpl-3.0
| 2,212
|
# -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
from collections import deque
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["shlex", "split"]
class shlex:
    "A lexical analyzer class for simple shell-like syntaxes."
    def __init__(self, instream=None, infile=None, posix=False):
        """Create a lexer reading from *instream* (string, file-like object,
        or stdin when omitted). *posix* selects POSIX quoting/escaping rules."""
        # accept a plain string by wrapping it in a file-like StringIO
        if isinstance(instream, basestring):
            instream = StringIO(instream)
        if instream is not None:
            self.instream = instream
            self.infile = infile
        else:
            # no stream given: read interactively from stdin
            self.instream = sys.stdin
            self.infile = None
        self.posix = posix
        # in POSIX mode EOF is None so that '' can be a real (quoted) token
        if posix:
            self.eof = None
        else:
            self.eof = ''
        self.commenters = '#'
        # NOTE(review): letters appear as 'abcdfegh...' (d/e/f transposed),
        # but the full alphabet is still covered.
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        if self.posix:
            # POSIX mode also accepts Latin-1 accented letters in words
            self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                               'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        self.whitespace = ' \t\r\n'
        self.whitespace_split = False
        self.quotes = '\'"'
        self.escape = '\\'
        self.escapedquotes = '"'
        # lexer state: ' ' = in whitespace, 'a' = in a word, a quote char =
        # inside quotes, the escape char = after a backslash, None = past EOF
        self.state = ' '
        self.pushback = deque()
        self.lineno = 1
        self.debug = 0
        self.token = ''
        # stack of (infile, instream, lineno) for nested source inclusions
        self.filestack = deque()
        self.source = None
        if self.debug:
            print 'shlex: reading from %s, line %d' \
                  % (self.instream, self.lineno)
    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print "shlex: pushing token " + repr(tok)
        self.pushback.appendleft(tok)
    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        # accept a plain string, mirroring __init__
        if isinstance(newstream, basestring):
            newstream = StringIO(newstream)
        # save the current source so pop_source can restore it
        self.filestack.appendleft((self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile is not None:
                print 'shlex: pushing to file %s' % (self.infile,)
            else:
                print 'shlex: pushing to stream %s' % (self.instream,)
    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack.popleft()
        if self.debug:
            print 'shlex: popping to %s, line %d' \
                  % (self.instream, self.lineno)
        # restart the state machine in the whitespace state
        self.state = ' '
    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            tok = self.pushback.popleft()
            if self.debug >= 1:
                print "shlex: popping token " + repr(tok)
            return tok
        # No pushback. Get a token.
        raw = self.read_token()
        # Handle inclusions
        if self.source is not None:
            while raw == self.source:
                # next token names the file/stream to include
                spec = self.sourcehook(self.read_token())
                if spec:
                    (newfile, newstream) = spec
                    self.push_source(newstream, newfile)
                raw = self.get_token()
        # Maybe we got EOF instead?
        while raw == self.eof:
            if not self.filestack:
                return self.eof
            else:
                # EOF of an included source: resume the outer one
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw != self.eof:
                print "shlex: token=" + repr(raw)
            else:
                print "shlex: token=EOF"
        return raw
    def read_token(self):
        """Read one raw token from the input stream (the state machine)."""
        quoted = False
        escapedstate = ' '
        while True:
            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state", repr(self.state), \
                      "I see character:", repr(nextchar)
            if self.state is None:
                self.token = ''        # past end of file
                break
            elif self.state == ' ':
                # between tokens, skipping whitespace
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    # comment runs to end of line
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    # non-POSIX mode keeps the quote characters in the token
                    if not self.posix:
                        self.token = nextchar
                    self.state = nextchar
                elif self.whitespace_split:
                    self.token = nextchar
                    self.state = 'a'
                else:
                    # punctuation: a one-character token
                    self.token = nextchar
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                # inside a quoted section; state holds the opening quote char
                quoted = True
                if not nextchar:      # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in quotes state"
                    # XXX what error should be raised here?
                    raise ValueError, "No closing quotation"
                if nextchar == self.state:
                    if not self.posix:
                        self.token = self.token + nextchar
                        self.state = ' '
                        break
                    else:
                        # POSIX: closing quote just returns to word state
                        self.state = 'a'
                elif self.posix and nextchar in self.escape and \
                     self.state in self.escapedquotes:
                    escapedstate = self.state
                    self.state = nextchar
                else:
                    self.token = self.token + nextchar
            elif self.state in self.escape:
                # just after a backslash; escapedstate is where to return to
                if not nextchar:      # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in escape state"
                    # XXX what error should be raised here?
                    raise ValueError, "No escaped character"
                # In posix shells, only the quote itself or the escape
                # character may be escaped within quotes.
                if escapedstate in self.quotes and \
                   nextchar != self.state and nextchar != escapedstate:
                    self.token = self.token + self.state
                self.token = self.token + nextchar
                self.state = escapedstate
            elif self.state == 'a':
                # accumulating a word
                if not nextchar:
                    self.state = None   # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                    if self.posix:
                        self.state = ' '
                        if self.token or (self.posix and quoted):
                            break   # emit current token
                        else:
                            continue
                elif self.posix and nextchar in self.quotes:
                    self.state = nextchar
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif nextchar in self.wordchars or nextchar in self.quotes \
                     or self.whitespace_split:
                    self.token = self.token + nextchar
                else:
                    # punctuation ends the word; save it for the next call
                    self.pushback.appendleft(nextchar)
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break   # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        # POSIX: an empty unquoted token means EOF, not an empty string
        if self.posix and not quoted and result == '':
            result = None
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + repr(result)
            else:
                print "shlex: raw token=EOF"
        return result
    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        # strip surrounding double quotes, if any
        if newfile[0] == '"':
            newfile = newfile[1:-1]
        # This implements cpp-like semantics for relative-path inclusion.
        if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))
    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if infile is None:
            infile = self.infile
        if lineno is None:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)
    def __iter__(self):
        return self
    def next(self):
        # iterator protocol (Python 2): yield tokens until EOF
        token = self.get_token()
        if token == self.eof:
            raise StopIteration
        return token
def split(s, comments=False, posix=True):
    """Split the string *s* using shell-like syntax and return the tokens."""
    lex = shlex(s, posix=posix)
    # split purely on whitespace, treating other punctuation as word chars
    lex.whitespace_split = True
    # unless comments are honoured, disable comment handling entirely
    lex.commenters = lex.commenters if comments else ''
    return list(lex)
if __name__ == '__main__':
    # Demo driver: tokenize stdin, or the file named on the command line.
    if len(sys.argv) == 1:
        lexer = shlex()
    else:
        file = sys.argv[1]
        lexer = shlex(open(file), file)
    # print tokens until EOF ('' in non-POSIX mode) is returned
    while 1:
        tt = lexer.get_token()
        if tt:
            print "Token: " + repr(tt)
        else:
            break
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/shlex.py
|
Python
|
bsd-3-clause
| 11,429
|
from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django.utils.encoding import force_text
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
class RegexValidator(object):
    """Validator that rejects any value not matching a regular expression."""
    regex = ''
    message = _('Enter a valid value.')
    code = 'invalid'
    def __init__(self, regex=None, message=None, code=None):
        # apply any per-instance overrides of the class-level defaults
        for name, override in (('regex', regex),
                               ('message', message),
                               ('code', code)):
            if override is not None:
                setattr(self, name, override)
        # Compile the regex if it was not passed pre-compiled.
        if isinstance(self.regex, six.string_types):
            self.regex = re.compile(self.regex)
    def __call__(self, value):
        """
        Validates that the input matches the regular expression.
        """
        matched = self.regex.search(force_text(value))
        if not matched:
            raise ValidationError(self.message, code=self.code)
class URLValidator(RegexValidator):
    """Validates http(s)/ftp(s) URLs, with an IDN-encoding retry."""
    regex = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|' # domain...
        r'localhost|' # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)\Z', re.IGNORECASE)
    message = _('Enter a valid URL.')
    def __call__(self, value):
        """Validate *value*; on failure, retry once with the netloc
        IDNA-encoded, re-raising the original error if that also fails."""
        try:
            super(URLValidator, self).__call__(value)
        except ValidationError as e:
            # Trivial case failed. Try for possible IDN domain
            if value:
                value = force_text(value)
                scheme, netloc, path, query, fragment = urlsplit(value)
                try:
                    netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
                except UnicodeError: # invalid domain part
                    raise e
                url = urlunsplit((scheme, netloc, path, query, fragment))
                super(URLValidator, self).__call__(url)
            else:
                raise
        else:
            # NOTE(review): `url` is assigned here but never used afterwards
            url = value
# Optional leading minus sign followed by one or more digits.
integer_validator = RegexValidator(
    re.compile('^-?\d+\Z'),
    message=_('Enter a valid integer.'),
    code='invalid',
)
def validate_integer(value):
    # thin wrapper so the validator can be referenced as a plain function
    return integer_validator(value)
class EmailValidator(object):
    """Validates that a value is a plausible email address.

    The user part is checked against ``user_regex`` and the domain part
    against ``domain_regex``, with a whitelist bypass and an IDN retry.
    """
    message = _('Enter a valid email address.')
    code = 'invalid'
    user_regex = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
        re.IGNORECASE)
    domain_regex = re.compile(
        r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z'
        # literal form, ipv4 address (SMTP 4.1.3)
        r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]\Z',
        re.IGNORECASE)
    domain_whitelist = ['localhost']
    def __init__(self, message=None, code=None, whitelist=None):
        """Optionally override the error message, error code and the
        whitelist of domains that skip the domain regex check."""
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        if whitelist is not None:
            self.domain_whitelist = whitelist
    def __call__(self, value):
        """Raise ValidationError unless *value* is a valid email address."""
        value = force_text(value)
        if not value or '@' not in value:
            raise ValidationError(self.message, code=self.code)
        # split on the last '@' to separate user and domain parts
        user_part, domain_part = value.rsplit('@', 1)
        if not self.user_regex.match(user_part):
            raise ValidationError(self.message, code=self.code)
        # fixed idiom: `domain_part not in ...` (was `not domain_part in ...`)
        if (domain_part not in self.domain_whitelist and
                not self.domain_regex.match(domain_part)):
            # Try for possible IDN domain-part
            try:
                domain_part = domain_part.encode('idna').decode('ascii')
                if not self.domain_regex.match(domain_part):
                    raise ValidationError(self.message, code=self.code)
                else:
                    return
            except UnicodeError:
                pass
            raise ValidationError(self.message, code=self.code)
# Default instances/patterns for the common field validators.
validate_email = EmailValidator()
# slug: letters, digits, underscores and hyphens only
slug_re = re.compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(slug_re, _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid')
# dotted-quad IPv4 with each octet limited to 0-255
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z')
validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid')
def validate_ipv6_address(value):
    """Raise ValidationError unless *value* is a valid IPv6 address."""
    if is_valid_ipv6_address(value):
        return
    raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
    """Accept *value* if it is a valid IPv4 or IPv6 address."""
    # try each family in turn; succeed on the first validator that passes
    for validator in (validate_ipv4_address, validate_ipv6_address):
        try:
            validator(value)
            return
        except ValidationError:
            continue
    raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
# Maps a protocol name to (validator list, error message); consumed by
# ip_address_validators() for GenericIPAddressField.
ip_address_validator_map = {
    'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
    'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
    'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
    """
    Depending on the given parameters returns the appropriate validators for
    the GenericIPAddressField.
    This code is here, because it is exactly the same for the model and the form field.
    """
    # unpack_ipv4 only makes sense when both families are accepted
    if unpack_ipv4 and protocol != 'both':
        raise ValueError(
            "You can only use `unpack_ipv4` if `protocol` is set to 'both'")
    entry = ip_address_validator_map.get(protocol.lower())
    if entry is None:
        raise ValueError("The protocol '%s' is unknown. Supported: %s"
                         % (protocol, list(ip_address_validator_map)))
    return entry
# digits and commas only, e.g. "1,2,3"
comma_separated_int_list_re = re.compile('^[\d,]+\Z')
validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _('Enter only digits separated by commas.'), 'invalid')
class BaseValidator(object):
    """Base class for limit-style validators (min/max value and length)."""
    # comparison that decides failure; subclasses override with their own lambda
    compare = lambda self, a, b: a is not b
    # hook to transform the value before comparison (e.g. len); default no-op
    clean = lambda self, x: x
    message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
    code = 'limit_value'
    def __init__(self, limit_value):
        # the bound the cleaned value is compared against
        self.limit_value = limit_value
    def __call__(self, value):
        cleaned = self.clean(value)
        params = {'limit_value': self.limit_value, 'show_value': cleaned}
        # raise when the comparison flags the value as out of bounds
        if self.compare(cleaned, self.limit_value):
            raise ValidationError(self.message, code=self.code, params=params)
class MaxValueValidator(BaseValidator):
    """Fails when the value is greater than the limit."""
    message = _('Ensure this value is less than or equal to %(limit_value)s.')
    code = 'max_value'
    def compare(self, a, b):
        # invalid when the value exceeds the limit
        return a > b
class MinValueValidator(BaseValidator):
    """Fails when the value is smaller than the limit."""
    message = _('Ensure this value is greater than or equal to %(limit_value)s.')
    code = 'min_value'
    def compare(self, a, b):
        # invalid when the value falls below the limit
        return a < b
class MinLengthValidator(BaseValidator):
    """Fails when the value's length is below the limit."""
    message = ungettext_lazy(
        'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
        'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
        'limit_value')
    code = 'min_length'
    def compare(self, a, b):
        return a < b
    def clean(self, x):
        # compare lengths, not the values themselves
        return len(x)
class MaxLengthValidator(BaseValidator):
    """Fails when the value's length exceeds the limit."""
    message = ungettext_lazy(
        'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
        'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
        'limit_value')
    code = 'max_length'
    def compare(self, a, b):
        return a > b
    def clean(self, x):
        # compare lengths, not the values themselves
        return len(x)
|
redhat-openstack/django
|
django/core/validators.py
|
Python
|
bsd-3-clause
| 8,147
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sys
import urllib
import os.path
import ConfigParser
class AuthURLOpener(urllib.FancyURLopener):
    """URL opener that supplies a fixed username/password exactly once.

    After the first authentication attempt, empty credentials are returned
    so a wrong password cannot cause an endless retry loop.
    """
    def __init__(self, user, pw):
        self.username = user
        self.password = pw
        # number of password prompts answered so far
        self.numTries = 0
        urllib.FancyURLopener.__init__(self)
    def prompt_user_passwd(self, host, realm):
        # first prompt: hand out the stored credentials; afterwards give up
        if self.numTries == 0:
            self.numTries = 1
            return (self.username, self.password)
        else:
            return ('', '')
    def openit(self, url):
        # reset the attempt counter so each open gets one auth attempt
        self.numTries = 0
        return urllib.FancyURLopener.open(self, url)
def processEpisode(dirName, nzbName=None, failed=False):
    """Ask a Sick Beard server to post-process a downloaded episode.

    Reads connection settings from autoProcessTV.cfg (next to this script)
    and calls the /home/postprocess/processEpisode endpoint over HTTP(S).
    Exits the process on configuration or connection errors.

    dirName -- directory containing the downloaded episode
    nzbName -- original NZB name, if known
    failed  -- whether the download failed
    """
    config = ConfigParser.ConfigParser()
    configFilename = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessTV.cfg")
    print "Loading config from", configFilename
    if not os.path.isfile(configFilename):
        print "ERROR: You need an autoProcessTV.cfg file - did you rename and edit the .sample?"
        sys.exit(-1)
    try:
        fp = open(configFilename, "r")
        config.readfp(fp)
        fp.close()
    except IOError, e:
        print "Could not read configuration file: ", str(e)
        sys.exit(1)
    # required connection settings
    host = config.get("SickBeard", "host")
    port = config.get("SickBeard", "port")
    username = config.get("SickBeard", "username")
    password = config.get("SickBeard", "password")
    # optional settings with defaults
    try:
        ssl = int(config.get("SickBeard", "ssl"))
    except (ConfigParser.NoOptionError, ValueError):
        ssl = 0
    try:
        web_root = config.get("SickBeard", "web_root")
    except ConfigParser.NoOptionError:
        web_root = ""
    # query-string parameters for the post-processing request
    params = {}
    params['quiet'] = 1
    params['dirName'] = dirName
    if nzbName != None:
        params['nzbName'] = nzbName
    params['failed'] = failed
    myOpener = AuthURLOpener(username, password)
    if ssl:
        protocol = "https://"
    else:
        protocol = "http://"
    url = protocol + host + ":" + port + web_root + "/home/postprocess/processEpisode?" + urllib.urlencode(params)
    print "Opening URL:", url
    try:
        urlObj = myOpener.openit(url)
    except IOError, e:
        print "Unable to open URL: ", str(e)
        sys.exit(1)
    # relay the server's response to stdout
    result = urlObj.readlines()
    for line in result:
        print line
|
schumi2004/NOT_UPDATED_Sick-Beard-Dutch
|
autoProcessTV/autoProcessTV.py
|
Python
|
gpl-3.0
| 3,185
|
import json
from django.http import HttpResponse
from django.template import loader, Template
from django.utils.datastructures import SortedDict
from urllib import urlencode
from tiote import forms, utils
def browse(request):
    """Render the rows of a table, or handle row deletion/editing posts."""
    conn_params = utils.fns.get_conn_params(request)
    # row(s) deletion request handling
    if request.method == 'POST' and request.GET.get('upd8') == 'delete':
        return utils.db.rpr_query(conn_params, 'delete_row',
            utils.fns.qd(request.GET), utils.fns.qd(request.POST))
    # row(s) edit/updating request handling
    elif request.method == 'POST' and request.GET.get('upd8') == 'edit':
        return utils.fns.http_500('feature not yet implemented!')
    # plain GET: fetch a page of rows and render them as an HTML table
    tbl_data = utils.db.rpr_query(conn_params, 'browse_table',
        utils.fns.qd(request.GET), utils.fns.qd(request.POST))
    static_addr = utils.fns.render_template(request, '{{STATIC_URL}}')
    browse_table = utils.fns.HtmlTable(
        static_addr = static_addr,
        props={'count':tbl_data['count'], 'keys': tbl_data['keys']['rows'],
            'with_checkboxes': True, 'display_row': True,
        },
        store = {'total_count':tbl_data['total_count'], 'offset': tbl_data['offset'],
            'limit': tbl_data['limit']
        }, **tbl_data
    )
    if not browse_table.has_body():
        return HttpResponse('<div class="undefined">[This table contains no entry]</div>')
    browse_table_html = browse_table.to_element().replace('\n', '<br />') # html doesn't display newlines(\n)
    table_options_html = utils.fns.table_options('data',
        with_keys=bool(tbl_data['keys']['rows']), select_actions=True)
    return HttpResponse(table_options_html + browse_table_html)
def structure(request):
    """Render a table's columns or indexes, or handle column deletion posts."""
    conn_params = utils.fns.get_conn_params(request)
    # column deletion
    if request.method == 'POST' and request.GET.get('upd8'):
        l = request.POST.get('whereToEdit').strip().split(';');
        conditions = utils.fns.get_conditions(l)
        q = ''
        if request.GET.get('upd8') == 'edit':
            # NOTE(review): q is assigned but the immediate return makes it dead
            q = 'drop_table'
            return HttpResponse('update not yet implemented!')
        elif request.GET.get('upd8') == 'delete':
            q = 'delete_column'
        query_data = {'db': request.GET.get('db'), 'table': request.GET.get('table'),
            'conditions': conditions}
        return utils.db.rpr_query(conn_params, q, query_data)
    # view data
    static_addr = utils.fns.render_template(request, '{{STATIC_URL}}')
    # sub-view selector: 'cols' (default) or 'idxs'
    subv = request.GET.get('subv', 'cols')
    d = {}
    _subnav = {'cols': utils.fns.ABBREVS['cols'], 'idxs':utils.fns.ABBREVS['idxs']}
    if subv == 'cols':
        d['title'] = _subnav[subv]
        tbl_struct_data = utils.db.rpr_query(conn_params, 'table_structure', utils.fns.qd(request.GET))
        columns_table = utils.fns.HtmlTable(attribs = {'id': 'tbl_columns'},
            props = {'count': tbl_struct_data['count'], 'with_checkboxes': True,},
            static_addr = static_addr, **tbl_struct_data
        )
        d['table'] = columns_table.to_element() if columns_table.has_body() \
            else '<div class="undefined">[Table contains no columns]</div>'
    elif subv == 'idxs':
        d['title'] = _subnav[subv]
        indexes_data = utils.db.rpr_query(conn_params, 'indexes', utils.fns.qd(request.GET))
        indexes_table = utils.fns.HtmlTable(static_addr = static_addr,
            props = {'count': indexes_data['count'], 'with_checkboxes': True},
            **indexes_data
        )
        d['table'] = indexes_table.to_element() if indexes_table.has_body() \
            else '<div class="undefined">[Table contains no indexes]</div>'
    # generate arranged href
    dest_url = SortedDict(); _d = {'sctn':'tbl','v':'structure'}
    for k in _d: dest_url[k] = _d[k] # init this way to maintain order
    for k in ('db', 'schm','tbl',):
        if request.GET.get(k): dest_url[k] = request.GET.get(k)
    _l = []
    # generate navigation ul
    for k in ('cols', 'idxs',):
        _l.append('<li{0}><a href="{1}{2}">{3}<span>|</span></a></li>'.format(
            ' class="active"' if _subnav[k].lower() == d['title'].lower() else '',
            '#'+urlencode(dest_url)+'&subv=', k, _subnav[k])
        )
    ret_str = '<div style="margin-bottom:-5px;"><ul class="subnav">{0}</ul></div>{table}'.format(
        "".join(_l),**d)
    return HttpResponse(ret_str)
def insert(request):
    """Render the row-insertion form (GET) or process its submission (POST)."""
    # make queries and inits
    conn_params = utils.fns.get_conn_params(request)
    tbl_struct_data = utils.db.rpr_query(conn_params, 'raw_table_structure', utils.fns.qd(request.GET))
    # keys = ['column','type','null','default','character_maximum_length','numeric_precision','numeric_scale']
    tbl_indexes_data = utils.db.rpr_query(conn_params, 'indexes', utils.fns.qd(request.GET))
    if request.method == 'POST':
        # the form is a submission so it doesn't require initialization from a database request
        # every needed field would already be in the form (applies to forms for 'edit' view)
        form = forms.InsertForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
            tbl_indexes=tbl_indexes_data['rows'], data=request.POST)
        # validate form
        if form.is_valid():
            ret = utils.db.insert_row(conn_params, utils.fns.qd(request.GET),
                utils.fns.qd(request.POST))
            return HttpResponse(json.dumps(ret))
        else: # form contains error
            ret = {'status': 'fail',
                'msg': utils.fns.render_template(request,"tt_form_errors.html",
                    {'form': form}, is_file=True).replace('\n','')
            }
            return HttpResponse(json.dumps(ret))
    # GET: render an unbound insertion form
    form = forms.InsertForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
        tbl_indexes=tbl_indexes_data['rows'])
    return utils.fns.response_shortcut(request, extra_vars={'form':form,}, template='form')
def edit(request):
    """Render pre-filled edit forms for selected rows, or process a submission."""
    # GET method is not allowed: the POST fields used to initialize the form
    # would not be available. Redirect the page to the mother page ('v' of request.GET)
    if request.method == 'GET':
        h = HttpResponse(''); d = SortedDict()
        for key in ('sctn', 'v', 'db', 'schm', 'tbl'):
            if request.GET.get(key): d[key] = request.GET.get(key)
        h.set_cookie('TT_NEXT', str( urlencode(d) ) )
        return h
    # inits and queries
    conn_params = utils.fns.get_conn_params(request)
    tbl_struct_data = utils.db.rpr_query(conn_params, 'raw_table_structure', utils.fns.qd(request.GET))
    # keys = ['column','type','null','default','character_maximum_length','numeric_precision','numeric_scale']
    tbl_indexes_data = utils.db.rpr_query(conn_params, 'indexes', utils.fns.qd(request.GET))
    # generate the form(s)
    if request.method == 'POST' and request.POST.get('where_stmt'):
        # parse the POST structure and generate a list of dict.
        l = request.POST.get('where_stmt').strip().split(';')
        conditions = utils.fns.get_conditions(l)
        # loop through the dict, request for the row which have _dict as its where clause
        # - and used that information to bind the EditForm
        _l_forms = []
        for _dict in conditions:
            single_row_data = utils.db.rpr_query(conn_params, 'get_single_row',
                utils.fns.qd(request.GET), _dict
            )
            # make single row data a dict mapping of columns to rows
            init_data = dict( zip( single_row_data['columns'], single_row_data['rows'][0] ) )
            # create form and store in a the forms list
            f = forms.EditForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
                tbl_indexes=tbl_indexes_data['rows'], initial=init_data)
            _l_forms.append(f)
        return utils.fns.response_shortcut(request, extra_vars={'forms':_l_forms,}, template='multi_form')
    # submissions of a form
    else:
        f = forms.EditForm(tbl_struct=tbl_struct_data, dialect=conn_params['dialect'],
            tbl_indexes=tbl_indexes_data['rows'], data = request.POST)
        if f.is_valid():
            # two options during submission: update_row or insert_row
            if f.cleaned_data['save_changes_to'] == 'insert_row':
                # pretty straight forward (lifted from insert view above)
                ret = utils.db.insert_row(conn_params, utils.fns.qd(request.GET),
                    f.cleaned_data)
                return HttpResponse(json.dumps(ret))
            else:
                indexed_cols = utils.fns.parse_indexes_query(tbl_indexes_data['rows'])
                ret = utils.db.update_row(conn_params, indexed_cols,
                    utils.fns.qd(request.GET), f.cleaned_data)
                return HttpResponse(json.dumps(ret))
        else:
            # format and return form errors
            ret = {'status': 'fail',
                'msg': utils.fns.render_template(request,"tt_form_errors.html",
                    {'form': f}, is_file=True).replace('\n','')
            }
            return HttpResponse(json.dumps(ret))
# view router
def route(request):
    """Dispatch a request in the "table" section to the matching view."""
    # the 'edit' sub-view takes precedence over the main 'v' parameter
    if request.GET.get('subv') == 'edit':
        return edit(request)
    handlers = {
        'browse': browse,
        'structure': structure,
        'insert': insert,
    }
    handler = handlers.get(request.GET.get('v'))
    if handler is None:
        return utils.fns.http_500('malformed URL of section "table"')
    return handler(request)
|
joskid/tiote
|
tiote/views/tbl.py
|
Python
|
mit
| 9,570
|
# pylint: skip-file
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for visionml."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import unittest
import mock
import apache_beam as beam
from apache_beam.metrics import MetricsFilter
from apache_beam.typehints.decorators import TypeCheckError
# Protect against environments where vision lib is not available.
try:
from google.cloud.vision import ImageAnnotatorClient
from google.cloud import vision
from apache_beam.ml.gcp import visionml
except ImportError:
ImageAnnotatorClient = None
@unittest.skipIf(
ImageAnnotatorClient is None, 'Vision dependencies are not installed')
class VisionTest(unittest.TestCase):
def setUp(self):
self._mock_client = mock.Mock()
self._mock_client.batch_annotate_images.return_value = None
feature_type = vision.enums.Feature.Type.TEXT_DETECTION
self.features = [
vision.types.Feature(
type=feature_type, max_results=3, model="builtin/stable")
]
self.img_ctx = vision.types.ImageContext()
self.min_batch_size = 1
self.max_batch_size = 1
def test_AnnotateImage_URIs(self):
images_to_annotate = [
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg'
]
expected_counter = len(images_to_annotate)
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
p = beam.Pipeline()
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImage(
self.features,
min_batch_size=self.min_batch_size,
max_batch_size=self.max_batch_size))
result = p.run()
result.wait_until_finish()
read_filter = MetricsFilter().with_name('API Calls')
query_result = result.metrics().query(read_filter)
if query_result['counters']:
read_counter = query_result['counters'][0]
self.assertTrue(read_counter.result == expected_counter)
def test_AnnotateImage_URI_with_side_input_context(self):
images_to_annotate = [
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg'
]
image_contexts = [
('gs://cloud-samples-data/vision/ocr/sign.jpg', self.img_ctx),
('gs://cloud-samples-data/vision/ocr/sign.jpg', self.img_ctx),
]
expected_counter = len(images_to_annotate)
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
p = beam.Pipeline()
context_side_input = (p | "Image contexts" >> beam.Create(image_contexts))
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImage(
self.features,
min_batch_size=self.min_batch_size,
max_batch_size=self.max_batch_size,
context_side_input=beam.pvalue.AsDict(context_side_input)))
result = p.run()
result.wait_until_finish()
read_filter = MetricsFilter().with_name('API Calls')
query_result = result.metrics().query(read_filter)
if query_result['counters']:
read_counter = query_result['counters'][0]
self.assertTrue(read_counter.result == expected_counter)
def test_AnnotateImage_b64_content(self):
base_64_encoded_image = \
b'YmVnaW4gNjQ0IGNhdC12aWRlby5tcDRNICAgICgmOVQ+NyFNPCMwUi4uZmFrZV92aWRlb'
images_to_annotate = [
base_64_encoded_image,
base_64_encoded_image,
base_64_encoded_image,
]
expected_counter = len(images_to_annotate)
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
p = beam.Pipeline()
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImage(
self.features,
min_batch_size=self.min_batch_size,
max_batch_size=self.max_batch_size))
result = p.run()
result.wait_until_finish()
read_filter = MetricsFilter().with_name('API Calls')
query_result = result.metrics().query(read_filter)
if query_result['counters']:
read_counter = query_result['counters'][0]
self.assertTrue(read_counter.result == expected_counter)
def test_AnnotateImageWithContext_URIs(self):
images_to_annotate = [
('gs://cloud-samples-data/vision/ocr/sign.jpg', self.img_ctx),
('gs://cloud-samples-data/vision/ocr/sign.jpg', None),
('gs://cloud-samples-data/vision/ocr/sign.jpg', self.img_ctx),
]
batch_size = 5
expected_counter = 1 # All images should fit in the same batch
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
p = beam.Pipeline()
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImageWithContext(
self.features,
min_batch_size=batch_size,
max_batch_size=batch_size))
result = p.run()
result.wait_until_finish()
read_filter = MetricsFilter().with_name('API Calls')
query_result = result.metrics().query(read_filter)
if query_result['counters']:
read_counter = query_result['counters'][0]
self.assertTrue(read_counter.result == expected_counter)
def test_AnnotateImageWithContext_bad_input(self):
"""AnnotateImageWithContext should not accept images without context"""
images_to_annotate = [
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg'
]
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
with self.assertRaises(TypeCheckError):
p = beam.Pipeline()
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImageWithContext(
self.features))
result = p.run()
result.wait_until_finish()
def test_AnnotateImage_bad_input(self):
images_to_annotate = [123456789, 123456789, 123456789]
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
with self.assertRaises(TypeCheckError):
p = beam.Pipeline()
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImage(self.features))
result = p.run()
result.wait_until_finish()
def test_AnnotateImage_URIs_large_batch(self):
images_to_annotate = [
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
'gs://cloud-samples-data/vision/ocr/sign.jpg',
]
batch_size = 5
expected_counter = 3 # All 11 images should fit in 3 batches
with mock.patch.object(visionml,
'get_vision_client',
return_value=self._mock_client):
p = beam.Pipeline()
_ = (
p
| "Create data" >> beam.Create(images_to_annotate)
| "Annotate image" >> visionml.AnnotateImage(
self.features,
max_batch_size=batch_size,
min_batch_size=batch_size))
result = p.run()
result.wait_until_finish()
read_filter = MetricsFilter().with_name('API Calls')
query_result = result.metrics().query(read_filter)
if query_result['counters']:
read_counter = query_result['counters'][0]
self.assertTrue(read_counter.result == expected_counter)
# Allow running this test module directly with verbose logging.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
iemejia/incubator-beam
|
sdks/python/apache_beam/ml/gcp/visionml_test.py
|
Python
|
apache-2.0
| 9,377
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.contrib import floating_ip_pools as fipp_v2
from nova.api.openstack.compute.plugins.v3 import floating_ip_pools as\
fipp_v21
from nova import context
from nova import network
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_floating_ip_pools(self, context):
    # Stub for network.api.API.get_floating_ip_pools: always returns a
    # fixed list of two pool names.
    pools = ['nova', 'other']
    return pools
class FloatingIpPoolTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-floating-ip-pools API extension."""
    floating_ip_pools = fipp_v21
    url = '/v2/fake/os-floating-ip-pools'

    def setUp(self):
        super(FloatingIpPoolTestV21, self).setUp()
        # Replace the network API call with the fixed-pool stub.
        self.stubs.Set(network.api.API, "get_floating_ip_pools",
                       fake_get_floating_ip_pools)
        self.context = context.RequestContext('fake', 'fake')
        self.controller = self.floating_ip_pools.FloatingIPPoolsController()

    def test_translate_floating_ip_pools_view(self):
        pools = fake_get_floating_ip_pools(None, self.context)
        view = self.floating_ip_pools._translate_floating_ip_pools_view(pools)
        self.assertIn('floating_ip_pools', view)
        # Each pool name must appear, in order, under the 'name' key.
        for index, pool_name in enumerate(pools[:2]):
            self.assertEqual(view['floating_ip_pools'][index]['name'],
                             pool_name)

    def test_floating_ips_pools_list(self):
        req = fakes.HTTPRequest.blank(self.url)
        res_dict = self.controller.index(req)
        pools = fake_get_floating_ip_pools(None, self.context)
        expected = {'floating_ip_pools': [{'name': name} for name in pools]}
        self.assertEqual(res_dict, expected)
# Re-run the whole V21 test suite against the legacy v2 extension module.
class FloatingIpPoolTestV2(FloatingIpPoolTestV21):
    floating_ip_pools = fipp_v2
class FloatingIpPoolSerializerTestV2(test.NoDBTestCase):
    """XML serializer tests for the v2 os-floating-ip-pools extension."""
    floating_ip_pools = fipp_v2

    def test_index_serializer(self):
        serializer = self.floating_ip_pools.FloatingIPPoolsTemplate()
        payload = dict(floating_ip_pools=[dict(name='nova'),
                                          dict(name='other')])
        tree = etree.fromstring(serializer.serialize(payload))
        # Root element wraps one <floating_ip_pool> child per pool.
        self.assertEqual('floating_ip_pools', tree.tag)
        self.assertEqual(2, len(tree))
        for child, expected_name in zip(tree, ['nova', 'other']):
            self.assertEqual('floating_ip_pool', child.tag)
            self.assertEqual(expected_name, child.get('name'))
|
vmthunder/nova
|
nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py
|
Python
|
apache-2.0
| 3,128
|
# -*- coding: utf-8 -*-
from __future__ import division
from materials.ec2 import EC2_materials
import os
from miscUtils import LogMessages as lmsg
__author__= "Ana Ortega (AO_O) "
__copyright__= "Copyright 2015, AO_O"
__license__= "GPL"
__version__= "3.0"
__email__= "ana.ortega@ciccp.es "
# Characteristic compressive strengths fck [MPa] from EC-2 table 3.1.
fckDat=[12,16,20,25,30,35,40,45,50,55,60,70,80,90]
# Computed values, filled in by the loop below, one entry per fck grade.
fcmCalc=[]
fctmCalc=[]
fctk005Calc=[]
fctk095Calc=[]
EcmCalc=[]
Epsc1Calc=[]
Epscu1Calc=[]
Epsc2Calc=[]
Epscu2Calc=[]
ExpNCalc=[]
Epsc3Calc=[]
Epscu3Calc=[]
# Build one EC2Concrete per grade and collect every derived property,
# converting back from SI (Pa, -) to the tabulated units (MPa, per mil).
for i in range(len(fckDat)):
    name='C'+str(fckDat[i])
    fck=-1*fckDat[i]*1e6 #[Pa][-]
    concr= EC2_materials.EC2Concrete(name,fck,1.5)
    fcm=concr.getFcm()/(-1e6)
    fcmCalc.append(fcm)
    fctm=round(concr.getFctm()/1e6,1)
    fctmCalc.append(fctm)
    fctk005=round(concr.getFctk005()/1e6,1)
    fctk005Calc.append(fctk005)
    fctk095=round(concr.getFctk095()/1e6,1)
    fctk095Calc.append(fctk095)
    # Quartzite aggregate is set before computing Ecm; presumably Ecm
    # depends on the aggregate type -- TODO confirm in EC2_materials.
    concr.typeAggregate='Q'
    Ecm=round(concr.getEcm()/1e9,0)
    EcmCalc.append(Ecm)
    Epsc1=round(concr.getEpsc1()*(-1e3),1)
    Epsc1Calc.append(Epsc1)
    Epscu1=round(concr.getEpscu1()*(-1e3),1)
    Epscu1Calc.append(Epscu1)
    Epsc2=round(concr.getEpsc2()*(-1e3),1)
    Epsc2Calc.append(Epsc2)
    Epscu2=round(concr.getEpscu2()*(-1e3),1)
    Epscu2Calc.append(Epscu2)
    ExpN=round(concr.getExpN(),1)
    ExpNCalc.append(ExpN)
    # Reference table lists eps_c3 with 2 decimals up to C50, 1 above.
    if concr.fckMPa()<=50:
        Epsc3=round(concr.getEpsc3()*(-1e3),2)
    else:
        Epsc3=round(concr.getEpsc3()*(-1e3),1)
    Epsc3Calc.append(Epsc3)
    Epscu3=round(concr.getEpscu3()*(-1e3),1)
    Epscu3Calc.append(Epscu3)
#Test Fcm
fcmDat=[20,24,28,33,38,43,48,53,58,63,68,78,88,98] #[MPa]
sqrErr= 0.0
for i in range(0,len(fcmDat)):
sqrErr+= (fcmDat[i]-fcmCalc[i])**2
fname= os.path.basename(__file__)+'-fcm'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Fctm
fctmDat=[1.6,1.9,2.2,2.6,2.9,3.2,3.5,3.8,4.1,4.2,4.4,4.6,4.8,5.0] #[MPa]
sqrErr= 0.0
for i in range(0,len(fctmDat)):
sqrErr+= (fctmDat[i]-fctmCalc[i])**2
fname= os.path.basename(__file__)+'-fctm'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Fctk005
fctk005Dat=[1.1,1.3,1.5,1.8,2.0,2.2,2.5,2.7,2.9,3.0,3.0,3.2,3.4,3.5] #[MPa]
sqrErr= 0.0
for i in range(0,len(fctk005Dat)):
sqrErr+= (fctk005Dat[i]-fctk005Calc[i])**2
fname= os.path.basename(__file__)+'-fctk005'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Fctk095
fctk095Dat=[2.0,2.5,2.9,3.3,3.8,4.2,4.6,4.9,5.3,5.5,5.7,6.0,6.3,6.6] #[MPa]
sqrErr= 0.0
for i in range(0,len(fctk095Dat)):
sqrErr+= (fctk095Dat[i]-fctk095Calc[i])**2
fname= os.path.basename(__file__)+'-fctk095'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Ecm
EcmDat=[27,29,30,31,33,34,35,36,37,38,39,41,42,44] #[GPa]
sqrErr= 0.0
for i in range(0,len(EcmDat)):
sqrErr+= (EcmDat[i]-EcmCalc[i])**2
fname= os.path.basename(__file__)+'-Ecm'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epsc1
Epsc1Dat=[1.8,1.9,2.0,2.1,2.2,2.2,2.3,2.4,2.5,2.5,2.6,2.7,2.8,2.8] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epsc1Dat)):
sqrErr+= (Epsc1Dat[i]-Epsc1Calc[i])**2
fname= os.path.basename(__file__)+'-Epsc1'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epscu1
Epscu1Dat=[3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.2,3.0,2.8,2.8,2.8] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epscu1Dat)):
sqrErr+= (Epscu1Dat[i]-Epscu1Calc[i])**2
fname= os.path.basename(__file__)+'-Epscu1'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epsc2
Epsc2Dat=[2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.2,2.3,2.4,2.5,2.6] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epsc2Dat)):
sqrErr+= (Epsc2Dat[i]-Epsc2Calc[i])**2
fname= os.path.basename(__file__)+'-Epsc2'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epscu2
Epscu2Dat=[3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.1,2.9,2.7,2.6,2.6] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epscu2Dat)):
sqrErr+= (Epscu2Dat[i]-Epscu2Calc[i])**2
fname= os.path.basename(__file__)+'-Epscu2'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test ExpN
ExpNDat=[2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,1.8,1.6,1.4,1.4,1.4]
sqrErr= 0.0
for i in range(0,len(ExpNDat)):
sqrErr+= (ExpNDat[i]-ExpNCalc[i])**2
fname= os.path.basename(__file__)+'-ExpN'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epsc3
Epsc3Dat=[1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.8,1.9,2.0,2.2,2.3] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epsc3Dat)):
sqrErr+= (Epsc3Dat[i]-Epsc3Calc[i])**2
fname= os.path.basename(__file__)+'-Epsc3'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epscu3
Epscu3Dat=[3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.1,2.9,2.7,2.6,2.6] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epscu3Dat)):
sqrErr+= (Epscu3Dat[i]-Epscu3Calc[i])**2
fname= os.path.basename(__file__)+'-Epscu3'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
|
lcpt/xc
|
verif/tests/materials/ec2/test_EC2Concrete.py
|
Python
|
gpl-3.0
| 5,205
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class ImageBuilderException(Exception):
    """Error raised by image-build operations."""
    pass
class ImageRateLimitedException(Exception):
    """Rate Limited request"""
class ImageSpecificationException(Exception):
    """Error raised for an invalid image specification."""
    pass
class ImageUploaderException(Exception):
    """Error raised by image-upload operations."""
    pass
class ImageUploaderThreadException(Exception):
    """Conflict during thread processing"""
    # The redundant `pass` after the docstring was removed; a docstring is
    # a sufficient class body.
class ImageNotFoundException(Exception):
    """Error raised when a requested image cannot be found."""
    pass
|
openstack/tripleo-common
|
tripleo_common/image/exception.py
|
Python
|
apache-2.0
| 991
|
# proxy module
from __future__ import absolute_import
from scimath.units.smart_unit import *
|
enthought/etsproxy
|
enthought/units/smart_unit.py
|
Python
|
bsd-3-clause
| 93
|
import pytest
from pymoku.instruments import Oscilloscope
from pymoku import _oscilloscope
try:
from unittest.mock import patch, ANY
except ImportError:
from mock import patch, ANY
@pytest.fixture
def dut(moku):
    # Device-under-test fixture: an Oscilloscope deployed on the mock Moku.
    # _set_running is patched out so deployment does not need real hardware.
    with patch('pymoku._frame_instrument.FrameBasedInstrument._set_running'):
        i = Oscilloscope()
        moku.deploy_instrument(i)
        # Discard the register traffic caused by deployment so each test
        # only sees its own calls.
        moku.reset_mock()
        return i
def test_set_timebase(dut, moku):
    '''
    set_timebase should push its configuration to the device registers.
    '''
    dut.set_timebase(-1.0, 1.0)
    moku._write_regs.assert_called_with(ANY)
def test_set_samplerate(dut, moku):
    '''
    set_samplerate followed by get_samplerate should result in register
    writes on the device.
    '''
    dut.set_samplerate(100e3)
    dut.get_samplerate()
    moku._write_regs.assert_called_with(ANY)
def test_set_xmode(dut, moku):
    '''
    set_xmode('roll') should push its configuration to the device registers.
    '''
    dut.set_xmode('roll')
    moku._write_regs.assert_called_with(ANY)
def test_set_precision_mode(dut, moku):
    '''
    set_precision_mode / is_precision_mode should result in register writes
    on the device.
    '''
    dut.set_precision_mode(True)
    dut.is_precision_mode()
    moku._write_regs.assert_called_with(ANY)
def test_set_defaults(dut, moku):
    '''
    set_defaults should push the default configuration to device registers.
    '''
    dut.set_defaults()
    moku._write_regs.assert_called_with(ANY)
def test_set_trigger(dut, moku):
    '''
    set_trigger should push the trigger configuration to device registers.
    '''
    dut.set_trigger('in1', 'rising', 0.0)
    moku._write_regs.assert_called_with(ANY)
def test_set_source(dut, moku):
    '''
    set_source should push the channel-source configuration to device
    registers.
    '''
    dut.set_source(1, 'in1', 0.0)
    moku._write_regs.assert_called_with(ANY)
@pytest.mark.parametrize('attr, value', [
    ('source_ch1', _oscilloscope._OSC_SOURCE_CH1),
    # NOTE(review): 'source_ch2' and 'trig_ch' also use the CH1 constant;
    # presumably any valid source value suffices for this register-write
    # smoke test -- confirm this is intentional.
    ('source_ch2', _oscilloscope._OSC_SOURCE_CH1),
    ('trig_ch', _oscilloscope._OSC_SOURCE_CH1),
    ('hf_reject', True),
    ('loopback_mode_ch1', _oscilloscope._OSC_LB_CLIP),
    ('loopback_mode_ch2', _oscilloscope._OSC_LB_CLIP),
    ('ain_mode', _oscilloscope._OSC_AIN_DDS),
    ('trig_precision', True),
    ('decimation_rate', 1),
    ('auto_timer', 1.0),
    ('auto_holdoff', 0),
])
def test_attributes(dut, moku, attr, value):
    '''
    Setting each register-backed attribute and committing should cause a
    register write on the device.
    '''
    setattr(dut, attr, value)
    dut.commit()
    moku._write_regs.assert_called_with(ANY)
|
liquidinstruments/pymoku
|
tests/test_oscilloscope.py
|
Python
|
mit
| 2,175
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2020-03-22 00:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the public_interface app.

    Creates FlickrImages, Genes, GeneSets, LocalImages, Primers, Sequences,
    TaxonSets and Vouchers, then wires up the foreign keys.  Do not edit by
    hand beyond what `makemigrations` would regenerate.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='FlickrImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('voucher_image', models.URLField(blank=True, help_text='URLs of the Flickr page.')),
                ('thumbnail', models.URLField(help_text='URLs for the small sized image from Flickr.')),
                ('flickr_id', models.CharField(help_text='ID numbers from Flickr for our photo.', max_length=100)),
                ('image_file', models.ImageField(blank=True, help_text='Placeholder for image file so we can send it to Flickr. The file has been deleted right after upload.', upload_to='')),
            ],
            options={
                'verbose_name_plural': 'Flickr Images',
            },
        ),
        migrations.CreateModel(
            name='Genes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gene_code', models.CharField(max_length=100)),
                ('genetic_code', models.PositiveSmallIntegerField(help_text='Translation table (as number). See <a href="http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi">http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi</a>', null=True)),
                ('length', models.PositiveSmallIntegerField(help_text='Number of base pairs', null=True)),
                ('description', models.CharField(blank=True, help_text='Long gene name.', max_length=255)),
                ('reading_frame', models.PositiveSmallIntegerField(help_text='Either 1, 2 or 3', null=True)),
                ('notes', models.TextField(blank=True)),
                ('aligned', models.CharField(choices=[('yes', 'yes'), ('no', 'no'), ('notset', 'notset')], default='notset', max_length=6)),
                ('intron', models.CharField(blank=True, max_length=255)),
                ('prot_code', models.CharField(choices=[('yes', 'yes'), ('no', 'no'), ('notset', 'notset')], default='notset', max_length=6)),
                ('gene_type', models.CharField(blank=True, help_text='Nuclear, mitochondrial.', max_length=255)),
                ('time_created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'Genes',
            },
        ),
        migrations.CreateModel(
            name='GeneSets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('geneset_name', models.CharField(max_length=75)),
                ('geneset_creator', models.CharField(max_length=75)),
                ('geneset_description', models.CharField(blank=True, max_length=140)),
                ('geneset_list', models.TextField(help_text='As items separated by linebreak.')),
            ],
            options={
                'verbose_name_plural': 'Gene sets',
            },
        ),
        migrations.CreateModel(
            name='LocalImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('voucher_image', models.ImageField(blank=True, help_text='voucher photo.', upload_to='')),
            ],
            options={
                'verbose_name_plural': 'Local Images',
            },
        ),
        migrations.CreateModel(
            name='Primers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('primer_f', models.CharField(blank=True, max_length=100)),
                ('primer_r', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Sequences',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gene_code', models.CharField(db_index=True, max_length=100)),
                ('sequences', models.TextField(blank=True)),
                ('accession', models.CharField(blank=True, db_index=True, max_length=100)),
                ('lab_person', models.CharField(blank=True, db_index=True, max_length=100)),
                ('time_created', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),
                ('time_edited', models.DateTimeField(auto_now=True, db_index=True, null=True)),
                ('notes', models.TextField(blank=True, db_index=True)),
                ('genbank', models.NullBooleanField(db_index=True)),
                ('total_number_bp', models.IntegerField(blank=True, db_index=True, null=True)),
                ('number_ambiguous_bp', models.IntegerField(blank=True, db_index=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'Sequences',
            },
        ),
        migrations.CreateModel(
            name='TaxonSets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('taxonset_name', models.CharField(max_length=75)),
                ('taxonset_creator', models.CharField(max_length=75)),
                ('taxonset_description', models.CharField(blank=True, max_length=140)),
                ('taxonset_list', models.TextField(help_text='As items separated by linebreak.')),
            ],
            options={
                'verbose_name_plural': 'Taxon sets',
            },
        ),
        migrations.CreateModel(
            name='Vouchers',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('code', models.CharField(db_index=True, help_text='Voucher code.', max_length=300, primary_key=True, serialize=False, unique=True)),
                ('orden', models.TextField(blank=True, db_index=True)),
                ('superfamily', models.TextField(blank=True, db_index=True)),
                ('family', models.TextField(blank=True, db_index=True)),
                ('subfamily', models.TextField(blank=True, db_index=True)),
                ('tribe', models.TextField(blank=True, db_index=True)),
                ('subtribe', models.TextField(blank=True, db_index=True)),
                ('genus', models.TextField(blank=True, db_index=True)),
                ('species', models.TextField(blank=True, db_index=True)),
                ('subspecies', models.TextField(blank=True, db_index=True)),
                ('country', models.TextField(blank=True, db_index=True)),
                ('specific_locality', models.TextField(blank=True, db_index=True, help_text='Locality of origin for this specimen.')),
                ('type_species', models.CharField(choices=[('unknown', 'unknown'), ('yes', 'yes'), ('not', 'not')], db_index=True, help_text='Is this a type species?', max_length=100)),
                ('latitude', models.FloatField(blank=True, db_index=True, null=True)),
                ('longitude', models.FloatField(blank=True, db_index=True, null=True)),
                ('max_altitude', models.IntegerField(blank=True, db_index=True, help_text='Enter altitude in meters above sea level.', null=True)),
                ('min_altitude', models.IntegerField(blank=True, db_index=True, help_text='Enter altitude in meters above sea level.', null=True)),
                ('collector', models.TextField(blank=True, db_index=True)),
                ('date_collection', models.CharField(blank=True, db_index=True, default='', help_text='Enter date in format YYYY-mm-dd', max_length=10, verbose_name='Date collection start')),
                ('date_collection_end', models.CharField(blank=True, db_index=True, help_text='Optional. Enter date in format YYYY-mm-dd', max_length=10)),
                ('extraction', models.TextField(blank=True, db_index=True, help_text='Number of extraction event.')),
                ('extraction_tube', models.TextField(blank=True, db_index=True, help_text='Tube containing DNA extract.')),
                ('date_extraction', models.DateField(blank=True, db_index=True, null=True)),
                ('extractor', models.TextField(blank=True, db_index=True)),
                ('voucher_locality', models.TextField(blank=True, db_index=True)),
                ('published_in', models.TextField(blank=True, db_index=True, null=True)),
                ('notes', models.TextField(blank=True, db_index=True, null=True)),
                ('edits', models.TextField(blank=True, null=True)),
                ('latest_editor', models.TextField(blank=True, db_index=True, null=True)),
                ('hostorg', models.TextField(blank=True, db_index=True, help_text='Hostplant or other host.')),
                ('sex', models.CharField(blank=True, choices=[('male', 'male'), ('female', 'female'), ('larva', 'larva'), ('worker', 'worker'), ('queen', 'queen'), ('unknown', 'unknown')], db_index=True, max_length=100)),
                ('voucher', models.CharField(blank=True, choices=[('spread', 'spread'), ('in envelope', 'in envelope'), ('only photo', 'only photo'), ('no voucher', 'no voucher'), ('destroyed', 'destroyed'), ('lost', 'lost'), ('unknown', 'unknown')], db_index=True, help_text='Voucher status.', max_length=100)),
                ('voucher_code', models.TextField(blank=True, db_index=True, help_text='Alternative code of voucher specimen.')),
                ('code_bold', models.TextField(blank=True, db_index=True, help_text='Optional code for specimens kept in the BOLD database.')),
                ('determined_by', models.TextField(blank=True, db_index=True, help_text='Person that identified the taxon for this specimen.')),
                ('author', models.TextField(blank=True, db_index=True, help_text='Person that described this taxon.')),
            ],
            options={
                'verbose_name_plural': 'Vouchers',
            },
        ),
        migrations.AddField(
            model_name='sequences',
            name='code',
            field=models.ForeignKey(help_text='This is your voucher code.', on_delete=django.db.models.deletion.CASCADE, to='public_interface.Vouchers'),
        ),
        migrations.AddField(
            model_name='primers',
            name='for_sequence',
            field=models.ForeignKey(help_text='relation to Sequences table with reference for code and gene_code.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='public_interface.Sequences'),
        ),
        migrations.AddField(
            model_name='localimages',
            name='voucher',
            field=models.ForeignKey(help_text='Relation with id of voucher.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='public_interface.Vouchers'),
        ),
        migrations.AddField(
            model_name='flickrimages',
            name='voucher',
            field=models.ForeignKey(help_text='Relation with id of voucher. Save as lower case.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='public_interface.Vouchers'),
        ),
    ]
|
carlosp420/VoSeq
|
public_interface/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 11,529
|
from decimal import *
from matrix import Matrix
import copy
# Matriculation number whose decimal digits parameterize the test matrix.
matrnr = 12345678
# Digits x1 (10**6 place) .. x7 (units place), extracted with exact integer
# arithmetic.  The original form int((matrnr / 10**k) % 10) used float true
# division, which relies on float precision and would silently break for
# very large numbers; // is exact.  For this 8-digit number the leading
# digit is dropped, exactly as before (x1 is the 10**6 digit, i.e. 2).
x1, x2, x3, x4, x5, x6, x7 = (
    Decimal((matrnr // 10 ** k) % 10) for k in range(6, -1, -1))
# 14x14 test matrix: 10 on the main diagonal, the digit sequence mirrored
# along the anti-diagonal.
t = Matrix(14,14)
t[0] = 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x7
t[1] = 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x6, 0
t[2] = 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, x5, 0, 0
t[3] = 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, x4, 0, 0, 0
t[4] = 0, 0, 0, 0, 10, 0, 0, 0, 0, x3, 0, 0, 0, 0
t[5] = 0, 0, 0, 0, 0, 10, 0, 0, x2, 0, 0, 0, 0, 0
t[6] = 0, 0, 0, 0, 0, 0, 10, x1, 0, 0, 0, 0, 0, 0
t[7] = 0, 0, 0, 0, 0, 0, x7, 10, 0, 0, 0, 0, 0, 0
t[8] = 0, 0, 0, 0, 0, x6, 0, 0, 10, 0, 0, 0, 0, 0
t[9] = 0, 0, 0, 0, x5, 0, 0, 0, 0, 10, 0, 0, 0, 0
t[10] = 0, 0, 0, x4, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0
t[11] = 0, 0, x3, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0
t[12] = 0, x2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0
t[13] = x1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10
# Decompose; the final check prints whether p*t == l*r holds, i.e. an
# LR (LU) decomposition with row permutation p -- TODO confirm getLR's
# exact contract (meaning of c) in the Matrix class.
(c, l, r, p) = t.getLR()
print(c)
print(l)
print(r)
print(p)
print(l*r)
print(p*t)
print(p * t == l * r)
|
DominikHorn/MatrixCalc
|
main.py
|
Python
|
mit
| 1,197
|
from __future__ import print_function
import sys
import traceback
import urllib
import urllib2
import json
import re
import UserDict
from bs4 import BeautifulSoup
from untwisted.magic import sign
from runtime import later
import util
import runtime
import identity
STATE_FILE = 'state/qdbs.json'
BS4_PARSER = 'html5lib'
MAX_REPORT = 4
MAX_QUOTE_LEN = 300
TICK_PERIOD_S = 60
link = util.LinkSet()
def install(bot):
    # Register our event handlers and start the periodic polling loop.
    link.install(bot)
    bot.drive('QDBS_TICK', bot, log_level=2)
# Wrap install/uninstall so the 'identity' module is loaded first.
install, uninstall = util.depend(install, link.uninstall, 'identity')
#===============================================================================
def read_state():
    # Load persisted module state from STATE_FILE.  A missing file
    # (errno 2, ENOENT) and any other load error fall back to an empty
    # dict; unexpected errors are logged first (deliberate best-effort).
    try:
        with open(STATE_FILE, 'r') as file:
            return util.recursive_encode(json.load(file), 'utf-8')
    except IOError as e:
        if e.errno != 2: raise
    except:
        traceback.print_exc()
    return dict()
def write_state(wstate):
    # Persist module state as human-readable (indented, non-ASCII) JSON.
    data = json.dumps(wstate, indent=4, ensure_ascii=False)
    with open(STATE_FILE, 'w') as file:
        file.write(data)
# Module-level state, loaded once at import time.
state = read_state()
#===============================================================================
@link('QDBS_TICK')
def h_qdbs_tick(bot, log_level=None):
    # Poll all private and public QDB pages, then sleep TICK_PERIOD_S and
    # re-emit QDBS_TICK so the loop runs forever.
    yield Private.class_refresh(bot)
    yield Public.class_refresh(bot)
    yield runtime.sleep(TICK_PERIOD_S)
    yield sign('QDBS_TICK', bot, log_level=log_level)
def format_quote(quote):
    # Collapse newline runs to single spaces and cap the quote at
    # MAX_QUOTE_LEN characters, marking truncation with "(...)".
    flattened = re.sub(r'[\r\n]+', ' ', quote)
    if len(flattened) <= MAX_QUOTE_LEN:
        return flattened
    return flattened[:MAX_QUOTE_LEN-5] + '(...)'
#===============================================================================
class EmptyDict(UserDict.DictMixin):
    """Dict-like object that stores nothing.

    Reads always miss; writes and deletes are silently discarded.  Used as
    the default (no-op) Configuration.cache.
    """
    def __getitem__(self, key):
        raise KeyError
    def __delitem__(self, key):
        pass
    def __setitem__(self, key, val):
        pass
class Configuration(object):
    """Base for per-QDB configuration entries loaded from a conf table.

    Subclasses set CONF_FILE and implement refresh(); class_init() turns
    cache into a real dict and loads the entries.
    """
    # No-op by default; class_init() replaces this with a real dict.
    cache = EmptyDict()
    def __init__(self, raw_entry):
        # Copy every attribute of the raw conf-table entry onto self.
        for key, val in raw_entry.__dict__.iteritems():
            setattr(self, key, val)
    @classmethod
    def read_conf(cls):
        # Load this class's conf table.  A missing file (errno 2) or any
        # parse error yields an empty dict (errors are logged first).
        try:
            return util.table(cls.CONF_FILE, cls.__name__ + 'ConfEntry')
        except IOError as e:
            if e.errno != 2: raise
        except:
            traceback.print_exc()
        return dict()
    @classmethod
    def class_init(cls):
        # Install a real cache and instantiate all configured entries.
        cls.cache = dict()
        cls.entries = []
        for raw_entry in cls.read_conf():
            entry = cls(raw_entry)
            cls.entries.append(entry)
    @classmethod
    @util.msub(link, 'qdbs.Configuration.class_refresh')
    def class_refresh(cls, bot):
        # Drop cached pages, then refresh every configured entry in turn.
        cls.cache.clear()
        for entry in cls.entries:
            yield entry.refresh(bot)
#===============================================================================
class Private(Configuration):
CONF_FILE = 'conf/qdbs_private.py'
__slots__ = ('access_name', 'qdb_username', 'qdb_password', 'admin_url',
'remote_admin_url')
@util.msub(link, 'qdbs.Private.refresh')
def refresh(self, bot):
try:
quotes = self.quotes()
if not quotes: return
url_state = state.get(self.admin_url, {})
name_state = url_state.get(self.access_name.lower(), {})
last_quote = name_state.get('last_quote')
nicks = yield identity.enum_access(bot, self.access_name)
if not nicks: return
for nick in nicks:
for qid, quote in sorted(quotes):
if qid > last_quote:
fquote = format_quote(quote)
msg = '[QdbS] New quote #%d <%s>: "%s"' % (
qid, self.remote_admin_url, fquote)
if type(msg) is unicode:
msg = msg.encode('utf8')
bot.send_msg(nick, msg)
hm = yield identity.get_hostmask(bot, nick)
yield later(sign('PROXY_MSG', bot, None, hm, fquote, quiet=True))
last_quote = max(
last_quote, max(qid for (qid, quote) in quotes))
name_state['last_quote'] = last_quote
url_state[self.access_name.lower()] = name_state
state[self.admin_url] = url_state
write_state(state)
except:
traceback.print_exc()
def quotes(self):
if self.admin_url in self.cache:
return self.cache[self.admin_url]
quotes = []
soup = self.soup()
add_quotes = soup.find_all('a', {'title': 'Add Quote'})
if add_quotes:
for add_quote in add_quotes:
qid = int(re.search(r'q=(\d+)', add_quote.get('href')).group(1))
body = add_quote.find_parent('table').find('td', {'class': 'body'})
quotes.append((qid, body.text.strip()))
else:
for tag in soup.find_all('td', {'class': 'title'}):
if tag.text.strip() == 'There are no new quotes!':
break
else:
print(soup.encode('utf-8'), file=sys.stderr)
raise Exception('Unexpected QdbS admin page format.')
self.cache[self.admin_url] = quotes
return quotes
def soup(self):
stream = util.ext_urlopen(urllib2.Request(self.admin_url, headers={
'Cookie': 'qdb_username=%s; qdb_password=%s' % (
self.qdb_username, self.qdb_password)}))
encoding = stream.info().getparam('charset')
return BeautifulSoup(stream, BS4_PARSER, from_encoding=encoding)
Private.class_init()
#===============================================================================
class Public(Configuration):
    """A public QdbS index page polled for newly approved quotes.

    New quotes are announced in the configured channel, capped at
    MAX_REPORT messages per tick.
    """
    CONF_FILE = 'conf/qdbs_public.py'
    __slots__ = ('channel', 'index_url', 'remote_index_url')
    @util.msub(link, 'qdbs.Public.refresh')
    def refresh(self, bot):
        # Announce quotes newer than the persisted high-water mark.
        try:
            quotes, title = self.quotes_title()
            url_state = state.get(self.index_url, {})
            chan_state = url_state.get(self.channel.lower(), {})
            last_quote = chan_state.get('last_quote')
            quotes = sorted(
                (qid, quote) for (qid, quote) in quotes if qid > last_quote)
            # At most MAX_REPORT lines: MAX_REPORT-1 quotes plus a summary
            # line when there are more.
            sample = quotes if len(quotes) <= MAX_REPORT else \
                     quotes[:MAX_REPORT-1]
            for qid, quote in sample:
                quote_url = '%s?%s' % (self.remote_index_url, qid)
                fquote = format_quote(quote)
                msg = '%s: new quote added: %s "%s"' % (title, quote_url, fquote)
                bot.send_msg(self.channel, msg)
                yield later(sign('PROXY_MSG', bot, None, self.channel, fquote,
                    quiet=True))
            if len(quotes) > len(sample):
                msg = '%s: ...and %d others. See: <%s>.' % (
                    title, len(quotes)-len(sample), self.remote_index_url)
                bot.send_msg(self.channel, msg)
            if quotes:
                last_quote = max(
                    last_quote, max(qid for (qid, quote) in quotes))
                chan_state['last_quote'] = last_quote
                url_state[self.channel.lower()] = chan_state
                state[self.index_url] = url_state
                write_state(state)
        except:
            # Best-effort polling: never let one page kill the tick loop.
            traceback.print_exc()
    def quotes_title(self):
        # Scrape ((id, text) pairs, page title) from the public index page,
        # memoizing per index_url for the duration of one tick.
        if self.index_url in self.cache:
            return self.cache[self.index_url]
        quotes = []
        soup = self.soup()
        heading = soup.find('td', {'class': 'heading'})
        if heading is None:
            print(soup.encode('utf-8'), file=sys.stderr)
            raise Exception('Unexpected QdbS index page format.')
        qdb_title = heading.text.strip()
        rate_links = soup.find_all('a', {'title': 'Rate as good'})
        for rate_link in rate_links:
            qid = int(re.search(r'q=(\d+)', rate_link.get('href')).group(1))
            body = rate_link.find_parent('table').find('td', {'class': 'body'})
            quotes.append((qid, body.text.strip()))
        self.cache[self.index_url] = quotes, qdb_title
        return quotes, qdb_title
    def soup(self):
        # Public page: no authentication cookies needed.
        stream = util.ext_urlopen(self.index_url)
        encoding = stream.info().getparam('charset')
        return BeautifulSoup(stream, BS4_PARSER, from_encoding=encoding)
Public.class_init()
|
joodicator/PageBot
|
page/qdbs.py
|
Python
|
lgpl-3.0
| 8,387
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Demonstrates the str.format mini-language: positional argument order,
# precision, width, alignment, fill characters, and padding CJK text with
# the full-width space so columns line up.
print('{1}:{0}'.format(2, 1))
# out: 1:2
# precision control
print('[{0:.3}]'.format(1/3))
# out: [0.333]
# width control
print('[{0:7}]'.format('hello'))
# out: [hello  ]
# <codecell>
# left-aligned
print('[{0:<7.3}]'.format(1/3))
# out: [0.333  ]
# <codecell>
# right-aligned
print('[{0:>7.3}]'.format(1/3))
# out: [  0.333]
# <codecell>
# centred
print('[{0:^7.3}]'.format(1/3))
# out: [ 0.333 ]
# <codecell>
# padding on the left with '0' (fill char literal vs. fill char from an argument)
print('[{0:0>7}]'.format(1))
print('[{0:{1}>7}]'.format(1, 0))
# out: [0000001]
# <codecell>
# padding on the right with '0'
print('[{0:0<7}]'.format(1))
print('[{0:{1}<7}]'.format(1, 0))
# out: [1000000]
# <codecell>
# aligning CJK text using the full-width space as fill character
blog = {'1':'中国石油大学','2':'浙江大学','3':'南京航空航天大学'}
print('不对齐')
print('{0:^4}\t\t{1:^8}'.format('序号', '名称'))
for no, name in blog.items():
    print('{0:^4}\t\t{1:^8}'.format(no, name))
# out:
#  序号        名称
#  1      中国石油大学
#  2       浙江大学
#  3     南京航空航天大学
print('对齐')
# chr(12288) is the CJK full-width space (U+3000), used as the fill character
print('{0:^4}\t\t{1:{2}^8}'.format('序号', '名称', chr(12288)))
for no, name in blog.items():
    print('{0:^4}\t\t{1:{2}^8}'.format(no, name, chr(12288)))
# out:
#  序号        名称
#  1      中国石油大学
#  2       浙江大学
#  3     南京航空航天大学
# %-style formatting of a raw-string LaTeX template (two %f and two %s slots)
doc_tmpl = r"""\documentclass[english]{article}
\usepackage{graphicx}
\usepackage[paperheight=%fin,paperwidth=%fin]{geometry}
\usepackage{psfrag}
\begin{document}
\input{%s}
\includegraphics{%s}
\end{document} """
print(doc_tmpl % (1.0, 2.0, "aa", "bb"))
|
qrsforever/workspace
|
python/learn/base/string/format_.py
|
Python
|
mit
| 1,644
|
""" Ban factory """
from smserver import models
from test.factories import base
class BanFactory(base.BaseFactory):
    """ Classic ban. """
    class Meta(base.BaseMeta):
        model = models.Ban
    # presumably marks the ban as non-permanent — confirm against models.Ban
    fixed = False
|
ningirsu/stepmania-server
|
test/factories/ban_factory.py
|
Python
|
mit
| 226
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
from oslo.config import cfg
from nova.compute import power_state
from nova import exception
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
CONF = cfg.CONF
# Flags applied to every test case in this module.
COMMON_FLAGS = dict(
    firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
    host='test_host',
)
# Baremetal-group flags: wire every pluggable driver to its fake implementation.
BAREMETAL_FLAGS = dict(
    driver='nova.virt.baremetal.fake.FakeDriver',
    instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
    power_manager='nova.virt.baremetal.fake.FakePowerManager',
    vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
    volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
    group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.TestCase):
    """Tests that need no baremetal DB: verifies driver plug-point loading."""
    def setUp(self):
        super(BareMetalDriverNoDBTestCase, self).setUp()
        self.flags(**COMMON_FLAGS)
        self.flags(**BAREMETAL_FLAGS)
        self.driver = bm_driver.BareMetalDriver(None)
    def test_validate_driver_loading(self):
        # Each configured fake class must have been instantiated by the driver.
        self.assertTrue(isinstance(self.driver.driver,
                                   fake.FakeDriver))
        self.assertTrue(isinstance(self.driver.vif_driver,
                                   fake.FakeVifDriver))
        self.assertTrue(isinstance(self.driver.volume_driver,
                                   fake.FakeVolumeDriver))
        self.assertTrue(isinstance(self.driver.firewall_driver,
                                   fake.FakeFirewallDriver))
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
    """End-to-end driver tests backed by the baremetal DB fixture."""
    def setUp(self):
        super(BareMetalDriverWithDBTestCase, self).setUp()
        self.flags(**COMMON_FLAGS)
        self.flags(**BAREMETAL_FLAGS)
        fake_image.stub_out_image_service(self.stubs)
        self.context = utils.get_test_admin_context()
        self.driver = bm_driver.BareMetalDriver(None)
        self.addCleanup(fake_image.FakeImageService_reset)
    def _create_node(self, node_info=None, nic_info=None):
        # Build a baremetal node (with NICs) in the DB plus the matching test
        # instance and ready-made kwargs for spawn()/destroy().  Returns a dict
        # with keys: node_info, nic_info, node, instance, spawn_params,
        # destroy_params.
        result = {}
        if node_info is None:
            node_info = bm_db_utils.new_bm_node(
                    id=123,
                    service_host='test_host',
                    cpus=2,
                    memory_mb=2048,
                )
        if nic_info is None:
            nic_info = [
                    {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
                        'port_no': 1},
                    {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
                        'port_no': 2},
                ]
        result['node_info'] = node_info
        result['nic_info'] = nic_info
        result['node'] = db.bm_node_create(self.context, node_info)
        for nic in nic_info:
            db.bm_interface_create(
                                    self.context,
                                    result['node']['id'],
                                    nic['address'],
                                    nic['datapath_id'],
                                    nic['port_no'],
                )
        result['instance'] = utils.get_test_instance()
        result['instance']['node'] = result['node']['uuid']
        result['spawn_params'] = dict(
                admin_password='test_pass',
                block_device_info=None,
                context=self.context,
                image_meta=utils.get_test_image_info(
                                None, result['instance']),
                injected_files=[('/fake/path', 'hello world')],
                instance=result['instance'],
                network_info=utils.get_test_network_info(),
            )
        result['destroy_params'] = dict(
                instance=result['instance'],
                network_info=result['spawn_params']['network_info'],
                block_device_info=result['spawn_params']['block_device_info'],
            )
        return result
    def test_get_host_stats(self):
        # One stats dict per node, populated from the node row and extra specs.
        node = self._create_node()
        stats = self.driver.get_host_stats()
        self.assertTrue(isinstance(stats, list))
        self.assertEqual(len(stats), 1)
        stats = stats[0]
        self.assertEqual(stats['cpu_arch'], 'test')
        self.assertEqual(stats['test_spec'], 'test_value')
        self.assertEqual(stats['hypervisor_type'], 'baremetal')
        self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
        self.assertEqual(stats['host'], 'test_host')
        self.assertEqual(stats['vcpus'], 2)
        self.assertEqual(stats['host_memory_total'], 2048)
    def test_spawn_ok(self):
        # A successful spawn marks the node ACTIVE and records the instance.
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
        self.assertEqual(row['instance_name'], node['instance']['hostname'])
    def test_macs_from_nic_for_instance(self):
        node = self._create_node()
        expected = set([nic['address'] for nic in node['nic_info']])
        self.assertEqual(
            expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance_after_spawn(self):
        # MACs must still be reported once the node is in use.
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        expected = set([nic['address'] for nic in node['nic_info']])
        self.assertEqual(
            expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance(self):
        node = self._create_node()
        expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
        self.assertEqual(
            expected, self.driver.macs_for_instance(node['instance']))
    def test_macs_for_instance_no_interfaces(self):
        # Nodes cannot boot with no MACs, so we raise an error if that happens.
        node = self._create_node(nic_info=[])
        self.assertRaises(exception.NovaException,
                          self.driver.macs_for_instance, node['instance'])
    def test_spawn_node_already_associated(self):
        # Spawning onto a node already bound to an instance must fail and
        # leave task_state untouched.
        node = self._create_node()
        db.bm_node_update(self.context, node['node']['id'],
                {'instance_uuid': '1234-5678'})
        self.assertRaises(exception.NovaException,
                self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], None)
    def test_spawn_node_in_use(self):
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(exception.NovaException,
                self.driver.spawn, **node['spawn_params'])
    def test_spawn_node_not_found(self):
        # Hiding the node's uuid makes the lookup fail during spawn.
        node = self._create_node()
        db.bm_node_update(self.context, node['node']['id'],
                {'uuid': 'hide-this-node'})
        self.assertRaises(exception.NovaException,
                self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], None)
    def test_spawn_fails(self):
        # Power-on failure during spawn rolls the node back to DELETED.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
        fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.DELETED)
    def test_spawn_fails_to_cleanup(self):
        # If cleanup after a failed spawn also fails, the node ends in ERROR.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                self.driver.spawn, **node['spawn_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
    def test_destroy_ok(self):
        # A successful destroy clears the instance binding and marks DELETED.
        node = self._create_node()
        self.driver.spawn(**node['spawn_params'])
        self.driver.destroy(**node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.DELETED)
        self.assertEqual(row['instance_uuid'], None)
        self.assertEqual(row['instance_name'], None)
    def test_destroy_fails(self):
        # Power-off failure leaves the node in ERROR but still associated.
        node = self._create_node()
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(test.TestingException,
                self.driver.destroy, **node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
    def test_get_available_resources(self):
        # memory_mb_used tracks the spawn/destroy lifecycle.
        node = self._create_node()
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb'],
                         node['node_info']['memory_mb'])
        self.assertEqual(resources['memory_mb_used'], 0)
        self.driver.spawn(**node['spawn_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'],
                         node['node_info']['memory_mb'])
        self.driver.destroy(**node['destroy_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'], 0)
    def test_get_available_nodes(self):
        # Node count is independent of whether an instance is running on it.
        self.assertEqual(0, len(self.driver.get_available_nodes()))
        node1 = self._create_node()
        self.assertEqual(1, len(self.driver.get_available_nodes()))
        node1['instance']['hostname'] = 'test-host-1'
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(1, len(self.driver.get_available_nodes()))
        self.assertEqual([node1['node']['uuid']],
                         self.driver.get_available_nodes())
    def test_list_instances(self):
        # Only spawned instances are listed, by hostname, and destroy removes
        # them again.
        self.assertEqual([], self.driver.list_instances())
        node1 = self._create_node()
        self.assertEqual([], self.driver.list_instances())
        node_info = bm_db_utils.new_bm_node(
                id=456,
                service_host='test_host',
                cpus=2,
                memory_mb=2048,
            )
        nic_info = [
                {'address': 'cc:cc:cc', 'datapath_id': '0x1',
                    'port_no': 1},
                {'address': 'dd:dd:dd', 'datapath_id': '0x2',
                    'port_no': 2},
            ]
        node2 = self._create_node(node_info=node_info, nic_info=nic_info)
        self.assertEqual([], self.driver.list_instances())
        node1['instance']['hostname'] = 'test-host-1'
        node2['instance']['hostname'] = 'test-host-2'
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(['test-host-1'],
                         self.driver.list_instances())
        self.driver.spawn(**node2['spawn_params'])
        self.assertEqual(['test-host-1', 'test-host-2'],
                         self.driver.list_instances())
        self.driver.destroy(**node1['destroy_params'])
        self.assertEqual(['test-host-2'],
                         self.driver.list_instances())
        self.driver.destroy(**node2['destroy_params'])
        self.assertEqual([], self.driver.list_instances())
    def test_get_info_no_such_node(self):
        # get_info on a node with no associated instance raises.
        node = self._create_node()
        self.assertRaises(exception.InstanceNotFound,
                self.driver.get_info,
                node['instance'])
    def test_get_info_ok(self):
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                {'instance_uuid': node['instance']['uuid'],
                'instance_name': node['instance']['hostname'],
                'task_state': baremetal_states.ACTIVE})
        res = self.driver.get_info(node['instance'])
        self.assertEqual(res['state'], power_state.RUNNING)
    def test_get_info_with_defunct_pm(self):
        # test fix for bug 1178378
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                {'instance_uuid': node['instance']['uuid'],
                'instance_name': node['instance']['hostname'],
                'task_state': baremetal_states.ACTIVE})
        # fake the power manager and don't get a power state
        self.mox.StubOutWithMock(fake.FakePowerManager, 'is_power_on')
        fake.FakePowerManager.is_power_on().AndReturn(None)
        self.mox.ReplayAll()
        res = self.driver.get_info(node['instance'])
        # prior to the fix, returned power_state was SHUTDOWN
        self.assertEqual(res['state'], power_state.NOSTATE)
        self.mox.VerifyAll()
|
DirectXMan12/nova-hacking
|
nova/tests/virt/baremetal/test_driver.py
|
Python
|
apache-2.0
| 14,537
|
import os.path
import numpy
from scipy import interpolate
import inspect
# Hardcode the paths of the delay and gain files
file_dir=os.path.dirname(inspect.getfile(inspect.currentframe()))  # directory of this module
MEAS_DELAYS=os.path.join(file_dir,'meas_delays.txt')  # measured delay per line vs. frequency
MEAS_GAINS=os.path.join(file_dir,'meas_gain_db.txt')  # measured gain (dB) per line vs. frequency
##################################
def delayset2delaylines(delayset):
    '''
    Calculate an array of 5 delay line flags based on the delay setting.
    NOT VECTOR.

    Element i of the returned list is True when delay line i (of weight
    2**i) is switched in.  Settings 32-63 (MSB set) mean the delay unit is
    turned off and None is returned.  Raises ValueError outside 0..63.
    '''
    # See if we have a valid delay setting
    if delayset < 0 or delayset > 63:
        # repr() instead of Python-2-only backticks keeps this portable.
        raise ValueError("Invalid Delay Setting %s" % repr(delayset))
    # Delay settings with the MSB set are turned off, return None
    if delayset > 31:
        return None
    # Bit i of the setting selects delay line i (binary decomposition).
    return [bool(int(delayset) & (1 << i)) for i in range(5)]
##################################
def get_delay_length(delayset,freq,delayfile=None):
    '''
    Get a delay length (in seconds) from a delay set.

    delayset may be a scalar or an array of delay settings; freq is the
    frequency (or frequencies) at which the measured per-line delays are
    spline-interpolated.  Returns a float64 array with one entry per
    setting.  Settings that are turned off (>31) contribute zero delay.
    '''
    global MEAS_DELAYS
    if delayfile is None:
        delayfile=MEAS_DELAYS
    if not os.path.exists(delayfile):
        raise ValueError("Delay File %s does not exist"%delayfile)
    # Read in the array from the delay file.  The with-block closes the file
    # (previously leaked), and list() keeps this working under Python 3
    # where map() is lazy.
    with open(delayfile) as f:
        darr=numpy.array([list(map(float,line.split())) for line in f])
    f_freqs=darr[:,0] # Array of frequencies in the delay file.
    # Columns 1 through 5 in the delay file correspond to those delay lines
    delayset=numpy.array(delayset)
    outdelay=numpy.zeros(delayset.size,dtype='float64')
    # Cache each column's spline fit so it is computed at most once instead
    # of once per requested setting.
    spline_cache={}
    for j in range(delayset.size):
        # 0-d numpy arrays cannot be indexed, so handle the scalar case apart.
        if delayset.size == 1:
            dlines=delayset2delaylines(delayset)
        else:
            dlines=delayset2delaylines(delayset[j])
        if dlines is None:
            # Setting >31: delay unit is off (previously this crashed with a
            # TypeError on None[i]); treat it as contributing no delay.
            continue
        for i in range(5):
            # Check if each delay line is on. If it is, interpolate the file
            # to find the amount of delay to add
            if dlines[i]:
                if i not in spline_cache:
                    spline_cache[i]=interpolate.splrep(f_freqs,darr[:,i+1],s=0)
                outdelay[j]=outdelay[j]+interpolate.splev(freq,spline_cache[i],der=0)
    return outdelay
##################################
def get_delay_gains(delayset,freq,delayfile=None):
    '''
    Get a delay gains (linear scale) from a delay set.

    delayset may be a scalar or an array of delay settings; freq is the
    frequency (or frequencies) at which the measured per-line gains (dB)
    are spline-interpolated.  The summed dB gain of all enabled lines is
    converted to a linear voltage gain (10**(dB/20)) per setting.
    '''
    global MEAS_GAINS
    # BUG FIX: previously gainfile was only assigned when delayfile was None,
    # so passing an explicit file raised NameError; honour the argument.
    if delayfile is None:
        gainfile=MEAS_GAINS
    else:
        gainfile=delayfile
    if not os.path.exists(gainfile):
        raise ValueError("Gain File %s does not exist"%gainfile)
    # Read in the array from the gain file; with-block closes the handle and
    # list() keeps this working under Python 3 where map() is lazy.
    with open(gainfile) as f:
        garr=numpy.array([list(map(float,line.split())) for line in f])
    f_freqs=garr[:,0] # Array of frequencies in the gain file.
    # Columns 1 through 5 in the gain file correspond to those delay lines
    delayset=numpy.array(delayset)
    outgain=numpy.zeros(delayset.size,dtype='float64')
    # Cache each column's spline fit so it is computed at most once.
    spline_cache={}
    for j in range(delayset.size):
        # 0-d numpy arrays cannot be indexed, so handle the scalar case apart.
        if delayset.size == 1:
            dlines=delayset2delaylines(delayset)
        else:
            dlines=delayset2delaylines(delayset[j])
        if dlines is None:
            # Setting >31: delay unit is off; contributes 0 dB.
            continue
        for i in range(5):
            # Check if each delay line is on. If it is, interpolate the file
            # to find the amount of gain to add
            if dlines[i]:
                if i not in spline_cache:
                    spline_cache[i]=interpolate.splrep(f_freqs,garr[:,i+1],s=0)
                outgain[j]=outgain[j]+interpolate.splev(freq,spline_cache[i],der=0)
    # Convert the total dB gain to a linear voltage gain.
    return (10.0**(outgain/20.0))
|
ryandougherty/mwa-capstone
|
MWA_Tools/mwapy/pb/measured_beamformer.py
|
Python
|
gpl-2.0
| 3,586
|
## simplexml.py based on Mattew Allum's xmlstream.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: simplexml.py,v 1.27 2005/04/30 07:20:27 snakeru Exp $
"""Simplexml module provides xmpppy library with all needed tools to handle XML nodes and XML streams.
I'm personally using it in many other separate projects. It is designed to be as standalone as possible."""
import xml.parsers.expat
def XMLescape(txt):
    """Returns provided string with symbols & < > " replaced by their respective XML entities."""
    # FORM FEED (\x0C) and ESC (\x1B) are stripped as well since they are not
    # valid XML characters.  '&' must be handled first so that the entities
    # introduced by later substitutions are not themselves escaped.
    substitutions = (
        ("&", "&amp;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        ('"', "&quot;"),
        (u'\x0C', ""),
        (u'\x1B', ""),
    )
    for raw, entity in substitutions:
        txt = txt.replace(raw, entity)
    return txt
ENCODING='utf-8'  # default encoding used by ustr() to decode byte strings
def ustr(what):
    """Converts object "what" to unicode string using it's own __str__ method if accessible or unicode method otherwise."""
    # NOTE: Python 2 only — relies on the `unicode` builtin.
    if isinstance(what, unicode): return what
    try: r=what.__str__()
    except AttributeError: r=str(what)
    # Byte-string results are decoded with the module-level ENCODING.
    if not isinstance(r, unicode): return unicode(r,ENCODING)
    return r
class Node(object):
    """ Node class describes syntax of separate XML Node. It have a constructor that permits node creation
        from set of "namespace name", attributes and payload of text strings and other nodes.
        It does not natively support building node from text string and uses NodeBuilder class for that purpose.
        After creation node can be mangled in many ways so it can be completely changed.
        Also node can be serialised into string in one of two modes: default (where the textual representation
        of node describes it exactly) and "fancy" - with whitespace added to make indentation and thus make
        result more readable by human.
        Node class have attribute FORCE_NODE_RECREATION that is defaults to False thus enabling fast node
        replication from the some other node. The drawback of the fast way is that new node shares some
        info with the "original" node that is changing the one node may influence the other. Though it is
        rarely needed (in xmpppy it is never needed at all since I'm usually never using original node after
        replication (and using replication only to move upwards on the classes tree).
    """
    # When truthy, replicating from another Node goes through a full
    # serialise/re-parse cycle instead of the fast shallow copy below.
    FORCE_NODE_RECREATION=0
    # NOTE(review): attrs={} and payload=[] are mutable defaults; they are only
    # read here, never mutated, so this is safe but fragile.
    def __init__(self, tag=None, attrs={}, payload=[], parent=None, nsp=None, node_built=False, node=None):
        """ Takes "tag" argument as the name of node (prepended by namespace, if needed and separated from it
            by a space), attrs dictionary as the set of arguments, payload list as the set of textual strings
            and child nodes that this node carries within itself and "parent" argument that is another node
            that this one will be the child of. Also the __init__ can be provided with "node" argument that is
            either a text string containing exactly one node or another Node instance to begin with. If both
            "node" and other arguments is provided then the node initially created as replica of "node"
            provided and then modified to be compliant with other arguments."""
        if node:
            if self.FORCE_NODE_RECREATION and isinstance(node, Node):
                node=str(node)
            if not isinstance(node, Node):
                # Textual node: parse it into this very instance via NodeBuilder.
                node=NodeBuilder(node,self)
                node_built = True
            else:
                # Fast replication: copy attrs/data/nsd dicts shallowly, but the
                # kid Node objects themselves are shared with the original.
                self.name,self.namespace,self.attrs,self.data,self.kids,self.parent,self.nsd = node.name,node.namespace,{},[],[],node.parent,{}
                for key in node.attrs.keys(): self.attrs[key]=node.attrs[key]
                for data in node.data: self.data.append(data)
                for kid in node.kids: self.kids.append(kid)
                for k,v in node.nsd.items(): self.nsd[k] = v
        else: self.name,self.namespace,self.attrs,self.data,self.kids,self.parent,self.nsd = 'tag','',{},[],[],None,{}
        if parent:
            self.parent = parent
        self.nsp_cache = {}
        if nsp:
            for k,v in nsp.items(): self.nsp_cache[k] = v
        # xmlns declarations are mirrored into self.nsd (prefix -> uri) while
        # still being kept as ordinary attributes.
        for attr,val in attrs.items():
            if attr == 'xmlns':
                self.nsd[u''] = val
            elif attr.startswith('xmlns:'):
                self.nsd[attr[6:]] = val
            self.attrs[attr]=attrs[attr]
        if tag:
            if node_built:
                # Tag came from the parser as "prefix:name"; resolve the prefix.
                pfx,self.name = (['']+tag.split(':'))[-2:]
                self.namespace = self.lookup_nsp(pfx)
            else:
                # Caller-supplied tag may be "namespace name" (space-separated).
                if ' ' in tag:
                    self.namespace,self.name = tag.split()
                else:
                    self.name = tag
        if isinstance(payload, basestring): payload=[payload]
        for i in payload:
            if isinstance(i, Node): self.addChild(node=i)
            else: self.data.append(ustr(i))
    def lookup_nsp(self,pfx=''):
        # Resolve a namespace prefix: own declarations first, then the cache,
        # then recursively the parents; caches the answer per node.
        ns = self.nsd.get(pfx,None)
        if ns is None:
            ns = self.nsp_cache.get(pfx,None)
        if ns is None:
            if self.parent:
                ns = self.parent.lookup_nsp(pfx)
                self.nsp_cache[pfx] = ns
            else:
                # Sentinel URI for prefixes never declared anywhere in the tree.
                return 'http://www.gajim.org/xmlns/undeclared'
        return ns
    def __str__(self,fancy=0):
        """ Method used to dump node into textual representation.
            if "fancy" argument is set to True produces indented output for readability."""
        s = (fancy-1) * 2 * ' ' + "<" + self.name
        if self.namespace:
            # Only emit xmlns when it differs from the parent's namespace and
            # is not already present as an explicit attribute.
            if not self.parent or self.parent.namespace!=self.namespace:
                if 'xmlns' not in self.attrs:
                    s = s + ' xmlns="%s"'%self.namespace
        for key in self.attrs.keys():
            val = ustr(self.attrs[key])
            s = s + ' %s="%s"' % ( key, XMLescape(val) )
        s = s + ">"
        cnt = 0
        # CDATA chunks and child nodes are interleaved: data[i] precedes kid[i].
        if self.kids:
            if fancy: s = s + "\n"
            for a in self.kids:
                if not fancy and (len(self.data)-1)>=cnt: s=s+XMLescape(self.data[cnt])
                elif (len(self.data)-1)>=cnt: s=s+XMLescape(self.data[cnt].strip())
                s = s + a.__str__(fancy and fancy+1)
                cnt=cnt+1
        if not fancy and (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
        elif (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt].strip())
        if not self.kids and s.endswith('>'):
            # No children and no trailing CDATA: collapse to a self-closing tag.
            s=s[:-1]+' />'
            if fancy: s = s + "\n"
        else:
            if fancy and not self.data: s = s + (fancy-1) * 2 * ' '
            s = s + "</" + self.name + ">"
            if fancy: s = s + "\n"
        return s
    def addChild(self, name=None, attrs={}, payload=[], namespace=None, node=None):
        """ If "node" argument is provided, adds it as child node. Else creates new node from
            the other arguments' values and adds it as well."""
        if node:
            newnode=node
            node.parent = self
        else: newnode=Node(tag=name, parent=self, attrs=attrs, payload=payload)
        if namespace:
            newnode.setNamespace(namespace)
        self.kids.append(newnode)
        return newnode
    def addData(self, data):
        """ Adds some CDATA to node. """
        self.data.append(ustr(data))
    def clearData(self):
        """ Removes all CDATA from the node. """
        self.data=[]
    def delAttr(self, key):
        """ Deletes an attribute "key" """
        del self.attrs[key]
    def delChild(self, node, attrs={}):
        """ Deletes the "node" from the node's childs list, if "node" is an instance.
            Else deletes the first node that have specified name and (optionally) attributes. """
        if not isinstance(node, Node): node=self.getTag(node,attrs)
        self.kids.remove(node)
        return node
    def getAttrs(self):
        """ Returns all node's attributes as dictionary. """
        return self.attrs
    def getAttr(self, key):
        """ Returns value of specified attribute. """
        try: return self.attrs[key]
        except: return None
    def getChildren(self):
        """ Returns all node's child nodes as list. """
        return self.kids
    def getData(self):
        """ Returns all node CDATA as string (concatenated). """
        return ''.join(self.data)
    def getName(self):
        """ Returns the name of node """
        return self.name
    def getNamespace(self):
        """ Returns the namespace of node """
        return self.namespace
    def getParent(self):
        """ Returns the parent of node (if present). """
        return self.parent
    def getPayload(self):
        """ Return the payload of node i.e. list of child nodes and CDATA entries.
            F.e. for "<node>text1<nodea/><nodeb/> text2</node>" will be returned list:
            ['text1', <nodea instance>, <nodeb instance>, ' text2']. """
        ret=[]
        # Re-interleave data and kids by index; IndexError marks exhaustion of
        # the shorter list.
        for i in range(len(self.kids)+len(self.data)+1):
            try:
                if self.data[i]: ret.append(self.data[i])
            except IndexError: pass
            try: ret.append(self.kids[i])
            except IndexError: pass
        return ret
    def getTag(self, name, attrs={}, namespace=None):
        """ Filters all child nodes using specified arguments as filter.
            Returns the first found or None if not found. """
        return self.getTags(name, attrs, namespace, one=1)
    def getTagAttr(self,tag,attr):
        """ Returns attribute value of the child with specified name (or None if no such attribute)."""
        try: return self.getTag(tag).attrs[attr]
        except: return None
    def getTagData(self,tag):
        """ Returns cocatenated CDATA of the child with specified name."""
        try: return self.getTag(tag).getData()
        except: return None
    def getTags(self, name, attrs={}, namespace=None, one=0):
        """ Filters all child nodes using specified arguments as filter.
            Returns the list of nodes found. """
        nodes=[]
        for node in self.kids:
            if namespace and namespace!=node.getNamespace(): continue
            if node.getName() == name:
                # for/else: the node matches only if no attr check broke out.
                for key in attrs.keys():
                    if key not in node.attrs or node.attrs[key]!=attrs[key]: break
                else: nodes.append(node)
            if one and nodes: return nodes[0]
        if not one: return nodes
    def iterTags(self, name, attrs={}, namespace=None):
        """ Iterate over all children using specified arguments as filter. """
        for node in self.kids:
            if namespace is not None and namespace!=node.getNamespace(): continue
            if node.getName() == name:
                for key in attrs.keys():
                    if key not in node.attrs or \
                        node.attrs[key]!=attrs[key]: break
                else:
                    yield node
    def setAttr(self, key, val):
        """ Sets attribute "key" with the value "val". """
        self.attrs[key]=val
    def setData(self, data):
        """ Sets node's CDATA to provided string. Resets all previous CDATA!"""
        self.data=[ustr(data)]
    def setName(self,val):
        """ Changes the node name. """
        self.name = val
    def setNamespace(self, namespace):
        """ Changes the node namespace. """
        self.namespace=namespace
    def setParent(self, node):
        """ Sets node's parent to "node". WARNING: do not checks if the parent already present
            and not removes the node from the list of childs of previous parent. """
        self.parent = node
    def setPayload(self,payload,add=0):
        """ Sets node payload according to the list specified. WARNING: completely replaces all node's
            previous content. If you wish just to add child or CDATA - use addData or addChild methods. """
        if isinstance(payload, basestring): payload=[payload]
        if add: self.kids+=payload
        else: self.kids=payload
    def setTag(self, name, attrs={}, namespace=None):
        """ Same as getTag but if the node with specified namespace/attributes not found, creates such
            node and returns it. """
        node=self.getTags(name, attrs, namespace=namespace, one=1)
        if node: return node
        else: return self.addChild(name, attrs, namespace=namespace)
    def setTagAttr(self,tag,attr,val):
        """ Creates new node (if not already present) with name "tag"
            and sets it's attribute "attr" to value "val". """
        try: self.getTag(tag).attrs[attr]=val
        except: self.addChild(tag,attrs={attr:val})
    def setTagData(self,tag,val,attrs={}):
        """ Creates new node (if not already present) with name "tag" and (optionally) attributes "attrs"
            and sets it's CDATA to string "val". """
        try: self.getTag(tag,attrs).setData(ustr(val))
        except: self.addChild(tag,attrs,payload=[ustr(val)])
    def has_attr(self,key):
        """ Checks if node have attribute "key"."""
        return key in self.attrs
    def __getitem__(self,item):
        """ Returns node's attribute "item" value. """
        return self.getAttr(item)
    def __setitem__(self,item,val):
        """ Sets node's attribute "item" value. """
        return self.setAttr(item,val)
    def __delitem__(self,item):
        """ Deletes node's attribute "item". """
        return self.delAttr(item)
    def __getattr__(self,attr):
        """ Reduce memory usage caused by T/NT classes - use memory only when needed. """
        # Lazily instantiate the T/NT helper proxies on first access; the
        # assignment means __getattr__ is not consulted again for them.
        if attr=='T':
            self.T=T(self)
            return self.T
        if attr=='NT':
            self.NT=NT(self)
            return self.NT
        raise AttributeError
class T:
    """ Auxiliary class used to quick access to node's child nodes. """
    # Attribute access is proxied onto the wrapped node: reading an attribute
    # creates/returns the child tag (setTag), assigning a string sets that
    # child's CDATA, assigning a Node replaces the child, and deleting the
    # attribute removes the child.  The wrapped node is stored through
    # __dict__ directly so __setattr__ is not triggered for it.
    def __init__(self,node): self.__dict__['node']=node
    def __getattr__(self,attr): return self.node.setTag(attr)
    def __setattr__(self,attr,val):
        if isinstance(val,Node): Node.__init__(self.node.setTag(attr),node=val)
        else: return self.node.setTagData(attr,val)
    def __delattr__(self,attr): return self.node.delChild(attr)
class NT(T):
    """ Auxiliary class used to quick create node's child nodes. """
    # Unlike T, every access/assignment always ADDS a new child instead of
    # reusing an existing one with the same name.
    def __getattr__(self,attr): return self.node.addChild(attr)
    def __setattr__(self,attr,val):
        if isinstance(val,Node): self.node.addChild(attr,node=val)
        else: return self.node.addChild(attr,payload=[val])
DBG_NODEBUILDER = 'nodebuilder'
class NodeBuilder:
""" Builds a Node class minidom from data parsed to it. This class used for two purposes:
1. Creation an XML Node from a textual representation. F.e. reading a config file. See an XML2Node method.
2. Handling an incoming XML stream. This is done by mangling
the __dispatch_depth parameter and redefining the dispatch method.
You do not need to use this class directly if you do not designing your own XML handler."""
def __init__(self,data=None,initial_node=None):
""" Takes two optional parameters: "data" and "initial_node".
By default class initialised with empty Node class instance.
Though, if "initial_node" is provided it used as "starting point".
You can think about it as of "node upgrade".
"data" (if provided) feeded to parser immidiatedly after instance init.
"""
self.DEBUG(DBG_NODEBUILDER, "Preparing to handle incoming XML stream.", 'start')
self._parser = xml.parsers.expat.ParserCreate()
self._parser.StartElementHandler = self.starttag
self._parser.EndElementHandler = self.endtag
self._parser.StartNamespaceDeclHandler = self.handle_namespace_start
self._parser.CharacterDataHandler = self.handle_cdata
self.Parse = self._parser.Parse
self.__depth = 0
self.__last_depth = 0
self.__max_depth = 0
self._dispatch_depth = 1
self._document_attrs = None
self._document_nsp = None
self._mini_dom=initial_node
self.last_is_data = 1
self._ptr=None
self.data_buffer = None
if data:
self._parser.Parse(data,1)
def check_data_buffer(self):
if self.data_buffer:
self._ptr.data.append(''.join(self.data_buffer))
del self.data_buffer[:]
self.data_buffer = None
def destroy(self):
""" Method used to allow class instance to be garbage-collected. """
self.check_data_buffer()
self._parser.StartElementHandler = None
self._parser.EndElementHandler = None
self._parser.CharacterDataHandler = None
self._parser.StartNamespaceDeclHandler = None
def starttag(self, tag, attrs):
"""XML Parser callback. Used internally"""
self.check_data_buffer()
self._inc_depth()
self.DEBUG(DBG_NODEBUILDER, "DEPTH -> %i , tag -> %s, attrs -> %s" % (self.__depth, tag, repr(attrs)), 'down')
if self.__depth == self._dispatch_depth:
if not self._mini_dom :
self._mini_dom = Node(tag=tag, attrs=attrs, nsp = self._document_nsp, node_built=True)
else:
Node.__init__(self._mini_dom,tag=tag, attrs=attrs, nsp = self._document_nsp, node_built=True)
self._ptr = self._mini_dom
elif self.__depth > self._dispatch_depth:
self._ptr.kids.append(Node(tag=tag,parent=self._ptr,attrs=attrs, node_built=True))
self._ptr = self._ptr.kids[-1]
if self.__depth == 1:
self._document_attrs = {}
self._document_nsp = {}
nsp, name = (['']+tag.split(':'))[-2:]
for attr,val in attrs.items():
if attr == 'xmlns':
self._document_nsp[u''] = val
elif attr.startswith('xmlns:'):
self._document_nsp[attr[6:]] = val
else:
self._document_attrs[attr] = val
ns = self._document_nsp.get(nsp, 'http://www.gajim.org/xmlns/undeclared-root')
self.stream_header_received(ns, name, attrs)
if not self.last_is_data and self._ptr.parent:
self._ptr.parent.data.append('')
self.last_is_data = 0
def endtag(self, tag ):
    """XML Parser callback. Used internally.

    Closes the current element: dispatches a completed top-level
    stanza, or moves the pointer back up one level. Signals stream
    end when depth returns to 0.
    """
    self.DEBUG(DBG_NODEBUILDER, "DEPTH -> %i , tag -> %s" % (self.__depth, tag), 'up')
    self.check_data_buffer()
    if self.__depth == self._dispatch_depth:
        # A complete stanza has been built — hand it to the dispatcher.
        self.dispatch(self._mini_dom)
    elif self.__depth > self._dispatch_depth:
        self._ptr = self._ptr.parent
    else:
        self.DEBUG(DBG_NODEBUILDER, "Got higher than dispatch level. Stream terminated?", 'stop')
    self._dec_depth()
    self.last_is_data = 0
    if self.__depth == 0: self.stream_footer_received()
def handle_cdata(self, data):
    """XML Parser callback. Buffers character data until the next tag event."""
    if self.last_is_data:
        # Continuation of the current text run.
        if self.data_buffer:
            self.data_buffer.append(data)
    elif self._ptr:
        # First chunk of a new text run.
        self.data_buffer = [data]
        self.last_is_data = 1
def handle_namespace_start(self, prefix, uri):
    """XML Parser callback. Used internally.

    Only flushes pending text; the declaration itself is recorded
    via the attribute handling in starttag().
    """
    self.check_data_buffer()
def DEBUG(self, level, text, comment=None):
    """ Gets all NodeBuilder walking events. Can be used for debugging if redefined.

    Default implementation is a no-op.
    """
def getDom(self):
    """ Returns just built Node (flushing any buffered text first). """
    self.check_data_buffer()
    return self._mini_dom
def dispatch(self,stanza):
    """ Gets called when the NodeBuilder reaches some level of depth on it's way up with the built
    node as argument. Can be redefined to convert incoming XML stanzas to program events.
    Default implementation is a no-op. """
def stream_header_received(self,ns,tag,attrs):
    """ Method called when stream just opened. Hook for subclasses. """
    self.check_data_buffer()
def stream_footer_received(self):
    """ Method called when stream just closed. Hook for subclasses. """
    self.check_data_buffer()
def has_received_endtag(self, level=0):
    """ Return True if at least one end tag was seen (at level).

    True when the parser has already descended past *level*
    (max depth exceeded it) and has since come back to it or above.
    """
    return self.__depth <= level and self.__max_depth > level
def _inc_depth(self):
    """Descend one level, remembering the previous and maximum depth."""
    self.__last_depth = self.__depth
    self.__depth += 1
    self.__max_depth = max(self.__depth, self.__max_depth)
def _dec_depth(self):
    """Ascend one level, remembering the previous depth."""
    self.__last_depth = self.__depth
    self.__depth -= 1
def XML2Node(xml):
    """ Converts supplied textual string into XML node. Handy f.e. for reading configuration file.
    Raises xml.parser.expat.parsererror if provided string is not well-formed XML. """
    return NodeBuilder(xml).getDom()
def BadXML2Node(xml):
    """ Converts supplied textual string into XML node. Survives if xml data is cutted half way round.
    I.e. "<html>some text <br>some more text". Will raise xml.parser.expat.parsererror on misplaced
    tags though. F.e. "<b>some text <br>some more text</b>" will not work.

    NOTE(review): currently identical to XML2Node -- the tolerance comes
    from never requiring the document to be closed before getDom().
    """
    return NodeBuilder(xml).getDom()
# vim: se ts=3:
|
sgala/gajim
|
src/common/xmpp/simplexml.py
|
Python
|
gpl-3.0
| 18,852
|
from subprocess import Popen, PIPE
from music_crawler import MusicCrawler
from playlist import Playlist
from song import Song
import sys
def play(mp3Path):
    """Start playing *mp3Path* with the external ``mpg123`` player.

    Returns the Popen handle so the caller can stop playback later
    (see stop()). Output is captured via pipes so the menu stays clean.
    """
    p = Popen(["mpg123", mp3Path], stdout=PIPE, stderr=PIPE)
    return p
def stop(process):
    """Kill a player process previously returned by play()."""
    process.kill()
# Interactive menu loop driving the music library.
print (10 * '-')
print ("Menu:")
print ("1. Generate songs from a directory and make a playlist.")
print ("2. PPrint playlist.")
print ("3. Get next song.")
print ("4. Play song.")
print ("5. Stop song.")
print ("0. Exit.")
while True:
    choice = input("Enter your choice: ")
    # Bug fix: compare strings with '==', not 'is'. 'is' tests object
    # identity and only appeared to work via CPython string interning;
    # it is not guaranteed for values returned by input().
    if choice == '1':
        directory = input("Type your directory: ")
        crawler = MusicCrawler(directory)
        new_playlist = crawler.generate_playlist()
    elif choice == '2':
        # NOTE(review): choices 2-5 assume 1 (and 3, for playback) ran
        # first; otherwise new_playlist/current_song/bam are unbound.
        new_playlist.pprint_playlist()
    elif choice == '3':
        current_song = new_playlist.next_song()
        print ("The next song is: {}".format(current_song))
    elif choice == '4':
        bam = play(current_song.title + '.mp3')
    elif choice == '5':
        stop(bam)
    else:
        sys.exit()
|
pepincho/Python101-and-Algo1-Courses
|
Programming-101-v3/week4/1-Music-Library/music_player.py
|
Python
|
mit
| 1,058
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Third Party Stuff
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `vote` field to ProposalComment.

    Despite the field name, the verbose name shows it marks a comment
    as a justification.
    """

    dependencies = [
        ('proposals', '0012_auto_20150709_0842'),
    ]

    operations = [
        migrations.AddField(
            model_name='proposalcomment',
            name='vote',
            field=models.BooleanField(default=False, verbose_name='Is Justification?'),
            preserve_default=True,
        ),
    ]
|
ChillarAnand/junction
|
junction/proposals/migrations/0013_proposalcomment_vote.py
|
Python
|
mit
| 501
|
from __future__ import absolute_import
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
# local test models
from .admin import InnerInline
from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person,
OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book)
class TestInline(TestCase):
    """Rendering and saving behaviour of admin inline formsets."""
    urls = "regressiontests.admin_inlines.urls"
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        # One Holder with a single Inner, edited as a logged-in superuser.
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id

        result = self.client.login(username='super', password='secret')
        self.assertEqual(result, True)

    def tearDown(self):
        self.client.logout()

    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        response = self.client.get(self.change_url)
        inner_formset = response.context[-1]['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')

    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        inner = Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get('/admin/admin_inlines/holder/%i/'
                                   % holder.id)
        self.assertContains(response, '<label>Inner readonly label:</label>')

    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get('/admin/admin_inlines/author/add/')
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-Book Relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_primary(self):
        # Saving an inline whose model has a OneToOne primary key.
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't cary her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': u'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post('/admin/admin_inlines/fashionista/add/', data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)

    def test_tabular_non_field_errors(self):
        """
        Ensure that non_field_errors are displayed correctly, including the
        right value for colspan. Refs #13510.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': u'Save',
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post('/admin/admin_inlines/titlecollection/add/', data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbock.
        self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>The two titles must be the same</li></ul></td></tr>')

    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get('/admin/admin_inlines/novel/add/')
        self.assertEqual(response.status_code, 200)
        # View should have the child inlines section
        self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')

    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get('/admin/admin_inlines/poll/add/')
        self.assertEqual(response.status_code, 200)
        # Add parent object view should have the child inlines section
        self.assertContains(response, '<div class="inline-group" id="question_set-group">')
        # The right callabe should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')

    def test_help_text(self):
        """
        Ensure that the inlines' model field help texts are displayed when
        using both the stacked and tabular layouts.
        Ref #8190.
        """
        response = self.client.get('/admin/admin_inlines/holder4/add/')
        self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
        self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1)

    def test_non_related_name_inline(self):
        """
        Ensure that multiple inlines with related_name='+' have correct form
        prefixes. Bug #16838.
        """
        response = self.client.get('/admin/admin_inlines/capofamiglia/add/')

        self.assertContains(response,
                '<input type="hidden" name="-1-0-id" id="id_-1-0-id" />')
        self.assertContains(response,
                '<input type="hidden" name="-1-0-capo_famiglia" '
                'id="id_-1-0-capo_famiglia" />')
        self.assertContains(response,
                '<input id="id_-1-0-name" type="text" class="vTextField" '
                'name="-1-0-name" maxlength="100" />')

        self.assertContains(response,
                '<input type="hidden" name="-2-0-id" id="id_-2-0-id" />')
        self.assertContains(response,
                '<input type="hidden" name="-2-0-capo_famiglia" '
                'id="id_-2-0-capo_famiglia" />')
        self.assertContains(response,
                '<input id="id_-2-0-name" type="text" class="vTextField" '
                'name="-2-0-name" maxlength="100" />')
class TestInlineMedia(TestCase):
    """Media (JS assets) declared on ModelAdmins and inlines reach the page."""
    urls = "regressiontests.admin_inlines.urls"
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        result = self.client.login(username='super', password='secret')
        self.assertEqual(result, True)

    def tearDown(self):
        self.client.logout()

    def test_inline_media_only_base(self):
        # Media declared only on the parent ModelAdmin.
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        change_url = '/admin/admin_inlines/holder/%i/' % holder.id
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')

    def test_inline_media_only_inline(self):
        # Media declared only on the inline.
        holder = Holder3(dummy=13)
        holder.save()
        Inner3(dummy=42, holder=holder).save()
        change_url = '/admin/admin_inlines/holder3/%i/' % holder.id
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_inline_scripts.js')

    def test_all_inline_media(self):
        # Media from both the ModelAdmin and the inline should be present.
        holder = Holder2(dummy=13)
        holder.save()
        Inner2(dummy=42, holder=holder).save()
        change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
        self.assertContains(response, 'my_awesome_inline_scripts.js')
class TestInlineAdminForm(TestCase):
    """Direct unit test of InlineAdminForm (no HTTP round-trip)."""
    urls = "regressiontests.admin_inlines.urls"

    def test_immutable_content_type(self):
        """Regression for #9362
        The problem depends only on InlineAdminForm and its "original"
        argument, so we can safely set the other arguments to None/{}. We just
        need to check that the content_type argument of Child isn't altered by
        the internals of the inline form."""
        sally = Teacher.objects.create(name='Sally')
        john = Parent.objects.create(name='John')
        joe = Child.objects.create(name='Joe', teacher=sally, parent=john)

        iaf = InlineAdminForm(None, None, {}, {}, joe)
        parent_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(iaf.original.content_type, parent_ct)
class TestInlinePermissions(TestCase):
    """
    Make sure the admin respects permissions for objects that are edited
    inline. Refs #8060.
    """
    urls = "regressiontests.admin_inlines.urls"

    def setUp(self):
        # A staff user with add/change permission on the parent models
        # only; per-test methods grant inline-model permissions as needed.
        self.user = User(username='admin')
        self.user.is_staff = True
        self.user.is_active = True
        self.user.set_password('secret')
        self.user.save()

        self.author_ct = ContentType.objects.get_for_model(Author)
        self.holder_ct = ContentType.objects.get_for_model(Holder2)
        self.book_ct = ContentType.objects.get_for_model(Book)
        self.inner_ct = ContentType.objects.get_for_model(Inner2)

        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
        self.user.user_permissions.add(permission)

        author = Author.objects.create(pk=1, name=u'The Author')
        book = author.books.create(name=u'The inline Book')
        self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id
        # Get the ID of the automatically created intermediate model for thw Author-Book m2m
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
        self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk

        holder = Holder2.objects.create(dummy=13)
        inner2 = Inner2.objects.create(dummy=42, holder=holder)
        self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
        self.inner2_id = inner2.id

        self.assertEqual(
            self.client.login(username='admin', password='secret'),
            True)

    def tearDown(self):
        self.client.logout()

    def test_inline_add_m2m_noperm(self):
        response = self.client.get('/admin/admin_inlines/author/add/')
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_noperm(self):
        response = self.client.get('/admin/admin_inlines/holder2/add/')
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_noperm(self):
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_change_fk_noperm(self):
        response = self.client.get(self.holder_change_url)
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_add_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get('/admin/admin_inlines/author/add/')
        # No change permission on Books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get('/admin/admin_inlines/holder2/add/')
        # Add permission on inner2s, so we get the inline
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        self.assertContains(response, 'value="3" id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
        self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_m2m_change_perm(self):
        permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # We have change perm on books, so we can add/change/delete inlines
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(response, 'Add another Author-Book Relationship')
        self.assertContains(response, 'value="4" id="id_Author_books-TOTAL_FORMS"')
        self.assertContains(response, '<input type="hidden" name="Author_books-0-id" value="%i"' % self.author_book_auto_m2m_intermediate_id)
        self.assertContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add permission on inner2s, so we can add but not modify existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        # 3 extra forms only, not the existing instance form
        self.assertContains(response, 'value="3" id="id_inner2_set-TOTAL_FORMS"')
        self.assertNotContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)

    def test_inline_change_fk_change_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change permission on inner2s, so we can change existing but not add new
        self.assertContains(response, '<h2>Inner2s</h2>')
        # Just the one form for existing instances
        self.assertContains(response, 'value="1" id="id_inner2_set-TOTAL_FORMS"')
        self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
        # max-num 0 means we can't add new ones
        self.assertContains(response, 'value="0" id="id_inner2_set-MAX_NUM_FORMS"')

    def test_inline_change_fk_add_change_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(response, 'value="4" id="id_inner2_set-TOTAL_FORMS"')
        self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)

    def test_inline_change_fk_change_del_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(response, 'value="1" id="id_inner2_set-TOTAL_FORMS"')
        self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, three for new
        self.assertContains(response, 'value="4" id="id_inner2_set-TOTAL_FORMS"')
        self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
|
mixman/djangodev
|
tests/regressiontests/admin_inlines/tests.py
|
Python
|
bsd-3-clause
| 19,368
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from astakos.quotaholder_app.exception import NoCapacityError, NoQuantityError
class Operation(object):
    """Abstract quota operation on a holding (template-method pattern).

    Subclasses implement _prepare/_finalize; the public prepare/finalize
    wrappers first sanity-check the holding's usage invariant.
    """

    @staticmethod
    def assertions(holding):
        # Invariant: committed usage can never exceed reserved usage.
        assert holding.usage_min <= holding.usage_max

    @classmethod
    def _prepare(cls, holding, quantity, check=True):
        raise NotImplementedError

    @classmethod
    def prepare(cls, holding, quantity, check=True):
        """Validate the holding, then reserve *quantity* on it."""
        cls.assertions(holding)
        cls._prepare(holding, quantity, check)

    @classmethod
    def _finalize(cls, holding, quantity):
        raise NotImplementedError

    @classmethod
    def finalize(cls, holding, quantity):
        """Validate the holding, then commit *quantity* on it."""
        cls.assertions(holding)
        cls._finalize(holding, quantity)

    @classmethod
    def undo(cls, holding, quantity):
        """Cancel a prepared operation by preparing its negation."""
        cls.prepare(holding, -quantity, check=False)

    @classmethod
    def revert(cls, holding, quantity):
        """Like undo(), but skips the invariant assertions, which do
        not hold while a commission is being rolled back."""
        cls._prepare(holding, -quantity, check=False)

    @classmethod
    def provision(cls, holding, quantity, importing=True):
        """Describe this operation as a provision dict; the quantity's
        sign encodes the direction (import positive, release negative)."""
        signed_quantity = quantity if importing else -quantity
        return dict(holder=holding.holder,
                    source=holding.source,
                    resource=holding.resource,
                    quantity=signed_quantity)
class Import(Operation):
    """Operation that increases usage of a holding, subject to its limit."""

    @classmethod
    def _prepare(cls, holding, quantity, check=True):
        # Reserve capacity by bumping usage_max; refuse to cross the
        # holding's limit unless checking is disabled (forced commissions
        # and rollbacks pass check=False).
        usage_max = holding.usage_max
        new_usage_max = usage_max + quantity
        limit = holding.limit
        if check and new_usage_max > limit:
            holder = holding.holder
            resource = holding.resource
            m = ("%s has not enough capacity of %s." % (holder, resource))
            provision = cls.provision(holding, quantity, importing=True)
            raise NoCapacityError(m,
                                  provision=provision,
                                  limit=limit,
                                  usage=usage_max)
        holding.usage_max = new_usage_max
        holding.save()

    @classmethod
    def _finalize(cls, holding, quantity):
        # Commit the reservation made in _prepare.
        holding.usage_min += quantity
        holding.save()
class Release(Operation):
    """Operation that decreases usage of a holding; usage cannot go negative."""

    @classmethod
    def _prepare(cls, holding, quantity, check=True):
        # Tentatively release by lowering usage_min; refuse to go below
        # zero unless checking is disabled (forced commissions/rollbacks).
        usage_min = holding.usage_min
        new_usage_min = usage_min - quantity
        if check and new_usage_min < 0:
            holder = holding.holder
            resource = holding.resource
            m = ("%s attempts to release more %s than it contains." %
                 (holder, resource))
            provision = cls.provision(holding, quantity, importing=False)
            raise NoQuantityError(m,
                                  provision=provision,
                                  limit=0,
                                  usage=usage_min)
        holding.usage_min = new_usage_min
        holding.save()

    @classmethod
    def _finalize(cls, holding, quantity):
        # Commit the release.
        holding.usage_max -= quantity
        holding.save()
class Operations(object):
    """Records prepared operations so they can be rolled back as a unit."""

    def __init__(self):
        self.operations = []

    def prepare(self, operation, holding, quantity, force):
        """Prepare *operation* on *holding* and remember it for revert().

        When *force* is true, capacity/quantity checks are skipped.
        """
        operation.prepare(holding, quantity, not force)
        self.operations.append((operation, holding, quantity))

    def revert(self):
        """Roll back every operation prepared through this object."""
        for entry in self.operations:
            op, holding, quantity = entry
            op.revert(holding, quantity)
def finalize(operation, holding, quantity):
    """Commit a previously prepared *operation* on *holding*."""
    operation.finalize(holding, quantity)
def undo(operation, holding, quantity):
    """Cancel a previously prepared *operation* on *holding*."""
    operation.undo(holding, quantity)
|
grnet/synnefo
|
snf-astakos-app/astakos/quotaholder_app/commission.py
|
Python
|
gpl-3.0
| 4,226
|
#!/usr/bin/env python
import biggles
import numpy
#
# Create example 2-dimensional data set of two solitons colliding.
#
# Grid resolution and coordinate axes.
n = 64
x = numpy.arange( -10., 10., 20./n )
t = numpy.arange( -1., 1., 2./n )
z = numpy.zeros( (len(x),len(t)) )
# Evaluate the two-soliton profile (see header comment) pointwise
# over the (x, t) grid.
for i in range(len(x)):
    for j in range(len(t)):
        z[i,j] = -12. * (3. + 4.*numpy.cosh(2.*x[i]-8.*t[j]) \
                 + numpy.cosh(4.*x[i] - 64.*t[j])) / \
                 (3.*numpy.cosh(x[i]-28.*t[j]) \
                 + numpy.cosh(3.*x[i]-36.*t[j]))**2

#
# Make contour component.
#
c = biggles.Contours( z, x, t, color="red" )
#
# For fine-grained color control, the Contours component allows you to
# specify a function which returns the color applied to each contour line.
# The arguments passed to the function are:
#
# i integer index of contour (0,..,n-1)
# n total number of contours
# z0 z value of contour
# z_min minimum z contour value
# z_max maximum z contour value
#
# The function should return a valid color, or None for the default.
#
# Here we show how to set every other contour to blue. The remaining
# contours are drawn with the default color, defined above to be red.
#
def even_blue( i, n, z0, z_min, z_max ):
    """Color callback for Contours: blue (0x0000ff) for even-indexed
    contours, None (meaning "use the component default") otherwise.

    Only the contour index *i* matters; the remaining arguments are
    part of the callback signature biggles requires.
    """
    return 0x0000ff if i % 2 == 0 else None
# Install the per-contour color callback defined above.
c.func_color = even_blue

#
# Similarly, Contours accepts similar functions for line type (.func_linestyle)
# and width (.func_linewidth). The arguments passed are the same.
#

#
# Make framed plot container and add contour component.
#
p = biggles.FramedPlot()
p.add( c )

#
# Output.
#
#p.write_img( 400, 400, "example8.png" )
#p.write_eps( "example8.eps" )
p.show()
|
kstory8/biggles
|
examples/example8.py
|
Python
|
gpl-2.0
| 1,668
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.utils.datastructures import SortedDict # noqa
from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class Rule(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall rule."""

    def get_dict(self):
        # Expose the rule as a plain dict, duplicating 'id' under the
        # prefixed key 'rule_id' for consumers that expect it.
        rule_dict = self._apidict
        rule_dict['rule_id'] = rule_dict['id']
        return rule_dict
class Policy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall policy."""

    def get_dict(self):
        # Expose the policy as a plain dict, duplicating 'id' under the
        # prefixed key 'policy_id' for consumers that expect it.
        policy_dict = self._apidict
        policy_dict['policy_id'] = policy_dict['id']
        return policy_dict
class Firewall(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall."""

    def get_dict(self):
        # Expose the firewall as a plain dict, duplicating 'id' under
        # the prefixed key 'firewall_id' for consumers that expect it.
        firewall_dict = self._apidict
        firewall_dict['firewall_id'] = firewall_dict['id']
        return firewall_dict
def rule_create(request, **kwargs):
    """Create a firewall rule

    :param request: request context
    :param name: name for rule
    :param description: description for rule
    :param protocol: protocol for rule
    :param action: action for rule
    :param source_ip_address: source IP address or subnet
    :param source_port: integer in [1, 65535] or range in a:b
    :param destination_ip_address: destination IP address or subnet
    :param destination_port: integer in [1, 65535] or range in a:b
    :param shared: boolean (default false)
    :param enabled: boolean (default true)
    :return: Rule object
    """
    # The neutron API expects the attributes nested under 'firewall_rule'.
    body = {'firewall_rule': kwargs}
    rule = neutronclient(request).create_firewall_rule(
        body).get('firewall_rule')
    return Rule(rule)
def rule_list(request, **kwargs):
    """List firewall rules with each rule's parent policy attached."""
    return _rule_list(request, expand_policy=True, **kwargs)
def _rule_list(request, expand_policy, **kwargs):
    """List firewall rules; optionally resolve each rule's policy object."""
    rules = neutronclient(request).list_firewall_rules(
        **kwargs).get('firewall_rules')
    if expand_policy:
        # One bulk policy listing + dict lookup avoids per-rule API calls.
        policies = _policy_list(request, expand_rule=False)
        policy_dict = SortedDict((p.id, p) for p in policies)
        for rule in rules:
            rule['policy'] = policy_dict.get(rule['firewall_policy_id'])
    return [Rule(r) for r in rules]
def rule_get(request, rule_id):
    """Return a single firewall rule with its parent policy attached."""
    return _rule_get(request, rule_id, expand_policy=True)
def _rule_get(request, rule_id, expand_policy):
    """Fetch one firewall rule; optionally resolve its policy object."""
    rule = neutronclient(request).show_firewall_rule(
        rule_id).get('firewall_rule')
    if expand_policy:
        # A rule may be orphaned (not yet attached to any policy).
        if rule['firewall_policy_id']:
            rule['policy'] = _policy_get(request, rule['firewall_policy_id'],
                                         expand_rule=False)
        else:
            rule['policy'] = None
    return Rule(rule)
def rule_delete(request, rule_id):
    """Delete the firewall rule identified by *rule_id*."""
    neutronclient(request).delete_firewall_rule(rule_id)
def rule_update(request, rule_id, **kwargs):
    """Update attributes of an existing firewall rule; return the Rule."""
    body = {'firewall_rule': kwargs}
    rule = neutronclient(request).update_firewall_rule(
        rule_id, body).get('firewall_rule')
    return Rule(rule)
def policy_create(request, **kwargs):
    """Create a firewall policy

    :param request: request context
    :param name: name for policy
    :param description: description for policy
    :param firewall_rules: ordered list of rules in policy
    :param shared: boolean (default false)
    :param audited: boolean (default false)
    :return: Policy object
    """
    # The neutron API expects the attributes nested under 'firewall_policy'.
    body = {'firewall_policy': kwargs}
    policy = neutronclient(request).create_firewall_policy(
        body).get('firewall_policy')
    return Policy(policy)
def policy_list(request, **kwargs):
    """List firewall policies with their rule objects attached."""
    return _policy_list(request, expand_rule=True, **kwargs)
def _policy_list(request, expand_rule, **kwargs):
    """List firewall policies; optionally resolve their rule objects."""
    policies = neutronclient(request).list_firewall_policies(
        **kwargs).get('firewall_policies')
    if expand_rule:
        # One bulk rule listing + dict lookup avoids per-policy API calls.
        rules = _rule_list(request, expand_policy=False)
        rule_dict = SortedDict((rule.id, rule) for rule in rules)
        for p in policies:
            p['rules'] = [rule_dict.get(rule) for rule in p['firewall_rules']]
    return [Policy(p) for p in policies]
def policy_get(request, policy_id):
    """Return a single firewall policy with its rule objects attached."""
    return _policy_get(request, policy_id, expand_rule=True)
def _policy_get(request, policy_id, expand_rule):
    """Fetch one firewall policy; optionally resolve its ordered rules."""
    policy = neutronclient(request).show_firewall_policy(
        policy_id).get('firewall_policy')
    if expand_rule:
        policy_rules = policy['firewall_rules']
        if policy_rules:
            rules = _rule_list(request, expand_policy=False,
                               firewall_policy_id=policy_id)
            # Rebuild rule objects in the policy's own ordering.
            rule_dict = SortedDict((rule.id, rule) for rule in rules)
            policy['rules'] = [rule_dict.get(rule) for rule in policy_rules]
        else:
            policy['rules'] = []
    return Policy(policy)
def policy_delete(request, policy_id):
    """Delete the firewall policy identified by *policy_id*."""
    neutronclient(request).delete_firewall_policy(policy_id)
def policy_update(request, policy_id, **kwargs):
    """Update attributes of an existing firewall policy; return the Policy."""
    body = {'firewall_policy': kwargs}
    policy = neutronclient(request).update_firewall_policy(
        policy_id, body).get('firewall_policy')
    return Policy(policy)
def policy_insert_rule(request, policy_id, **kwargs):
    """Insert a rule into a policy at the position described by *kwargs*."""
    policy = neutronclient(request).firewall_policy_insert_rule(
        policy_id, kwargs)
    return Policy(policy)
def policy_remove_rule(request, policy_id, **kwargs):
    """Remove the rule described by *kwargs* from a policy."""
    policy = neutronclient(request).firewall_policy_remove_rule(
        policy_id, kwargs)
    return Policy(policy)
def firewall_create(request, **kwargs):
    """Create a firewall for specified policy

    :param request: request context
    :param name: name for firewall
    :param description: description for firewall
    :param firewall_policy_id: policy id used by firewall
    :param shared: boolean (default false)
    :param admin_state_up: boolean (default true)
    :return: Firewall object
    """
    # The neutron API expects the attributes nested under 'firewall'.
    body = {'firewall': kwargs}
    firewall = neutronclient(request).create_firewall(body).get('firewall')
    return Firewall(firewall)
def firewall_list(request, **kwargs):
    """List firewalls with each firewall's policy object attached."""
    return _firewall_list(request, expand_policy=True, **kwargs)
def _firewall_list(request, expand_policy, **kwargs):
    """List firewalls; optionally attach the Policy each one references."""
    firewalls = neutronclient(request).list_firewalls(
        **kwargs).get('firewalls')
    if expand_policy:
        # One policy listing covers all firewalls; index it by id.
        by_id = SortedDict(
            (p.id, p) for p in _policy_list(request, expand_rule=False))
        for fw in firewalls:
            fw['policy'] = by_id.get(fw['firewall_policy_id'])
    return [Firewall(fw) for fw in firewalls]
def firewall_get(request, firewall_id):
    """Fetch one firewall, with its Policy object attached."""
    return _firewall_get(request, firewall_id, expand_policy=True)
def _firewall_get(request, firewall_id, expand_policy):
    """Fetch a firewall; optionally resolve its policy id to a Policy."""
    firewall = neutronclient(request).show_firewall(
        firewall_id).get('firewall')
    if expand_policy:
        policy_id = firewall['firewall_policy_id']
        firewall['policy'] = (
            _policy_get(request, policy_id, expand_rule=False)
            if policy_id else None)
    return Firewall(firewall)
def firewall_delete(request, firewall_id):
    """Delete the firewall identified by *firewall_id*."""
    neutronclient(request).delete_firewall(firewall_id)
def firewall_update(request, firewall_id, **kwargs):
    """Update a firewall and return the refreshed Firewall object."""
    updated = neutronclient(request).update_firewall(
        firewall_id, {'firewall': kwargs}).get('firewall')
    return Firewall(updated)
|
ikargis/horizon_fod
|
openstack_dashboard/api/fwaas.py
|
Python
|
apache-2.0
| 8,043
|
# Django settings for aeSupernova project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# BUG FIX: without the trailing comma, ADMINS evaluated to the single
# 2-tuple ('admin', 'admin@gmail.com') instead of the tuple of
# (name, email) pairs Django expects for error mailing.
ADMINS = (
    ('admin', 'admin@gmail.com'),
)
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '111.111.111.111']
MANAGERS = ADMINS
# SECURITY NOTE(review): database credentials are committed in clear text;
# they should be loaded from environment variables or an untracked file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'users',
        'USER': 'usuario',
        'PASSWORD': 'senha',
        'HOST': '111.111.111.111',
        'PORT': ''
    },
    'supernova': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'supernova',  # Or path to database file if using sqlite3.
        'USER': 'usuario',  # Not used with sqlite3.
        'PASSWORD': 'senha',  # Not used with sqlite3.
        'HOST': '111.111.111.111',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',  # Set to empty string for default. Not used with sqlite3.
    }
}
############### VARIABLES ########################
# Filesystem locations and URLs used by the report-generation code.
PASTA_TEMPORARIA = '/home/supernova/temp/'
PASTA_RELATORIOS = '/home/supernova/public/relatorios/disciplinas'
TEMPLATE_RELATORIOS = '/home/supernova/aeSupernova/aeSupernova/document/templates/'
PASTA_RELATORIOS_SEPARADO = '/home/supernova/public/relatorios/separado_por_semestres/'
ARQUIVO_CONF_BD = '/home/supernova/aeSupernova/settings.db'
PASTA_SUPERNOVA = '/home/supernova/aeSupernova/'
LOGIN_PAGE = '/aeSupernova/login/'
##################################################
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '/home/supernova/public/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to version control; rotate
# it and load it from the environment before deploying.
SECRET_KEY = '@8bhatmo!nn!ac7*$)yor2#e_bcw^uew_losjl6eb81=3%__4-'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'login.views.UserRestrictMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'aeSupernova.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'aeSupernova.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    '/home/supernova/aeSupernova/aeSupernova/opticalSheet/templates',
    '/home/supernova/aeSupernova/aeSupernova/datafile/templates',
    '/home/supernova/aeSupernova/aeSupernova/header/templates',
    '/home/supernova/aeSupernova/aeSupernova/generator/templates',
    '/home/supernova/aeSupernova/aeSupernova/control/templates',
    '/home/supernova/aeSupernova/aeSupernova/encoder/templates',
    '/home/supernova/aeSupernova/aeSupernova/presentation/templates',
    '/home/supernova/aeSupernova/aeSupernova/templates',
    '/home/supernova/aeSupernova/aeSupernova/lerJupiter/templates',
    '/home/supernova/aeSupernova/aeSupernova/algeLin/templates',
    '/home/supernova/aeSupernova/templates/login',
    '/home/supernova/aeSupernova/templates/interface',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'login',
    'interface',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
SuperNovaPOLIUSP/supernova
|
.django-settings.py
|
Python
|
agpl-3.0
| 7,079
|
#
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
from cStringIO import StringIO
import unittest
import subunit.tests
from subunit import content, content_type, details
def test_suite():
    """Return a suite containing every test defined in this module."""
    loader = subunit.tests.TestUtil.TestLoader()
    return loader.loadTestsFromName(__name__)
class TestSimpleDetails(unittest.TestCase):
    """Tests for SimpleDetailsParser line handling and detail extraction."""

    def test_lineReceived(self):
        parser = details.SimpleDetailsParser(None)
        parser.lineReceived("foo\n")
        parser.lineReceived("bar\n")
        self.assertEqual("foo\nbar\n", parser._message)

    def test_lineReceived_escaped_bracket(self):
        # A leading " ]" is an escaped end-of-details bracket; the parser
        # strips the escape and keeps the "]" line in the message.
        parser = details.SimpleDetailsParser(None)
        parser.lineReceived("foo\n")
        parser.lineReceived(" ]are\n")
        parser.lineReceived("bar\n")
        self.assertEqual("foo\n]are\nbar\n", parser._message)

    def test_get_message(self):
        parser = details.SimpleDetailsParser(None)
        self.assertEqual("", parser.get_message())

    def test_get_details(self):
        # (fixed: removed an unused ``traceback = ""`` local)
        parser = details.SimpleDetailsParser(None)
        expected = {}
        expected['traceback'] = content.Content(
            content_type.ContentType("text", "x-traceback",
                {'charset': 'utf8'}),
            lambda:[""])
        found = parser.get_details()
        # Content objects are compared piecewise: keys, MIME type, bytes.
        self.assertEqual(expected.keys(), found.keys())
        self.assertEqual(expected['traceback'].content_type,
            found['traceback'].content_type)
        self.assertEqual(''.join(expected['traceback'].iter_bytes()),
            ''.join(found['traceback'].iter_bytes()))

    def test_get_details_skip(self):
        # (fixed: removed an unused ``traceback = ""`` local)
        parser = details.SimpleDetailsParser(None)
        expected = {}
        expected['reason'] = content.Content(
            content_type.ContentType("text", "plain"),
            lambda:[""])
        found = parser.get_details("skip")
        self.assertEqual(expected, found)

    def test_get_details_success(self):
        # (fixed: removed an unused ``traceback = ""`` local)
        parser = details.SimpleDetailsParser(None)
        expected = {}
        expected['message'] = content.Content(
            content_type.ContentType("text", "plain"),
            lambda:[""])
        found = parser.get_details("success")
        self.assertEqual(expected, found)
class TestMultipartDetails(unittest.TestCase):
    """Tests for MultipartDetailsParser's wire-format handling."""
    def test_get_message_is_None(self):
        # Multipart parsers never synthesise a plain-text message.
        parser = details.MultipartDetailsParser(None)
        self.assertEqual(None, parser.get_message())
    def test_get_details(self):
        # With no input received there are no parts.
        parser = details.MultipartDetailsParser(None)
        self.assertEqual({}, parser.get_details())
    def test_parts(self):
        parser = details.MultipartDetailsParser(None)
        # Part header: MIME type line, then the part's name.
        parser.lineReceived("Content-Type: text/plain\n")
        parser.lineReceived("something\n")
        # Chunked body: "F\r\n" is a hex chunk length (0xF = 15 bytes,
        # exactly "serialised\nform"); the "0\r\n" terminates the chunks.
        parser.lineReceived("F\r\n")
        parser.lineReceived("serialised\n")
        parser.lineReceived("form0\r\n")
        expected = {}
        expected['something'] = content.Content(
            content_type.ContentType("text", "plain"),
            lambda:["serialised\nform"])
        found = parser.get_details()
        # Compare piecewise: keys, MIME type, then the byte payload.
        self.assertEqual(expected.keys(), found.keys())
        self.assertEqual(expected['something'].content_type,
            found['something'].content_type)
        self.assertEqual(''.join(expected['something'].iter_bytes()),
            ''.join(found['something'].iter_bytes()))
|
zarboz/XBMC-PVR-mac
|
tools/darwin/depends/samba/samba-3.6.6/lib/subunit/python/subunit/tests/test_details.py
|
Python
|
gpl-2.0
| 4,148
|
from hearthstone.enums import GameTag
from . import enums
class Manager(object):
    """Maps tag values onto attributes of a wrapped game object.

    Subclasses provide ``map``, a dict of tag -> attribute name; a value
    of ``None`` marks a tag as recognised but not exposed.
    """

    def __init__(self, obj):
        self.obj = obj
        self.observers = []

    def __getitem__(self, tag):
        # Only tags mapped to a real attribute name are readable; missing
        # attributes on the wrapped object read as 0.
        if self.map.get(tag):
            return getattr(self.obj, self.map[tag], 0)
        # BUG FIX: include the offending tag in the exception (the
        # original raised a bare KeyError, hiding which tag failed).
        raise KeyError(tag)

    def __setitem__(self, tag, value):
        setattr(self.obj, self.map[tag], value)

    def __iter__(self):
        # Yield only tags with a usable attribute mapping.
        for k in self.map:
            if self.map[k]:
                yield k

    def get(self, k, default=None):
        # NOTE(review): a key mapped to None is "in map" but raises in
        # __getitem__; callers appear to rely on that, so it is unchanged.
        return self[k] if k in self.map else default

    def items(self):
        for k, v in self.map.items():
            if v is not None:
                yield k, self[k]

    def register(self, observer):
        self.observers.append(observer)

    def update(self, tags):
        # Apply only tags that have a real attribute mapping.
        for k, v in tags.items():
            if self.map.get(k) is not None:
                self[k] = v
class GameManager(Manager):
    # Tag-to-attribute map for the Game entity.
    map = {
        GameTag.CARDTYPE: "type",
        GameTag.NEXT_STEP: "next_step",
        GameTag.NUM_MINIONS_KILLED_THIS_TURN: "minions_killed_this_turn",
        GameTag.PROPOSED_ATTACKER: "proposed_attacker",
        GameTag.PROPOSED_DEFENDER: "proposed_defender",
        GameTag.STATE: "state",
        GameTag.STEP: "step",
        GameTag.TURN: "turn",
        GameTag.ZONE: "zone",
    }
    def __init__(self, obj):
        """Attach to *obj* (the game) and assign it entity id 1."""
        super().__init__(obj)
        # Monotonic entity-id counter; the game itself is entity 1 and
        # new_entity() hands out 2, 3, ...
        self.counter = 1
        obj.entity_id = self.counter
    def action_start(self, type, source, index, target):
        # Broadcast the start of a game action to all registered observers.
        for observer in self.observers:
            observer.action_start(type, source, index, target)
    def action_end(self, type, source):
        # Broadcast the end of a game action to all registered observers.
        for observer in self.observers:
            observer.action_end(type, source)
    def new_entity(self, entity):
        """Assign the next entity id to *entity* and notify observers."""
        self.counter += 1
        entity.entity_id = self.counter
        for observer in self.observers:
            observer.new_entity(entity)
    def start_game(self):
        # Notify observers that the game has started.
        for observer in self.observers:
            observer.start_game()
    def step(self, step, next_step):
        # Observers are notified before the game object is mutated.
        for observer in self.observers:
            observer.game_step(step, next_step)
        self.obj.step = step
        self.obj.next_step = next_step
class PlayerManager(Manager):
    # Tag-to-attribute map for Player entities; values are attribute
    # names on the wrapped player object (see Manager).
    map = {
        GameTag.CANT_DRAW: "cant_draw",
        GameTag.CARDTYPE: "type",
        GameTag.COMBO_ACTIVE: "combo",
        GameTag.CONTROLLER: "controller",
        GameTag.CURRENT_PLAYER: "current_player",
        GameTag.CURRENT_SPELLPOWER: "spellpower",
        GameTag.EMBRACE_THE_SHADOW: "healing_as_damage",
        GameTag.FATIGUE: "fatigue_counter",
        GameTag.FIRST_PLAYER: "first_player",
        GameTag.HEALING_DOUBLE: "healing_double",
        GameTag.HERO_ENTITY: "hero",
        GameTag.LAST_CARD_PLAYED: "last_card_played",
        GameTag.MAXHANDSIZE: "max_hand_size",
        GameTag.MAXRESOURCES: "max_resources",
        GameTag.NUM_CARDS_DRAWN_THIS_TURN: "cards_drawn_this_turn",
        GameTag.NUM_CARDS_PLAYED_THIS_TURN: "cards_played_this_turn",
        GameTag.NUM_MINIONS_PLAYED_THIS_TURN: "minions_played_this_turn",
        GameTag.NUM_MINIONS_PLAYER_KILLED_THIS_TURN: "minions_killed_this_turn",
        GameTag.NUM_TIMES_HERO_POWER_USED_THIS_GAME: "times_hero_power_used_this_game",
        GameTag.OVERLOAD_LOCKED: "overload_locked",
        GameTag.OVERLOAD_OWED: "overloaded",
        GameTag.PLAYSTATE: "playstate",
        GameTag.RESOURCES: "max_mana",
        GameTag.RESOURCES_USED: "used_mana",
        GameTag.SPELLPOWER_DOUBLE: "spellpower_double",
        GameTag.STARTHANDSIZE: "start_hand_size",
        GameTag.HERO_POWER_DOUBLE: "hero_power_double",
        GameTag.TEMP_RESOURCES: "temp_mana",
        GameTag.TIMEOUT: "timeout",
        GameTag.TURN_START: "turn_start",
        # Project-local (non-GameTag) tag from the enums module.
        enums.CANT_OVERLOAD: "cant_overload",
    }
# Tag-to-attribute map for card entities, shared by CardManager.  A value
# of None marks a tag as recognised but not exposed as an attribute.
CARD_ATTRIBUTE_MAP = {
    GameTag.ADJACENT_BUFF: "adjacent_buff",
    GameTag.ARMOR: "armor",
    GameTag.ATK: "atk",
    GameTag.ATTACKING: "attacking",
    GameTag.ATTACHED: "owner",
    GameTag.AURA: "aura",
    GameTag.BATTLECRY: "has_battlecry",
    GameTag.CANNOT_ATTACK_HEROES: "cannot_attack_heroes",
    GameTag.CANT_ATTACK: "cant_attack",
    GameTag.CANT_BE_ATTACKED: "cant_be_attacked",
    GameTag.CANT_BE_DAMAGED: "cant_be_damaged",
    GameTag.CANT_BE_TARGETED_BY_ABILITIES: "cant_be_targeted_by_abilities",
    GameTag.CANT_BE_TARGETED_BY_HERO_POWERS: "cant_be_targeted_by_hero_powers",
    GameTag.CANT_BE_TARGETED_BY_OPPONENTS: "cant_be_targeted_by_opponents",
    GameTag.CANT_PLAY: "cant_play",
    GameTag.CARD_ID: "id",
    GameTag.CARD_TARGET: "target",
    GameTag.CARDNAME: "name",
    GameTag.CARDRACE: "race",
    GameTag.CARDTYPE: "type",
    GameTag.CHARGE: "charge",
    GameTag.CLASS: "card_class",
    GameTag.COMBO: "has_combo",
    GameTag.CONTROLLER: "controller",
    GameTag.COST: "cost",
    GameTag.CREATOR: "creator",
    GameTag.DAMAGE: "damage",
    GameTag.DEATHRATTLE: "has_deathrattle",
    GameTag.DEFENDING: "defending",
    GameTag.DIVINE_SHIELD: "divine_shield",
    GameTag.DURABILITY: "max_durability",
    GameTag.EMBRACE_THE_SHADOW: "healing_as_damage",
    GameTag.ENRAGED: "enrage",
    GameTag.EXHAUSTED: "exhausted",
    GameTag.EXTRA_DEATHRATTLES: "extra_deathrattles",
    GameTag.FORGETFUL: "forgetful",
    GameTag.FROZEN: "frozen",
    GameTag.HEALING_DOUBLE: "healing_double",
    GameTag.HEALTH: "max_health",
    GameTag.HEALTH_MINIMUM: "min_health",
    GameTag.HEAVILY_ARMORED: "heavily_armored",
    GameTag.HEROPOWER_ADDITIONAL_ACTIVATIONS: "additional_activations",
    GameTag.HEROPOWER_DAMAGE: "heropower_damage",
    GameTag.INCOMING_DAMAGE_MULTIPLIER: "incoming_damage_multiplier",
    GameTag.ImmuneToSpellpower: "immune_to_spellpower",
    GameTag.IMMUNE_WHILE_ATTACKING: "immune_while_attacking",
    GameTag.INSPIRE: "has_inspire",
    GameTag.NUM_ATTACKS_THIS_TURN: "num_attacks",
    GameTag.NUM_TURNS_IN_PLAY: "turns_in_play",
    GameTag.TAG_ONE_TURN_EFFECT: "one_turn_effect",
    GameTag.OVERLOAD: "overload",
    GameTag.POISONOUS: "poisonous",
    GameTag.POWERED_UP: "powered_up",
    GameTag.RARITY: "rarity",
    GameTag.RECEIVES_DOUBLE_SPELLDAMAGE_BONUS: "receives_double_spelldamage_bonus",
    GameTag.SECRET: "secret",
    GameTag.SHADOWFORM: "shadowform",
    GameTag.SHOULDEXITCOMBAT: "should_exit_combat",
    GameTag.SILENCED: "silenced",
    GameTag.SPELLPOWER: "spellpower",
    GameTag.SPELLPOWER_DOUBLE: "spellpower_double",
    GameTag.SPELLS_COST_HEALTH: "spells_cost_health",
    GameTag.STEALTH: "stealthed",
    GameTag.TAG_AI_MUST_PLAY: "autocast",
    GameTag.HERO_POWER_DOUBLE: "hero_power_double",
    GameTag.TAUNT: "taunt",
    GameTag.WINDFURY: "windfury",
    GameTag.ZONE: "zone",
    GameTag.ZONE_POSITION: "zone_position",
    enums.ALWAYS_WINS_BRAWLS: "always_wins_brawls",
    enums.EXTRA_BATTLECRIES: "extra_battlecries",
    enums.KILLED_THIS_TURN: "killed_this_turn",
    GameTag.AFFECTED_BY_SPELL_POWER: None,
    GameTag.ARTISTNAME: None,
    GameTag.AttackVisualType: None,
    GameTag.CARD_SET: None,
    GameTag.CARDTEXT_INHAND: None,
    GameTag.CardTextInPlay: None,
    GameTag.Collectible: None,
    GameTag.DevState: None,
    GameTag.ELITE: None,
    GameTag.ENCHANTMENT_IDLE_VISUAL: None,
    GameTag.ENCHANTMENT_BIRTH_VISUAL: None,
    GameTag.EVIL_GLOW: None,
    GameTag.FACTION: None,
    GameTag.FLAVORTEXT: None,
    GameTag.FREEZE: None,
    GameTag.HealTarget: None,
    GameTag.HIDE_COST: None,
    GameTag.HOW_TO_EARN: None,
    GameTag.HOW_TO_EARN_GOLDEN: None,
    GameTag.InvisibleDeathrattle: None,
    GameTag.MORPH: None,
    GameTag.SILENCE: None,
    GameTag.SUMMONED: None,
    GameTag.SPARE_PART: None,
    GameTag.SHOWN_HERO_POWER: None,
    GameTag.TARGETING_ARROW_TEXT: None,
    GameTag.TOPDECK: None,
    # BUG FIX: GameTag.TAG_AI_MUST_PLAY appeared a second time here with
    # value None, silently overwriting the "autocast" mapping above.
    GameTag.TRIGGER_VISUAL: None,
}
class CardManager(Manager):
    # Exposes card entity attributes via the shared module-level tag map.
    map = CARD_ATTRIBUTE_MAP
|
smallnamespace/fireplace
|
fireplace/managers.py
|
Python
|
agpl-3.0
| 7,099
|
# -*- coding: utf-8 -*-
"""
flask.ext.babelex
~~~~~~~~~~~~~~~~~
Implements i18n/l10n support for Flask applications based on Babel.
:copyright: (c) 2013 by Serge S. Koval, Armin Ronacher and contributors.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
# this is a workaround for a snow leopard bug that babel does not
# work around :)
if os.environ.get('LC_CTYPE', '').lower() == 'utf-8':
os.environ['LC_CTYPE'] = 'en_US.utf-8'
from datetime import datetime
from flask import _request_ctx_stack
from babel import dates, numbers, support, Locale
from babel.support import NullTranslations
from werkzeug import ImmutableDict
try:
from pytz.gae import pytz
except ImportError:
from pytz import timezone, UTC
else:
timezone = pytz.timezone
UTC = pytz.UTC
from flask_babelex._compat import string_types
_DEFAULT_LOCALE = Locale.parse('en')
class Babel(object):
    """Central controller class that can be used to configure how
    Flask-Babel behaves.  Each application that wants to use Flask-Babel
    has to create, or run :meth:`init_app` on, an instance of this class
    after the configuration was initialized.
    """

    #: Default format names per kind; the ``kind.length`` entries override
    #: the plain ``kind`` entry when non-None (see init_app notes below).
    default_date_formats = ImmutableDict({
        'time': 'medium',
        'date': 'medium',
        'datetime': 'medium',
        'time.short': None,
        'time.medium': None,
        'time.full': None,
        'time.long': None,
        'date.short': None,
        'date.medium': None,
        'date.full': None,
        'date.long': None,
        'datetime.short': None,
        'datetime.medium': None,
        'datetime.full': None,
        'datetime.long': None,
    })

    def __init__(self, app=None, default_locale='en', default_timezone='UTC',
                 date_formats=None, configure_jinja=True, default_domain=None):
        self._default_locale = default_locale
        self._default_timezone = default_timezone
        self._date_formats = date_formats
        self._configure_jinja = configure_jinja
        self.app = app
        # Parsed-locale cache; see load_locale().
        self._locale_cache = dict()
        if default_domain is None:
            self._default_domain = Domain()
        else:
            self._default_domain = default_domain
        self.locale_selector_func = None
        self.timezone_selector_func = None
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Set up this instance for use with *app*, if no app was passed to
        the constructor.
        """
        self.app = app
        app.babel_instance = self
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['babel'] = self
        app.config.setdefault('BABEL_DEFAULT_LOCALE', self._default_locale)
        app.config.setdefault('BABEL_DEFAULT_TIMEZONE', self._default_timezone)
        if self._date_formats is None:
            self._date_formats = self.default_date_formats.copy()
        #: a mapping of Babel datetime format strings that can be modified
        #: to change the defaults.  If you invoke :func:`format_datetime`
        #: and do not provide any format string Flask-Babel will do the
        #: following things:
        #:
        #: 1.   look up ``date_formats['datetime']``.  By default ``'medium'``
        #:      is returned to enforce medium length datetime formats.
        #: 2.   ``date_formats['datetime.medium'] (if ``'medium'`` was
        #:      returned in step one) is looked up.  If the return value
        #:      is anything but `None` this is used as new format string.
        #:      otherwise the default for that language is used.
        self.date_formats = self._date_formats
        if self._configure_jinja:
            app.jinja_env.filters.update(
                datetimeformat=format_datetime,
                dateformat=format_date,
                timeformat=format_time,
                timedeltaformat=format_timedelta,
                numberformat=format_number,
                decimalformat=format_decimal,
                currencyformat=format_currency,
                percentformat=format_percent,
                scientificformat=format_scientific,
            )
            app.jinja_env.add_extension('jinja2.ext.i18n')
            app.jinja_env.install_gettext_callables(
                lambda x: get_domain().get_translations().ugettext(x),
                lambda s, p, n: get_domain().get_translations().ungettext(s, p, n),
                newstyle=True
            )

    def localeselector(self, f):
        """Registers a callback function for locale selection.  The default
        behaves as if a function was registered that returns `None` all the
        time.  If `None` is returned, the locale falls back to the one from
        the configuration.
        This has to return the locale as string (eg: ``'de_AT'``, ''`en_US`'')
        """
        assert self.locale_selector_func is None, \
            'a localeselector function is already registered'
        self.locale_selector_func = f
        return f

    def timezoneselector(self, f):
        """Registers a callback function for timezone selection.  The default
        behaves as if a function was registered that returns `None` all the
        time.  If `None` is returned, the timezone falls back to the one from
        the configuration.
        This has to return the timezone as string (eg: ``'Europe/Vienna'``)
        """
        assert self.timezone_selector_func is None, \
            'a timezoneselector function is already registered'
        self.timezone_selector_func = f
        return f

    def list_translations(self):
        """Returns a list of all the locales translations exist for.  The
        list returned will be filled with actual locale objects and not just
        strings.
        .. versionadded:: 0.6
        """
        dirname = os.path.join(self.app.root_path, 'translations')
        if not os.path.isdir(dirname):
            return []
        result = []
        for folder in os.listdir(dirname):
            locale_dir = os.path.join(dirname, folder, 'LC_MESSAGES')
            if not os.path.isdir(locale_dir):
                continue
            # BUG FIX: under Python 3 ``filter(...)`` returns a lazy
            # iterator, which is always truthy, so every folder with an
            # LC_MESSAGES directory was treated as a translation even
            # without compiled catalogs.  Test for .mo files explicitly.
            if any(name.endswith('.mo') for name in os.listdir(locale_dir)):
                result.append(Locale.parse(folder))
        if not result:
            result.append(Locale.parse(self._default_locale))
        return result

    @property
    def default_locale(self):
        """The default locale from the configuration as instance of a
        `babel.Locale` object.
        """
        return self.load_locale(self.app.config['BABEL_DEFAULT_LOCALE'])

    @property
    def default_timezone(self):
        """The default timezone from the configuration as instance of a
        `pytz.timezone` object.
        """
        return timezone(self.app.config['BABEL_DEFAULT_TIMEZONE'])

    def load_locale(self, locale):
        """Load locale by name and cache it. Returns instance of a `babel.Locale`
        object.
        """
        rv = self._locale_cache.get(locale)
        if rv is None:
            self._locale_cache[locale] = rv = Locale.parse(locale)
        return rv
def get_locale():
    """Return the locale to use for this request as a `babel.Locale`.

    Returns `None` outside of a request.  If flask-babel was not attached
    to the Flask application, the 'en' locale is returned.  The result is
    cached on the request context.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        return None
    locale = getattr(ctx, 'babel_locale', None)
    if locale is not None:
        return locale
    babel = ctx.app.extensions.get('babel')
    if babel is None:
        locale = _DEFAULT_LOCALE
    elif babel.locale_selector_func is None:
        locale = babel.default_locale
    else:
        rv = babel.locale_selector_func()
        locale = babel.default_locale if rv is None else babel.load_locale(rv)
    ctx.babel_locale = locale
    return locale
def get_timezone():
    """Returns the timezone that should be used for this request as
    `pytz.timezone` object.  This returns `None` if used outside of
    a request. If flask-babel was not attached to application, will
    return UTC timezone object.
    """
    ctx = _request_ctx_stack.top
    # BUG FIX: the docstring promises None outside a request (matching
    # get_locale), but the original fell through and raised an
    # AttributeError on ``ctx.app`` when ctx was None.
    if ctx is None:
        return None
    tzinfo = getattr(ctx, 'babel_tzinfo', None)
    if tzinfo is None:
        babel = ctx.app.extensions.get('babel')
        if babel is None:
            tzinfo = UTC
        else:
            if babel.timezone_selector_func is None:
                tzinfo = babel.default_timezone
            else:
                rv = babel.timezone_selector_func()
                if rv is None:
                    tzinfo = babel.default_timezone
                else:
                    # Selector may return either a zone name or a tzinfo.
                    if isinstance(rv, string_types):
                        tzinfo = timezone(rv)
                    else:
                        tzinfo = rv
        # Cache on the request context for subsequent calls.
        ctx.babel_tzinfo = tzinfo
    return tzinfo
def refresh():
    """Drop the cached locale and timezone for the current request.

    Use this to make a locale/timezone change take effect immediately
    instead of with the next request::
        user.timezone = request.form['timezone']
        user.locale = request.form['locale']
        refresh()
        flash(gettext('Language was changed'))
    Without the refresh, the :func:`~flask.flash` call would still use
    the previously cached locale.
    """
    ctx = _request_ctx_stack.top
    for cached_attr in ('babel_locale', 'babel_tzinfo'):
        if hasattr(ctx, cached_attr):
            delattr(ctx, cached_attr)
def _get_format(key, format):
    """Resolve a datetime format for *key* ('date', 'time', 'datetime').

    A None *format* falls back to the configured default for *key*; a
    named length ('short'/'medium'/'full'/'long') may then be overridden
    by a configured ``key.length`` entry.
    """
    babel = _request_ctx_stack.top.app.extensions.get('babel')
    formats = Babel.default_date_formats if babel is None else babel.date_formats
    if format is None:
        format = formats[key]
    if format in ('short', 'medium', 'full', 'long'):
        override = formats['%s.%s' % (key, format)]
        if override is not None:
            format = override
    return format
def to_user_timezone(datetime):
    """Convert *datetime* into the user's timezone (see get_timezone).

    Naive values are interpreted as UTC.  This is what all the date
    formatting helpers do automatically unless rebasing is disabled.
    """
    if datetime.tzinfo is None:
        datetime = datetime.replace(tzinfo=UTC)
    user_tz = get_timezone()
    return user_tz.normalize(datetime.astimezone(user_tz))
def to_utc(datetime):
    """Convert *datetime* to naive UTC — the inverse of
    :func:`to_user_timezone`.  Naive input is localized to the user's
    timezone first.
    """
    if datetime.tzinfo is None:
        datetime = get_timezone().localize(datetime)
    return datetime.astimezone(UTC).replace(tzinfo=None)
def format_datetime(datetime=None, format=None, rebase=True):
    """Format both date and time for the current locale.

    When *datetime* is None the current time is used.  With *rebase*
    (the default) the value is shifted into the user's timezone first.
    *format* may be ``'short'``/``'medium'``/``'long'``/``'full'`` (the
    language default or :attr:`Babel.date_formats` entry is then used)
    or an explicit Babel pattern.  Available in templates as the
    `datetimeformat` filter.
    """
    resolved = _get_format('datetime', format)
    return _date_format(dates.format_datetime, datetime, resolved, rebase)
def format_date(date=None, format=None, rebase=True):
    """Format only the date part of a value for the current locale.

    When *date* is None the current time is used.  With *rebase* a
    datetime value is shifted into the user's timezone first.  *format*
    may be ``'short'``/``'medium'``/``'long'``/``'full'`` (the language
    default or :attr:`Babel.date_formats` entry is then used) or an
    explicit Babel pattern.  Available in templates as the `dateformat`
    filter.
    """
    if rebase and isinstance(date, datetime):
        date = to_user_timezone(date)
    resolved = _get_format('date', format)
    return _date_format(dates.format_date, date, resolved, rebase)
def format_time(time=None, format=None, rebase=True):
    """Format a time for the current locale.

    When *time* is None the current time is used.  With *rebase* the
    value is shifted into the user's timezone first.  *format* may be
    ``'short'``/``'medium'``/``'long'``/``'full'`` (the language default
    or :attr:`Babel.date_formats` entry is then used) or an explicit
    Babel pattern.  Available in templates as the `timeformat` filter.
    """
    resolved = _get_format('time', format)
    return _date_format(dates.format_time, time, resolved, rebase)
def format_timedelta(datetime_or_timedelta, granularity='second'):
    """Format elapsed time, either from a datetime to now or from a
    timedelta, for the current locale.  Available in templates as the
    `timedeltaformat` filter.
    """
    delta = datetime_or_timedelta
    if isinstance(delta, datetime):
        delta = datetime.utcnow() - delta
    return dates.format_timedelta(delta, granularity, locale=get_locale())
def _date_format(formatter, obj, format, rebase, **extra):
    """Internal helper that formats the date.

    Extra keyword arguments are forwarded to *formatter*; when rebasing
    a datetime/time, the user's timezone is injected as ``tzinfo``.
    """
    locale = get_locale()
    # BUG FIX: the original reassigned ``extra = {}`` here, silently
    # discarding any keyword arguments accepted by the signature.
    if formatter is not dates.format_date and rebase:
        extra['tzinfo'] = get_timezone()
    return formatter(obj, format, locale=locale, **extra)
def format_number(number):
    """Return the given number formatted for the locale in request
    :param number: the number to format
    :return: the formatted number
    :rtype: unicode
    """
    return numbers.format_number(number, locale=get_locale())
def format_decimal(number, format=None):
    """Return the given decimal number formatted for the locale in request
    :param number: the number to format
    :param format: the format to use
    :return: the formatted number
    :rtype: unicode
    """
    return numbers.format_decimal(number, format=format, locale=get_locale())
def format_currency(number, currency, format=None):
    """Return the given number formatted for the locale in request
    :param number: the number to format
    :param currency: the currency code
    :param format: the format to use
    :return: the formatted number
    :rtype: unicode
    """
    return numbers.format_currency(
        number, currency, format=format, locale=get_locale()
    )
def format_percent(number, format=None):
    """Return formatted percent value for the locale in request
    :param number: the number to format
    :param format: the format to use
    :return: the formatted percent number
    :rtype: unicode
    """
    return numbers.format_percent(number, format=format, locale=get_locale())
def format_scientific(number, format=None):
    """Format *number* in scientific notation for the locale of the
    current request.

    :param number: the number to format
    :param format: the format to use
    :return: the formatted number
    :rtype: unicode
    """
    current_locale = get_locale()
    return numbers.format_scientific(number, format=format, locale=current_locale)
class Domain(object):
    """Localization domain. By default it looks for translations in the Flask
    application directory and the "messages" domain - all message catalogs
    should be called ``messages.mo``.
    """
    def __init__(self, dirname=None, domain='messages'):
        # Translations directory; None means <app root>/translations,
        # resolved lazily in get_translations_path().
        self.dirname = dirname
        # gettext domain, i.e. the catalog file name without ".mo".
        self.domain = domain
        # Loaded Translations objects, keyed by str(locale).
        self.cache = dict()
    def as_default(self):
        """Set this domain as default for the current request"""
        ctx = _request_ctx_stack.top
        if ctx is None:
            raise RuntimeError("No request context")
        # get_domain() consults ctx.babel_domain first, so this domain
        # wins for the remainder of the request.
        ctx.babel_domain = self
    def get_translations_cache(self, ctx):
        """Returns dictionary-like object for translation caching"""
        return self.cache
    def get_translations_path(self, ctx):
        """Returns translations directory path. Override if you want
        to implement custom behavior.
        """
        return self.dirname or os.path.join(ctx.app.root_path, 'translations')
    def get_translations(self):
        """Returns the correct gettext translations that should be used for
        this request. This will never fail and return a dummy translation
        object if used outside of the request or if a translation cannot be
        found.
        """
        ctx = _request_ctx_stack.top
        if ctx is None:
            # Outside of a request context: no-op catalog.
            return NullTranslations()
        locale = get_locale()
        cache = self.get_translations_cache(ctx)
        translations = cache.get(str(locale))
        if translations is None:
            dirname = self.get_translations_path(ctx)
            # Per the contract above, loading never raises for a missing
            # catalog; a dummy translations object is returned instead.
            translations = support.Translations.load(dirname,
                                                     locale,
                                                     domain=self.domain)
            cache[str(locale)] = translations
        return translations
    def gettext(self, string, **variables):
        """Translates a string with the current locale and passes in the
        given keyword arguments as mapping to a string formatting string.
        ::
            gettext(u'Hello World!')
            gettext(u'Hello %(name)s!', name='World')
        """
        t = self.get_translations()
        return t.ugettext(string) % variables
    def ngettext(self, singular, plural, num, **variables):
        """Translates a string with the current locale and passes in the
        given keyword arguments as mapping to a string formatting string.
        The `num` parameter is used to dispatch between singular and various
        plural forms of the message. It is available in the format string
        as ``%(num)d`` or ``%(num)s``. The source language should be
        English or a similar language which only has one plural form.
        ::
            ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
        """
        variables.setdefault('num', num)
        t = self.get_translations()
        return t.ungettext(singular, plural, num) % variables
    def pgettext(self, context, string, **variables):
        """Like :func:`gettext` but with a context.
        .. versionadded:: 0.7
        """
        t = self.get_translations()
        return t.upgettext(context, string) % variables
    def npgettext(self, context, singular, plural, num, **variables):
        """Like :func:`ngettext` but with a context.
        .. versionadded:: 0.7
        """
        variables.setdefault('num', num)
        t = self.get_translations()
        return t.unpgettext(context, singular, plural, num) % variables
    def lazy_gettext(self, string, **variables):
        """Like :func:`gettext` but the string returned is lazy which means
        it will be translated when it is used as an actual string.
        Example::
            hello = lazy_gettext(u'Hello World')
            @app.route('/')
            def index():
                return unicode(hello)
        """
        # Imported lazily so speaklater is only required when lazy
        # strings are actually used.
        from speaklater import make_lazy_string
        return make_lazy_string(self.gettext, string, **variables)
    def lazy_pgettext(self, context, string, **variables):
        """Like :func:`pgettext` but the string returned is lazy which means
        it will be translated when it is used as an actual string.
        .. versionadded:: 0.7
        """
        from speaklater import make_lazy_string
        return make_lazy_string(self.pgettext, context, string, **variables)
# This is the domain that will be used if there is no request context (and thus no app)
# or if the app isn't initialized for babel. Note that if there is no request context,
# then the standard Domain will use NullTranslations
domain = Domain()
def get_domain():
    """Return the translation domain in effect for this request.

    Falls back to the module-level default domain (e.g. "messages" in
    <approot>/translations) when there is no request context or no domain
    was explicitly selected for this request.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        return domain
    if hasattr(ctx, 'babel_domain'):
        return ctx.babel_domain
    babel_ext = ctx.app.extensions.get('babel')
    if babel_ext is None:
        selected = domain
    else:
        selected = babel_ext._default_domain
    # Remember the choice on the request context so the lookup only
    # happens once per request.
    ctx.babel_domain = selected
    return selected
# Create shortcuts for the default Flask domain
def gettext(*args, **kwargs):
    """Translate a string using the domain selected for this request."""
    active_domain = get_domain()
    return active_domain.gettext(*args, **kwargs)
_ = gettext
def ngettext(*args, **kwargs):
    """Plural-aware translation via the request's active domain."""
    active_domain = get_domain()
    return active_domain.ngettext(*args, **kwargs)
def pgettext(*args, **kwargs):
    """Context-aware translation via the request's active domain."""
    active_domain = get_domain()
    return active_domain.pgettext(*args, **kwargs)
def npgettext(*args, **kwargs):
    """Context- and plural-aware translation via the active domain."""
    active_domain = get_domain()
    return active_domain.npgettext(*args, **kwargs)
def lazy_gettext(*args, **kwargs):
    """Lazy variant of :func:`gettext` using the active domain."""
    active_domain = get_domain()
    return active_domain.lazy_gettext(*args, **kwargs)
def lazy_pgettext(*args, **kwargs):
    """Lazy variant of :func:`pgettext` using the active domain."""
    active_domain = get_domain()
    return active_domain.lazy_pgettext(*args, **kwargs)
|
initNirvana/Easyphotos
|
env/lib/python3.4/site-packages/flask_babelex/__init__.py
|
Python
|
mit
| 22,503
|
#!/usr/bin/env python
__author__ = 'ilkin safarli'
import unittest
from Classifier import Classifier
class TestClassifier(unittest.TestCase):
    """End-to-end smoke test: train the Classifier and check the ranked
    predictions it returns for the bundled sample input."""
    def test_predict(self):
        x = Classifier()
        x.train()
        # NOTE(review): assumes predict("train", "directory") returns
        # (label, rank) pairs ordered as below -- confirm against the
        # Classifier.predict contract, which is not visible here.
        predicted = x.predict("train", "directory")
        actual = [(u'intermediate test', 2), (u'elementary test', 1), (u'advanced test', 0)]
        self.assertEqual(predicted, actual)
if __name__ == '__main__':
    unittest.main()
|
kinimesi/rscore
|
unit_tests/test_Classifier.py
|
Python
|
apache-2.0
| 419
|
# coding: utf-8
from fabkit import env, sudo, filer
from fablib.python import Python
from fablib.base import SimpleBase
import utils
class Barbican(SimpleBase):
    """Fabric deployment recipe for the OpenStack Barbican service,
    driven by fabkit tags (package / conf / data / service)."""
    def __init__(self):
        self.data_key = 'barbican'
        self.data = {
        }
        # Services managed by the conf/service tags below.
        self.services = [
            'barbican-api',
        ]
    def init_before(self):
        # Package definition comes from the cluster-wide OS package map.
        self.package = env['cluster']['os_package_map']['barbican']
        # NOTE(review): '/opt/ironic' as the fallback prefix for a
        # *barbican* deployment looks like a copy-paste from an ironic
        # recipe -- confirm the intended default.
        self.prefix = self.package.get('prefix', '/opt/ironic')
        self.python = Python(self.prefix)
    def init_after(self):
        # Template data for barbican.conf rendering in setup().
        self.data.update({
            'keystone': env.cluster['keystone'],
            'neutron': env.cluster['neutron'],
            'my_ip': env.node['ip']['default_dev']['ip'],
        })
    def setup(self):
        data = self.init()
        if self.is_tag('package'):
            self.python.setup()
            self.python.setup_package(**self.package)
        if self.is_tag('conf'):
            # setup conf files; filer.template returns truthy when the
            # rendered file changed, which triggers a service restart.
            if filer.template(
                    '/etc/barbican/barbican.conf',
                    src='{0}/barbican.conf.j2'.format(data['version']),
                    data=data):
                self.handlers['restart_barbican-*'] = True
        # Run DB migrations from the first host only to avoid races.
        if self.is_tag('data') and env.host == env.hosts[0]:
            sudo('{0}/bin/barbican-db-manage '
                 'upgrade'.format(self.prefix))
        if self.is_tag('conf', 'service'):
            self.enable_services().start_services(pty=False)
            self.exec_handlers()
    def cmd(self, cmd):
        self.init()
        # NOTE(review): this invokes the 'ironic' CLI, not 'barbican' --
        # likely a copy-paste leftover; verify the intended client command.
        return utils.oscmd('ironic {0}'.format(cmd))
|
fabrickit-fablib/openstack
|
barbican.py
|
Python
|
mit
| 1,614
|
# Copyright (c) 2020, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from erpnext.regional.address_template.setup import set_up_address_templates
def execute():
    """Patch: regenerate the India address template when it predates GSTIN.

    Runs only when at least one Company is located in India; re-creates the
    default address templates if the current India template is missing or
    does not mention "gstin".
    """
    if not frappe.db.get_value('Company', {'country': 'India'}, 'name'):
        return
    address_template = frappe.db.get_value('Address Template', 'India', 'template')
    if address_template and "gstin" in address_template:
        return
    set_up_address_templates(default_country='India')
|
frappe/erpnext
|
erpnext/patches/v12_0/update_address_template_for_india.py
|
Python
|
gpl-3.0
| 480
|
# Task 5. Variant 48
# A program that, when run, randomly displays the name of one of the eight
# categories into which road signs are divided according to the Vienna
# Convention on Road Signs and Signals.
# Generalov K. A.
import random
# The category names below are runtime output and stay in Russian.
a = random.choice(['Предупреждающие знаки.','Знаки преимущественного права проезда.','Запрещающие и ограничивающие знаки.', 'Предписывающие знаки.', 'Знаки особых предписаний.', 'Информационные знаки, знаки, обозначающие объекты и знаки сервиса.', 'Указатели направлений и информационно-указательные знаки.', 'Дополнительные таблички.'])
print(a)
# Prompt (in Russian): "Press Enter to finish".
input("\nВведите Enter для завершения")
|
Mariaanisimova/pythonintask
|
PINp/2014/Generalov_K_A/task_5_48.py
|
Python
|
apache-2.0
| 1,127
|
import string
__doc__ = """
label = iolabel(itracer)
maps integers 1..3843 to length-two strings:
1..99 => 01..99
100..619 => 0a..0Z,1a..9Z
620..3843 => aa..ZZ
itracer = iolabel2num(label)
does the inverse.
"""
# Character sets used to compose the 2-character tracer labels (see iolabel):
_iolabel_set10 = string.digits  # '0'..'9'
_iolabel_set52 = string.ascii_letters  # 'a'..'z' then 'A'..'Z'
_iolabel_set62 = _iolabel_set10 + _iolabel_set52  # digits first, then letters
def iolabel(i):
    """Map tracer number (1..3843) to a 2-character I/O label.

    1..99    -> '01'..'99'
    100..619 -> '0a'..'9Z'   (digit + letter)
    620..3843 -> 'a0'..'ZZ'  (letter + alphanumeric)

    Raises ValueError for numbers above 3843.
    """
    # Same alphabets as the module-level _iolabel_set52/_iolabel_set62.
    letters = string.ascii_letters
    alnum = string.digits + string.ascii_letters
    if i < 100:
        return '{0:02d}'.format(i)
    elif i < 620:  # 100 + 10*52
        a, b = divmod(i - 100, 52)
        return '{0:d}{1:s}'.format(a, letters[b])
    elif i < 3844:  # 100 + 10*52 + 52*62
        a, b = divmod(i - 620, 62)
        return '{0:s}{1:s}'.format(letters[a], alnum[b])
    else:
        # BUGFIX: the message previously said 3883; the real limit is 3843.
        raise ValueError('Tracer numbers > 3843 not supported.')
def iolabel2num(s):
    """Map a 2-character I/O label back to its tracer number.

    Inverse of :func:`iolabel`. Raises ValueError when *s* is not exactly
    two characters or contains characters outside the label alphabets.
    """
    # BUGFIX: was an ``assert``, which is silently stripped under -O;
    # validate explicitly instead.
    if len(s) != 2:
        raise ValueError('I/O label must be exactly 2 characters: %r' % (s,))
    # Same alphabets as the module-level _iolabel_set52/_iolabel_set62.
    letters = string.ascii_letters
    alnum = string.digits + string.ascii_letters
    try:
        i = int(s)  # '01'..'99' range
    except ValueError:
        try:
            i1 = int(s[0])
        except ValueError:
            # letter + alphanumeric: 620..3843
            i1 = letters.index(s[0])
            i2 = alnum.index(s[1])
            i = 620 + i1 * 62 + i2
        else:
            # digit + letter: 100..619
            i2 = letters.index(s[1])
            i = 100 + i1 * 52 + i2
    return i
|
altMITgcm/MITgcm66h
|
utils/python/MITgcmutils/MITgcmutils/ptracers.py
|
Python
|
mit
| 1,321
|
import os
import socket
import config
def handle_cipher():
    """Forge the captured login message so that isAdmin becomes true.

    Reads the original login message from data/msg.txt, decrypts its RSA
    payload with the captured private key via openssl, flips the isAdmin
    flag in the JSON login blob, re-encrypts with the public key, and
    writes the forged message (original 8-char header + new ciphertext)
    to data/newMsg.txt.

    All file handles use ``with`` so they are closed even if a step fails
    (the original left handles open on any exception between open/close).
    """
    # msg is the original captured login message
    with open('data/msg.txt', 'r') as f:
        msg = f.read()
    # First 8 chars are a header; the remainder is the RSA ciphertext.
    cipherText = msg[8:]
    with open('data/ciphertext.txt', 'w') as f:
        f.write(cipherText)
    # Decrypt the cipher with the captured private key.
    key = 'key/private.pem'
    fin = 'data/ciphertext.txt'
    fout = 'data/plaintext.txt'
    cmd = 'openssl rsautl -decrypt -inkey ' + key + ' -in ' + fin + ' -out ' + fout
    os.system(cmd)
    # Plaintext layout (offsets 75/69 below depend on it exactly):
    # {"username":"NetworkSecurity","password":"projectissoeasy","isAdmin":false}<64-hex ticket>
    with open('data/plaintext.txt', 'r') as f:
        plainText = f.read()
    # Split off the trailing ticket and flip isAdmin false -> true.
    ticket = plainText[75:]
    loginInfo = plainText[:69] + 'true}'
    newPlaintext = loginInfo + ticket
    with open('data/newPlaintext.txt', 'w') as f:
        f.write(newPlaintext)
    # Re-encrypt the forged plaintext with the server's public key.
    key = 'key/public.pem'
    fin = 'data/newPlaintext.txt'
    fout = 'data/newCiphertext.txt'
    cmd = 'openssl rsautl -encrypt -pubin -inkey ' + key + ' -in ' + fin + ' -out ' + fout
    os.system(cmd)
    with open('data/newCiphertext.txt', 'r') as f:
        newCipher = f.read()
    # Re-attach the original 8-char header and store the forged message.
    with open('data/newMsg.txt', 'w') as f:
        f.write(msg[:8] + newCipher)
def main():
# handle the original cipher msg and
# generate the new cypher msg which allows us to login as admin
handle_cipher()
# read the result of handle cipher
f = open('data/newMsg.txt', 'r')
msg = f.read()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (config.HOST, config.PORT)
print 'connecting to %s port %s' % server_address
sock.connect(server_address)
sock.sendall(msg) # replay login message
sock.sendall('ls\r\n') # to list the file
sock.sendall('cat flag2\r\n') # to get flag1
# send login
while True:
data = sock.recv(1024)
if data:
print data
print 'Close the socket!'
sock.close()
if __name__ == '__main__':
main()
|
Plummy-Panda/MITM-V
|
mitm.py
|
Python
|
mit
| 2,562
|
#! /usr/bin/python
# photoBooth1
# Samir Saidi, Momona Yamagami
# Prompts for a NetID, takes a photo with the Raspberry Pi camera, saves
# it under the user's NetID, and records the image path and NetID in two
# text files for downstream scripts.
import time
#import numpy as np
#import cv2
import picamera
import datetime
from time import strftime
#import
name2 = raw_input("Enter your NetID (ex. aaa1): ")
#Time1= strftime("%Y-%m-%d-%H_%M_%S")
with picamera.PiCamera() as camera:
    camera.resolution = (1024, 768)
    camera.start_preview()
    # camera warm-up time
    time.sleep(2)
    # Photo is named after the NetID entered above.
    name1 = "/home/pi/Picture-Yo-self/code/pictures/" + name2 + ".jpg"
    camera.capture(name1)
#cv2.imshow('image',name1)
# Record the captured image path for downstream scripts.
f = open('/home/pi/Picture-Yo-self/code/pictures/picName.txt','w')
f.write(name1)
f.close()
# Record the NetID (used as the email handle by downstream scripts --
# presumably; confirm against the consumer of email.txt).
f = open('/home/pi/Picture-Yo-self/code/pictures/email.txt','w')
f.write(name2)
f.close()
|
PictureYo-self/Picture-Yo-self
|
code/useless/capture.py
|
Python
|
gpl-2.0
| 698
|
"""
Django settings for hashhacks2 project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY: this key is committed to source control -- rotate it and load it
# from the environment (e.g. os.environ['SECRET_KEY']) before any deployment.
SECRET_KEY = '=ka)r#$$xp14liw(6vfy$h!$s&l)^i+nin7x#xf2=$te4d8h=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY: "*" accepts any Host header; restrict to real hostnames outside dev.
ALLOWED_HOSTS = [u'1f7ec466.ngrok.io', "*"]
#ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#custom_apps
'APIs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hashhacks2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hashhacks2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
papajijaat/HashHacks2.0-methOD
|
backend/hashhacks2/hashhacks2/settings.py
|
Python
|
mit
| 3,183
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.