| code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k) |
|---|---|---|---|
def search(self, keyword, types=[], terr=KKBOXTerritory.TAIWAN):
'''
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
'''
url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
if len(types) > 0:
url += '&type=' + ','.join(types)
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
def function[search, parameter[self, keyword, types, terr]]:
constant[
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
]
variable[url] assign[=] constant[https://api.kkbox.com/v1.1/search]
<ast.AugAssign object at 0x7da1b0ef57e0>
if compare[call[name[len], parameter[name[types]]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b0ef7700>
return[call[name[self].http._post_data, parameter[name[url], constant[None], call[name[self].http._headers_with_access_token, parameter[]]]]]
|
keyword[def] identifier[search] ( identifier[self] , identifier[keyword] , identifier[types] =[], identifier[terr] = identifier[KKBOXTerritory] . identifier[TAIWAN] ):
literal[string]
identifier[url] = literal[string]
identifier[url] += literal[string] + identifier[url_parse] . identifier[urlencode] ({ literal[string] : identifier[keyword] , literal[string] : identifier[terr] })
keyword[if] identifier[len] ( identifier[types] )> literal[int] :
identifier[url] += literal[string] + literal[string] . identifier[join] ( identifier[types] )
keyword[return] identifier[self] . identifier[http] . identifier[_post_data] ( identifier[url] , keyword[None] , identifier[self] . identifier[http] . identifier[_headers_with_access_token] ())
|
def search(self, keyword, types=[], terr=KKBOXTerritory.TAIWAN):
"""
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
"""
url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
if len(types) > 0:
url += '&type=' + ','.join(types) # depends on [control=['if'], data=[]]
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
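A standalone sketch of the request URL `search` builds above. The territory value `'TW'` is an assumption standing in for `KKBOXTerritory.TAIWAN`, which this row does not define:

```python
from urllib import parse as url_parse

keyword, terr, types = 'love', 'TW', ['track', 'album']  # 'TW' is assumed
url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
if len(types) > 0:
    url += '&type=' + ','.join(types)
print(url)
# https://api.kkbox.com/v1.1/search?q=love&territory=TW&type=track,album
```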
def code128(self, data, **kwargs):
"""Renders given ``data`` as **Code 128** barcode symbology.
:param str codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set.
"""
if not re.match(r'^[\x20-\x7F]+$', data):
raise ValueError('Invalid Code 128 symbology. Code 128 can encode '
'any ASCII character ranging from 32 (20h) to 127 (7Fh); '
'got {!r}'.format(data))
codeset = kwargs.pop('codeset', barcode.CODE128_A)
barcode.validate_barcode_args(**kwargs)
return self._code128_impl(data, codeset=codeset, **kwargs)
|
def function[code128, parameter[self, data]]:
constant[Renders given ``data`` as **Code 128** barcode symbology.
:param str codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set.
]
if <ast.UnaryOp object at 0x7da2041dab90> begin[:]
<ast.Raise object at 0x7da2041d9e40>
variable[codeset] assign[=] call[name[kwargs].pop, parameter[constant[codeset], name[barcode].CODE128_A]]
call[name[barcode].validate_barcode_args, parameter[]]
return[call[name[self]._code128_impl, parameter[name[data]]]]
|
keyword[def] identifier[code128] ( identifier[self] , identifier[data] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[data] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[data] ))
identifier[codeset] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[barcode] . identifier[CODE128_A] )
identifier[barcode] . identifier[validate_barcode_args] (** identifier[kwargs] )
keyword[return] identifier[self] . identifier[_code128_impl] ( identifier[data] , identifier[codeset] = identifier[codeset] ,** identifier[kwargs] )
|
def code128(self, data, **kwargs):
"""Renders given ``data`` as **Code 128** barcode symbology.
:param str codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set.
"""
if not re.match('^[\\x20-\\x7F]+$', data):
raise ValueError('Invalid Code 128 symbology. Code 128 can encode any ASCII character ranging from 32 (20h) to 127 (7Fh); got {!r}'.format(data)) # depends on [control=['if'], data=[]]
codeset = kwargs.pop('codeset', barcode.CODE128_A)
barcode.validate_barcode_args(**kwargs)
return self._code128_impl(data, codeset=codeset, **kwargs)
|
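The guard in `code128` is a plain ASCII-range check; here it is standalone. The character class `[\x20-\x7F]` accepts exactly codes 32 through 127, matching the error message above:

```python
import re

pattern = re.compile(r'^[\x20-\x7F]+$')
print(bool(pattern.match('HELLO 128')))  # True: all chars in 32..127
print(bool(pattern.match('caf\u00e9')))  # False: 0xE9 falls outside the range
```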
def build_for_each(self, db, safe_mode=False, extra=None):
"""Builds the for-each context."""
result = dict()
for var, query in iteritems(self.for_each):
result[var] = db.query(
query,
additional_locals=extra,
safe_mode=safe_mode
)
return result
|
def function[build_for_each, parameter[self, db, safe_mode, extra]]:
constant[Builds the for-each context.]
variable[result] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b11d4fa0>, <ast.Name object at 0x7da1b11d6fb0>]]] in starred[call[name[iteritems], parameter[name[self].for_each]]] begin[:]
call[name[result]][name[var]] assign[=] call[name[db].query, parameter[name[query]]]
return[name[result]]
|
keyword[def] identifier[build_for_each] ( identifier[self] , identifier[db] , identifier[safe_mode] = keyword[False] , identifier[extra] = keyword[None] ):
literal[string]
identifier[result] = identifier[dict] ()
keyword[for] identifier[var] , identifier[query] keyword[in] identifier[iteritems] ( identifier[self] . identifier[for_each] ):
identifier[result] [ identifier[var] ]= identifier[db] . identifier[query] (
identifier[query] ,
identifier[additional_locals] = identifier[extra] ,
identifier[safe_mode] = identifier[safe_mode]
)
keyword[return] identifier[result]
|
def build_for_each(self, db, safe_mode=False, extra=None):
"""Builds the for-each context."""
result = dict()
for (var, query) in iteritems(self.for_each):
result[var] = db.query(query, additional_locals=extra, safe_mode=safe_mode) # depends on [control=['for'], data=[]]
return result
|
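A toy run of the same expansion with a hypothetical stand-in for `db` (`iteritems` above is Python 2; `dict.items()` is the Python 3 spelling):

```python
class StubDB:  # hypothetical; mirrors the query() call signature above
    def query(self, query, additional_locals=None, safe_mode=False):
        return 'rows for %r' % query

db = StubDB()
for_each = {'users': 'all users', 'orders': 'open orders'}
result = {var: db.query(q, additional_locals=None, safe_mode=True)
          for var, q in for_each.items()}
print(result)
# {'users': "rows for 'all users'", 'orders': "rows for 'open orders'"}
```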
def _get_span_name(servicer_context):
"""Generates a span name based off of the gRPC server rpc_request_info"""
method_name = servicer_context._rpc_event.call_details.method[1:]
if isinstance(method_name, bytes):
method_name = method_name.decode('utf-8')
method_name = method_name.replace('/', '.')
return '{}.{}'.format(RECV_PREFIX, method_name)
|
def function[_get_span_name, parameter[servicer_context]]:
constant[Generates a span name based off of the gRPC server rpc_request_info]
variable[method_name] assign[=] call[name[servicer_context]._rpc_event.call_details.method][<ast.Slice object at 0x7da2045660e0>]
if call[name[isinstance], parameter[name[method_name], name[bytes]]] begin[:]
variable[method_name] assign[=] call[name[method_name].decode, parameter[constant[utf-8]]]
variable[method_name] assign[=] call[name[method_name].replace, parameter[constant[/], constant[.]]]
return[call[constant[{}.{}].format, parameter[name[RECV_PREFIX], name[method_name]]]]
|
keyword[def] identifier[_get_span_name] ( identifier[servicer_context] ):
literal[string]
identifier[method_name] = identifier[servicer_context] . identifier[_rpc_event] . identifier[call_details] . identifier[method] [ literal[int] :]
keyword[if] identifier[isinstance] ( identifier[method_name] , identifier[bytes] ):
identifier[method_name] = identifier[method_name] . identifier[decode] ( literal[string] )
identifier[method_name] = identifier[method_name] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[RECV_PREFIX] , identifier[method_name] )
|
def _get_span_name(servicer_context):
"""Generates a span name based off of the gRPC server rpc_request_info"""
method_name = servicer_context._rpc_event.call_details.method[1:]
if isinstance(method_name, bytes):
method_name = method_name.decode('utf-8') # depends on [control=['if'], data=[]]
method_name = method_name.replace('/', '.')
return '{}.{}'.format(RECV_PREFIX, method_name)
|
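The same name mangling shown standalone. gRPC hands the method path over as bytes like `b'/pkg.Service/Method'`; `RECV_PREFIX` is assumed to be `'recv'` here, since the snippet above does not define it:

```python
RECV_PREFIX = 'recv'  # assumption; the real constant lives elsewhere

method_name = b'/helloworld.Greeter/SayHello'[1:]
if isinstance(method_name, bytes):
    method_name = method_name.decode('utf-8')
method_name = method_name.replace('/', '.')
print('{}.{}'.format(RECV_PREFIX, method_name))
# recv.helloworld.Greeter.SayHello
```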
def draw(self):
"""
Draw guide
Returns
-------
out : matplotlib.offsetbox.Offsetbox
A drawing of this legend
"""
obverse = slice(0, None)
reverse = slice(None, None, -1)
width = self.barwidth
height = self.barheight
nbars = len(self.bar)
length = height
direction = self.direction
colors = self.bar['color'].tolist()
labels = self.key['label'].tolist()
themeable = self.theme.figure._themeable
# When there is more than one guide, we keep
# record of all of them using lists
if 'legend_title' not in themeable:
themeable['legend_title'] = []
if 'legend_text_colorbar' not in themeable:
themeable['legend_text_colorbar'] = []
# .5 puts the ticks in the middle of the bars when
# raster=False. So when raster=True the ticks are
# in between interpolation points and the matching is
# close though not exactly right.
_from = self.bar['value'].min(), self.bar['value'].max()
tick_locations = rescale(self.key['value'],
(.5, nbars-.5),
_from) * length/nbars
if direction == 'horizontal':
width, height = height, width
length = width
if self.reverse:
colors = colors[::-1]
labels = labels[::-1]
tick_locations = length - tick_locations[::-1]
# title #
title_box = TextArea(self.title,
textprops=dict(color='black'))
themeable['legend_title'].append(title_box)
# colorbar and ticks #
da = ColoredDrawingArea(width, height, 0, 0)
if self.raster:
add_interpolated_colorbar(da, colors, direction)
else:
add_segmented_colorbar(da, colors, direction)
if self.ticks:
_locations = tick_locations
if not self.draw_ulim:
_locations = _locations[:-1]
if not self.draw_llim:
_locations = _locations[1:]
add_ticks(da, _locations, direction)
# labels #
if self.label:
labels_da, legend_text = create_labels(da, labels,
tick_locations,
direction)
themeable['legend_text_colorbar'].extend(legend_text)
else:
labels_da = ColoredDrawingArea(0, 0)
# colorbar + labels #
if direction == 'vertical':
packer, align = HPacker, 'bottom'
align = 'center'
else:
packer, align = VPacker, 'right'
align = 'center'
slc = obverse if self.label_position == 'right' else reverse
if self.label_position in ('right', 'bottom'):
slc = obverse
else:
slc = reverse
main_box = packer(children=[da, labels_da][slc],
sep=self._label_margin,
align=align,
pad=0)
# title + colorbar(with labels) #
lookup = {
'right': (HPacker, reverse),
'left': (HPacker, obverse),
'bottom': (VPacker, reverse),
'top': (VPacker, obverse)}
packer, slc = lookup[self.title_position]
children = [title_box, main_box][slc]
box = packer(children=children,
sep=self._title_margin,
align=self._title_align,
pad=0)
return box
|
def function[draw, parameter[self]]:
constant[
Draw guide
Returns
-------
out : matplotlib.offsetbox.Offsetbox
A drawing of this legend
]
variable[obverse] assign[=] call[name[slice], parameter[constant[0], constant[None]]]
variable[reverse] assign[=] call[name[slice], parameter[constant[None], constant[None], <ast.UnaryOp object at 0x7da18f7235b0>]]
variable[width] assign[=] name[self].barwidth
variable[height] assign[=] name[self].barheight
variable[nbars] assign[=] call[name[len], parameter[name[self].bar]]
variable[length] assign[=] name[height]
variable[direction] assign[=] name[self].direction
variable[colors] assign[=] call[call[name[self].bar][constant[color]].tolist, parameter[]]
variable[labels] assign[=] call[call[name[self].key][constant[label]].tolist, parameter[]]
variable[themeable] assign[=] name[self].theme.figure._themeable
if compare[constant[legend_title] <ast.NotIn object at 0x7da2590d7190> name[themeable]] begin[:]
call[name[themeable]][constant[legend_title]] assign[=] list[[]]
if compare[constant[legend_text_colorbar] <ast.NotIn object at 0x7da2590d7190> name[themeable]] begin[:]
call[name[themeable]][constant[legend_text_colorbar]] assign[=] list[[]]
variable[_from] assign[=] tuple[[<ast.Call object at 0x7da18f721780>, <ast.Call object at 0x7da18f723220>]]
variable[tick_locations] assign[=] binary_operation[binary_operation[call[name[rescale], parameter[call[name[self].key][constant[value]], tuple[[<ast.Constant object at 0x7da18f7235e0>, <ast.BinOp object at 0x7da18f721f60>]], name[_from]]] * name[length]] / name[nbars]]
if compare[name[direction] equal[==] constant[horizontal]] begin[:]
<ast.Tuple object at 0x7da18f723100> assign[=] tuple[[<ast.Name object at 0x7da18f7233a0>, <ast.Name object at 0x7da18f720100>]]
variable[length] assign[=] name[width]
if name[self].reverse begin[:]
variable[colors] assign[=] call[name[colors]][<ast.Slice object at 0x7da18f722620>]
variable[labels] assign[=] call[name[labels]][<ast.Slice object at 0x7da18f720580>]
variable[tick_locations] assign[=] binary_operation[name[length] - call[name[tick_locations]][<ast.Slice object at 0x7da18f720e20>]]
variable[title_box] assign[=] call[name[TextArea], parameter[name[self].title]]
call[call[name[themeable]][constant[legend_title]].append, parameter[name[title_box]]]
variable[da] assign[=] call[name[ColoredDrawingArea], parameter[name[width], name[height], constant[0], constant[0]]]
if name[self].raster begin[:]
call[name[add_interpolated_colorbar], parameter[name[da], name[colors], name[direction]]]
if name[self].ticks begin[:]
variable[_locations] assign[=] name[tick_locations]
if <ast.UnaryOp object at 0x7da18f722260> begin[:]
variable[_locations] assign[=] call[name[_locations]][<ast.Slice object at 0x7da18f723d30>]
if <ast.UnaryOp object at 0x7da18f720910> begin[:]
variable[_locations] assign[=] call[name[_locations]][<ast.Slice object at 0x7da18f721660>]
call[name[add_ticks], parameter[name[da], name[_locations], name[direction]]]
if name[self].label begin[:]
<ast.Tuple object at 0x7da18f7228c0> assign[=] call[name[create_labels], parameter[name[da], name[labels], name[tick_locations], name[direction]]]
call[call[name[themeable]][constant[legend_text_colorbar]].extend, parameter[name[legend_text]]]
if compare[name[direction] equal[==] constant[vertical]] begin[:]
<ast.Tuple object at 0x7da18f720e80> assign[=] tuple[[<ast.Name object at 0x7da204620f40>, <ast.Constant object at 0x7da2046202e0>]]
variable[align] assign[=] constant[center]
variable[slc] assign[=] <ast.IfExp object at 0x7da204622260>
if compare[name[self].label_position in tuple[[<ast.Constant object at 0x7da204961000>, <ast.Constant object at 0x7da204960cd0>]]] begin[:]
variable[slc] assign[=] name[obverse]
variable[main_box] assign[=] call[name[packer], parameter[]]
variable[lookup] assign[=] dictionary[[<ast.Constant object at 0x7da207f997b0>, <ast.Constant object at 0x7da207f9bf70>, <ast.Constant object at 0x7da207f99420>, <ast.Constant object at 0x7da207f9a110>], [<ast.Tuple object at 0x7da207f98340>, <ast.Tuple object at 0x7da207f9bfd0>, <ast.Tuple object at 0x7da207f9b1f0>, <ast.Tuple object at 0x7da207f9b4c0>]]
<ast.Tuple object at 0x7da20c990dc0> assign[=] call[name[lookup]][name[self].title_position]
variable[children] assign[=] call[list[[<ast.Name object at 0x7da20c991ff0>, <ast.Name object at 0x7da20c993610>]]][name[slc]]
variable[box] assign[=] call[name[packer], parameter[]]
return[name[box]]
|
keyword[def] identifier[draw] ( identifier[self] ):
literal[string]
identifier[obverse] = identifier[slice] ( literal[int] , keyword[None] )
identifier[reverse] = identifier[slice] ( keyword[None] , keyword[None] ,- literal[int] )
identifier[width] = identifier[self] . identifier[barwidth]
identifier[height] = identifier[self] . identifier[barheight]
identifier[nbars] = identifier[len] ( identifier[self] . identifier[bar] )
identifier[length] = identifier[height]
identifier[direction] = identifier[self] . identifier[direction]
identifier[colors] = identifier[self] . identifier[bar] [ literal[string] ]. identifier[tolist] ()
identifier[labels] = identifier[self] . identifier[key] [ literal[string] ]. identifier[tolist] ()
identifier[themeable] = identifier[self] . identifier[theme] . identifier[figure] . identifier[_themeable]
keyword[if] literal[string] keyword[not] keyword[in] identifier[themeable] :
identifier[themeable] [ literal[string] ]=[]
keyword[if] literal[string] keyword[not] keyword[in] identifier[themeable] :
identifier[themeable] [ literal[string] ]=[]
identifier[_from] = identifier[self] . identifier[bar] [ literal[string] ]. identifier[min] (), identifier[self] . identifier[bar] [ literal[string] ]. identifier[max] ()
identifier[tick_locations] = identifier[rescale] ( identifier[self] . identifier[key] [ literal[string] ],
( literal[int] , identifier[nbars] - literal[int] ),
identifier[_from] )* identifier[length] / identifier[nbars]
keyword[if] identifier[direction] == literal[string] :
identifier[width] , identifier[height] = identifier[height] , identifier[width]
identifier[length] = identifier[width]
keyword[if] identifier[self] . identifier[reverse] :
identifier[colors] = identifier[colors] [::- literal[int] ]
identifier[labels] = identifier[labels] [::- literal[int] ]
identifier[tick_locations] = identifier[length] - identifier[tick_locations] [::- literal[int] ]
identifier[title_box] = identifier[TextArea] ( identifier[self] . identifier[title] ,
identifier[textprops] = identifier[dict] ( identifier[color] = literal[string] ))
identifier[themeable] [ literal[string] ]. identifier[append] ( identifier[title_box] )
identifier[da] = identifier[ColoredDrawingArea] ( identifier[width] , identifier[height] , literal[int] , literal[int] )
keyword[if] identifier[self] . identifier[raster] :
identifier[add_interpolated_colorbar] ( identifier[da] , identifier[colors] , identifier[direction] )
keyword[else] :
identifier[add_segmented_colorbar] ( identifier[da] , identifier[colors] , identifier[direction] )
keyword[if] identifier[self] . identifier[ticks] :
identifier[_locations] = identifier[tick_locations]
keyword[if] keyword[not] identifier[self] . identifier[draw_ulim] :
identifier[_locations] = identifier[_locations] [:- literal[int] ]
keyword[if] keyword[not] identifier[self] . identifier[draw_llim] :
identifier[_locations] = identifier[_locations] [ literal[int] :]
identifier[add_ticks] ( identifier[da] , identifier[_locations] , identifier[direction] )
keyword[if] identifier[self] . identifier[label] :
identifier[labels_da] , identifier[legend_text] = identifier[create_labels] ( identifier[da] , identifier[labels] ,
identifier[tick_locations] ,
identifier[direction] )
identifier[themeable] [ literal[string] ]. identifier[extend] ( identifier[legend_text] )
keyword[else] :
identifier[labels_da] = identifier[ColoredDrawingArea] ( literal[int] , literal[int] )
keyword[if] identifier[direction] == literal[string] :
identifier[packer] , identifier[align] = identifier[HPacker] , literal[string]
identifier[align] = literal[string]
keyword[else] :
identifier[packer] , identifier[align] = identifier[VPacker] , literal[string]
identifier[align] = literal[string]
identifier[slc] = identifier[obverse] keyword[if] identifier[self] . identifier[label_position] == literal[string] keyword[else] identifier[reverse]
keyword[if] identifier[self] . identifier[label_position] keyword[in] ( literal[string] , literal[string] ):
identifier[slc] = identifier[obverse]
keyword[else] :
identifier[slc] = identifier[reverse]
identifier[main_box] = identifier[packer] ( identifier[children] =[ identifier[da] , identifier[labels_da] ][ identifier[slc] ],
identifier[sep] = identifier[self] . identifier[_label_margin] ,
identifier[align] = identifier[align] ,
identifier[pad] = literal[int] )
identifier[lookup] ={
literal[string] :( identifier[HPacker] , identifier[reverse] ),
literal[string] :( identifier[HPacker] , identifier[obverse] ),
literal[string] :( identifier[VPacker] , identifier[reverse] ),
literal[string] :( identifier[VPacker] , identifier[obverse] )}
identifier[packer] , identifier[slc] = identifier[lookup] [ identifier[self] . identifier[title_position] ]
identifier[children] =[ identifier[title_box] , identifier[main_box] ][ identifier[slc] ]
identifier[box] = identifier[packer] ( identifier[children] = identifier[children] ,
identifier[sep] = identifier[self] . identifier[_title_margin] ,
identifier[align] = identifier[self] . identifier[_title_align] ,
identifier[pad] = literal[int] )
keyword[return] identifier[box]
|
def draw(self):
"""
Draw guide
Returns
-------
out : matplotlib.offsetbox.Offsetbox
A drawing of this legend
"""
obverse = slice(0, None)
reverse = slice(None, None, -1)
width = self.barwidth
height = self.barheight
nbars = len(self.bar)
length = height
direction = self.direction
colors = self.bar['color'].tolist()
labels = self.key['label'].tolist()
themeable = self.theme.figure._themeable
# When there is more than one guide, we keep
# record of all of them using lists
if 'legend_title' not in themeable:
themeable['legend_title'] = [] # depends on [control=['if'], data=['themeable']]
if 'legend_text_colorbar' not in themeable:
themeable['legend_text_colorbar'] = [] # depends on [control=['if'], data=['themeable']]
# .5 puts the ticks in the middle of the bars when
# raster=False. So when raster=True the ticks are
# in between interpolation points and the matching is
# close though not exactly right.
_from = (self.bar['value'].min(), self.bar['value'].max())
tick_locations = rescale(self.key['value'], (0.5, nbars - 0.5), _from) * length / nbars
if direction == 'horizontal':
(width, height) = (height, width)
length = width # depends on [control=['if'], data=[]]
if self.reverse:
colors = colors[::-1]
labels = labels[::-1]
tick_locations = length - tick_locations[::-1] # depends on [control=['if'], data=[]]
# title #
title_box = TextArea(self.title, textprops=dict(color='black'))
themeable['legend_title'].append(title_box)
# colorbar and ticks #
da = ColoredDrawingArea(width, height, 0, 0)
if self.raster:
add_interpolated_colorbar(da, colors, direction) # depends on [control=['if'], data=[]]
else:
add_segmented_colorbar(da, colors, direction)
if self.ticks:
_locations = tick_locations
if not self.draw_ulim:
_locations = _locations[:-1] # depends on [control=['if'], data=[]]
if not self.draw_llim:
_locations = _locations[1:] # depends on [control=['if'], data=[]]
add_ticks(da, _locations, direction) # depends on [control=['if'], data=[]]
# labels #
if self.label:
(labels_da, legend_text) = create_labels(da, labels, tick_locations, direction)
themeable['legend_text_colorbar'].extend(legend_text) # depends on [control=['if'], data=[]]
else:
labels_da = ColoredDrawingArea(0, 0)
# colorbar + labels #
if direction == 'vertical':
(packer, align) = (HPacker, 'bottom')
align = 'center' # depends on [control=['if'], data=[]]
else:
(packer, align) = (VPacker, 'right')
align = 'center'
slc = obverse if self.label_position == 'right' else reverse
if self.label_position in ('right', 'bottom'):
slc = obverse # depends on [control=['if'], data=[]]
else:
slc = reverse
main_box = packer(children=[da, labels_da][slc], sep=self._label_margin, align=align, pad=0)
# title + colorbar(with labels) #
lookup = {'right': (HPacker, reverse), 'left': (HPacker, obverse), 'bottom': (VPacker, reverse), 'top': (VPacker, obverse)}
(packer, slc) = lookup[self.title_position]
children = [title_box, main_box][slc]
box = packer(children=children, sep=self._title_margin, align=self._title_align, pad=0)
return box
|
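The tick placement in `draw` maps key values onto bar-center coordinates. A numeric sketch of that one line, assuming `rescale` is a plain linear map (the real helper lives elsewhere in the package):

```python
def rescale(values, to, _from):  # assumed linear behaviour
    lo, hi = _from
    a, b = to
    return [a + (v - lo) / (hi - lo) * (b - a) for v in values]

nbars, length = 10, 100.0
ticks = [t * length / nbars
         for t in rescale([0.0, 0.5, 1.0], (0.5, nbars - 0.5), (0.0, 1.0))]
print(ticks)  # [5.0, 50.0, 95.0]: the endpoints land mid-bar, as the
              # '.5 puts the ticks in the middle of the bars' comment says
```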
def can(self, event):
"""
returns a list of states that can result from processing this event
"""
return [t.new_state for t in self._transitions if t.event.equals(event)]
|
def function[can, parameter[self, event]]:
constant[
returns a list of states that can result from processing this event
]
return[<ast.ListComp object at 0x7da1b085c610>]
|
keyword[def] identifier[can] ( identifier[self] , identifier[event] ):
literal[string]
keyword[return] [ identifier[t] . identifier[new_state] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_transitions] keyword[if] identifier[t] . identifier[event] . identifier[equals] ( identifier[event] )]
|
def can(self, event):
"""
returns a list of states that can result from processing this event
"""
return [t.new_state for t in self._transitions if t.event.equals(event)]
|
def push(self, kv):
""" Adds a new item from the given (key, value)-tuple.
If the key exists, pushes the updated item to the head of the dict.
"""
if kv[0] in self:
self.__delitem__(kv[0])
self.__setitem__(kv[0], kv[1])
|
def function[push, parameter[self, kv]]:
constant[ Adds a new item from the given (key, value)-tuple.
If the key exists, pushes the updated item to the head of the dict.
]
if compare[call[name[kv]][constant[0]] in name[self]] begin[:]
call[name[self].__delitem__, parameter[call[name[kv]][constant[0]]]]
call[name[self].__setitem__, parameter[call[name[kv]][constant[0]], call[name[kv]][constant[1]]]]
|
keyword[def] identifier[push] ( identifier[self] , identifier[kv] ):
literal[string]
keyword[if] identifier[kv] [ literal[int] ] keyword[in] identifier[self] :
identifier[self] . identifier[__delitem__] ( identifier[kv] [ literal[int] ])
identifier[self] . identifier[__setitem__] ( identifier[kv] [ literal[int] ], identifier[kv] [ literal[int] ])
|
def push(self, kv):
""" Adds a new item from the given (key, value)-tuple.
If the key exists, pushes the updated item to the head of the dict.
"""
if kv[0] in self:
self.__delitem__(kv[0]) # depends on [control=['if'], data=['self']]
self.__setitem__(kv[0], kv[1])
|
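A minimal sketch of `push` on an `OrderedDict` host: deleting an existing key before re-inserting moves the item to the insertion end (whether that end is the 'head' the docstring mentions depends on the host class, which this row does not show):

```python
from collections import OrderedDict

class PushDict(OrderedDict):  # hypothetical host class
    def push(self, kv):
        if kv[0] in self:
            self.__delitem__(kv[0])
        self.__setitem__(kv[0], kv[1])

d = PushDict([('a', 1), ('b', 2)])
d.push(('a', 3))  # 'a' is removed, then re-inserted at the end
print(list(d.items()))  # [('b', 2), ('a', 3)]
```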
def create_lockfile(self):
"""
Write the recursive dependency list to outfile
with hard-pinned versions.
Then fix it.
"""
process = subprocess.Popen(
self.pin_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
if process.returncode == 0:
self.fix_lockfile()
else:
logger.critical("ERROR executing %s", ' '.join(self.pin_command))
logger.critical("Exit code: %s", process.returncode)
logger.critical(stdout.decode('utf-8'))
logger.critical(stderr.decode('utf-8'))
raise RuntimeError("Failed to pip-compile {0}".format(self.infile))
|
def function[create_lockfile, parameter[self]]:
constant[
Write the recursive dependency list to outfile
with hard-pinned versions.
Then fix it.
]
variable[process] assign[=] call[name[subprocess].Popen, parameter[name[self].pin_command]]
<ast.Tuple object at 0x7da18bcca920> assign[=] call[name[process].communicate, parameter[]]
if compare[name[process].returncode equal[==] constant[0]] begin[:]
call[name[self].fix_lockfile, parameter[]]
|
keyword[def] identifier[create_lockfile] ( identifier[self] ):
literal[string]
identifier[process] = identifier[subprocess] . identifier[Popen] (
identifier[self] . identifier[pin_command] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
)
identifier[stdout] , identifier[stderr] = identifier[process] . identifier[communicate] ()
keyword[if] identifier[process] . identifier[returncode] == literal[int] :
identifier[self] . identifier[fix_lockfile] ()
keyword[else] :
identifier[logger] . identifier[critical] ( literal[string] , literal[string] . identifier[join] ( identifier[self] . identifier[pin_command] ))
identifier[logger] . identifier[critical] ( literal[string] , identifier[process] . identifier[returncode] )
identifier[logger] . identifier[critical] ( identifier[stdout] . identifier[decode] ( literal[string] ))
identifier[logger] . identifier[critical] ( identifier[stderr] . identifier[decode] ( literal[string] ))
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[self] . identifier[infile] ))
|
def create_lockfile(self):
"""
Write the recursive dependency list to outfile
with hard-pinned versions.
Then fix it.
"""
process = subprocess.Popen(self.pin_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if process.returncode == 0:
self.fix_lockfile() # depends on [control=['if'], data=[]]
else:
logger.critical('ERROR executing %s', ' '.join(self.pin_command))
logger.critical('Exit code: %s', process.returncode)
logger.critical(stdout.decode('utf-8'))
logger.critical(stderr.decode('utf-8'))
raise RuntimeError('Failed to pip-compile {0}'.format(self.infile))
|
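The run-and-capture core of `create_lockfile`, standalone. `self.pin_command` would be a pip-compile invocation; a portable command is substituted here:

```python
import subprocess
import sys

process = subprocess.Popen(
    [sys.executable, '-c', 'print("pinned")'],  # stands in for pin_command
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
if process.returncode == 0:
    print(stdout.decode('utf-8').strip())  # pinned
else:
    raise RuntimeError(stderr.decode('utf-8'))
```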
def pickNthWeekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek > 4 means last instance"""
first = datetime.datetime(year=year, month=month, hour=hour, minute=minute,
day=1)
weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
for n in xrange(whichweek - 1, -1, -1):
dt = weekdayone + n * WEEKS
if dt.month == month:
return dt
|
def function[pickNthWeekday, parameter[year, month, dayofweek, hour, minute, whichweek]]:
constant[dayofweek == 0 means Sunday, whichweek > 4 means last instance]
variable[first] assign[=] call[name[datetime].datetime, parameter[]]
variable[weekdayone] assign[=] call[name[first].replace, parameter[]]
for taget[name[n]] in starred[call[name[xrange], parameter[binary_operation[name[whichweek] - constant[1]], <ast.UnaryOp object at 0x7da20e955f30>, <ast.UnaryOp object at 0x7da20e9550c0>]]] begin[:]
variable[dt] assign[=] binary_operation[name[weekdayone] + binary_operation[name[n] * name[WEEKS]]]
if compare[name[dt].month equal[==] name[month]] begin[:]
return[name[dt]]
|
keyword[def] identifier[pickNthWeekday] ( identifier[year] , identifier[month] , identifier[dayofweek] , identifier[hour] , identifier[minute] , identifier[whichweek] ):
literal[string]
identifier[first] = identifier[datetime] . identifier[datetime] ( identifier[year] = identifier[year] , identifier[month] = identifier[month] , identifier[hour] = identifier[hour] , identifier[minute] = identifier[minute] ,
identifier[day] = literal[int] )
identifier[weekdayone] = identifier[first] . identifier[replace] ( identifier[day] =(( identifier[dayofweek] - identifier[first] . identifier[isoweekday] ())% literal[int] + literal[int] ))
keyword[for] identifier[n] keyword[in] identifier[xrange] ( identifier[whichweek] - literal[int] ,- literal[int] ,- literal[int] ):
identifier[dt] = identifier[weekdayone] + identifier[n] * identifier[WEEKS]
keyword[if] identifier[dt] . identifier[month] == identifier[month] :
keyword[return] identifier[dt]
|
def pickNthWeekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek > 4 means last instance"""
first = datetime.datetime(year=year, month=month, hour=hour, minute=minute, day=1)
weekdayone = first.replace(day=(dayofweek - first.isoweekday()) % 7 + 1)
for n in xrange(whichweek - 1, -1, -1):
dt = weekdayone + n * WEEKS
if dt.month == month:
return dt # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
|
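A worked run of the helper above, assuming `WEEKS` is `datetime.timedelta(weeks=1)` (`xrange` is the Python 2 spelling of `range`):

```python
import datetime

WEEKS = datetime.timedelta(weeks=1)  # assumed module-level constant

def pick_nth_weekday(year, month, dayofweek, hour, minute, whichweek):
    first = datetime.datetime(year=year, month=month, hour=hour,
                              minute=minute, day=1)
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    for n in range(whichweek - 1, -1, -1):
        dt = weekdayone + n * WEEKS
        if dt.month == month:
            return dt

# Second Sunday of March 2021 at 02:00, the US DST transition:
print(pick_nth_weekday(2021, 3, 0, 2, 0, 2))  # 2021-03-14 02:00:00
```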
def dict_to_paths(dict_):
"""Convert a dict to metric paths.
>>> dict_to_paths({'foo': {'bar': 1}, 'baz': 2})
{
'foo.bar': 1,
'baz': 2,
}
"""
metrics = {}
for k, v in dict_.iteritems():
if isinstance(v, dict):
submetrics = dict_to_paths(v)
for subk, subv in submetrics.iteritems():
metrics['.'.join([str(k), str(subk)])] = subv
else:
metrics[k] = v
return metrics
|
def function[dict_to_paths, parameter[dict_]]:
constant[Convert a dict to metric paths.
>>> dict_to_paths({'foo': {'bar': 1}, 'baz': 2})
{
'foo.bar': 1,
'baz': 2,
}
]
variable[metrics] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18fe91b40>, <ast.Name object at 0x7da18fe93580>]]] in starred[call[name[dict_].iteritems, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[dict]]] begin[:]
variable[submetrics] assign[=] call[name[dict_to_paths], parameter[name[v]]]
for taget[tuple[[<ast.Name object at 0x7da18fe93b50>, <ast.Name object at 0x7da18fe906a0>]]] in starred[call[name[submetrics].iteritems, parameter[]]] begin[:]
call[name[metrics]][call[constant[.].join, parameter[list[[<ast.Call object at 0x7da18fe93850>, <ast.Call object at 0x7da18fe936d0>]]]]] assign[=] name[subv]
return[name[metrics]]
|
keyword[def] identifier[dict_to_paths] ( identifier[dict_] ):
literal[string]
identifier[metrics] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dict_] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ):
identifier[submetrics] = identifier[dict_to_paths] ( identifier[v] )
keyword[for] identifier[subk] , identifier[subv] keyword[in] identifier[submetrics] . identifier[iteritems] ():
identifier[metrics] [ literal[string] . identifier[join] ([ identifier[str] ( identifier[k] ), identifier[str] ( identifier[subk] )])]= identifier[subv]
keyword[else] :
identifier[metrics] [ identifier[k] ]= identifier[v]
keyword[return] identifier[metrics]
|
def dict_to_paths(dict_):
"""Convert a dict to metric paths.
>>> dict_to_paths({'foo': {'bar': 1}, 'baz': 2})
{
'foo.bar': 1,
'baz': 2,
}
"""
metrics = {}
for (k, v) in dict_.iteritems():
if isinstance(v, dict):
submetrics = dict_to_paths(v)
for (subk, subv) in submetrics.iteritems():
metrics['.'.join([str(k), str(subk)])] = subv # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
metrics[k] = v # depends on [control=['for'], data=[]]
return metrics
|
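`iteritems` makes the function above Python 2 only; the same flattening in Python 3 reproduces the docstring's expected output:

```python
def dict_to_paths_py3(dict_):
    metrics = {}
    for k, v in dict_.items():  # items() replaces Python 2's iteritems()
        if isinstance(v, dict):
            for subk, subv in dict_to_paths_py3(v).items():
                metrics['.'.join([str(k), str(subk)])] = subv
        else:
            metrics[k] = v
    return metrics

print(dict_to_paths_py3({'foo': {'bar': 1}, 'baz': 2}))
# {'foo.bar': 1, 'baz': 2}
```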
def unescape(b, encoding):
'''Unescape all string and unicode literals in bytes.'''
return string_literal_re.sub(
lambda m: unescape_string_literal(m.group(), encoding),
b
)
|
def function[unescape, parameter[b, encoding]]:
constant[Unescape all string and unicode literals in bytes.]
return[call[name[string_literal_re].sub, parameter[<ast.Lambda object at 0x7da1b033ff70>, name[b]]]]
|
keyword[def] identifier[unescape] ( identifier[b] , identifier[encoding] ):
literal[string]
keyword[return] identifier[string_literal_re] . identifier[sub] (
keyword[lambda] identifier[m] : identifier[unescape_string_literal] ( identifier[m] . identifier[group] (), identifier[encoding] ),
identifier[b]
)
|
def unescape(b, encoding):
"""Unescape all string and unicode literals in bytes."""
return string_literal_re.sub(lambda m: unescape_string_literal(m.group(), encoding), b)
|
def _get_nonce(self, url):
"""
Get a nonce to use in a request, removing it from the nonces on hand.
"""
action = LOG_JWS_GET_NONCE()
if len(self._nonces) > 0:
with action:
nonce = self._nonces.pop()
action.add_success_fields(nonce=nonce)
return succeed(nonce)
else:
with action.context():
return (
DeferredContext(self.head(url))
.addCallback(self._add_nonce)
.addCallback(lambda _: self._nonces.pop())
.addCallback(tap(
lambda nonce: action.add_success_fields(nonce=nonce)))
.addActionFinish())
|
def function[_get_nonce, parameter[self, url]]:
constant[
Get a nonce to use in a request, removing it from the nonces on hand.
]
variable[action] assign[=] call[name[LOG_JWS_GET_NONCE], parameter[]]
if compare[call[name[len], parameter[name[self]._nonces]] greater[>] constant[0]] begin[:]
with name[action] begin[:]
variable[nonce] assign[=] call[name[self]._nonces.pop, parameter[]]
call[name[action].add_success_fields, parameter[]]
return[call[name[succeed], parameter[name[nonce]]]]
|
keyword[def] identifier[_get_nonce] ( identifier[self] , identifier[url] ):
literal[string]
identifier[action] = identifier[LOG_JWS_GET_NONCE] ()
keyword[if] identifier[len] ( identifier[self] . identifier[_nonces] )> literal[int] :
keyword[with] identifier[action] :
identifier[nonce] = identifier[self] . identifier[_nonces] . identifier[pop] ()
identifier[action] . identifier[add_success_fields] ( identifier[nonce] = identifier[nonce] )
keyword[return] identifier[succeed] ( identifier[nonce] )
keyword[else] :
keyword[with] identifier[action] . identifier[context] ():
keyword[return] (
identifier[DeferredContext] ( identifier[self] . identifier[head] ( identifier[url] ))
. identifier[addCallback] ( identifier[self] . identifier[_add_nonce] )
. identifier[addCallback] ( keyword[lambda] identifier[_] : identifier[self] . identifier[_nonces] . identifier[pop] ())
. identifier[addCallback] ( identifier[tap] (
keyword[lambda] identifier[nonce] : identifier[action] . identifier[add_success_fields] ( identifier[nonce] = identifier[nonce] )))
. identifier[addActionFinish] ())
|
def _get_nonce(self, url):
"""
Get a nonce to use in a request, removing it from the nonces on hand.
"""
action = LOG_JWS_GET_NONCE()
if len(self._nonces) > 0:
with action:
nonce = self._nonces.pop()
action.add_success_fields(nonce=nonce)
return succeed(nonce) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
else:
with action.context():
return DeferredContext(self.head(url)).addCallback(self._add_nonce).addCallback(lambda _: self._nonces.pop()).addCallback(tap(lambda nonce: action.add_success_fields(nonce=nonce))).addActionFinish() # depends on [control=['with'], data=[]]
|
def send_bytes(self, bytes):
"""
Send data to DCC peer.
"""
try:
self.socket.send(bytes)
log.debug("TO PEER: %r\n", bytes)
except socket.error:
self.disconnect("Connection reset by peer.")
|
def function[send_bytes, parameter[self, bytes]]:
constant[
Send data to DCC peer.
]
<ast.Try object at 0x7da1b0b44340>
|
keyword[def] identifier[send_bytes] ( identifier[self] , identifier[bytes] ):
literal[string]
keyword[try] :
identifier[self] . identifier[socket] . identifier[send] ( identifier[bytes] )
identifier[log] . identifier[debug] ( literal[string] , identifier[bytes] )
keyword[except] identifier[socket] . identifier[error] :
identifier[self] . identifier[disconnect] ( literal[string] )
|
def send_bytes(self, bytes):
"""
Send data to DCC peer.
"""
try:
self.socket.send(bytes)
log.debug('TO PEER: %r\n', bytes) # depends on [control=['try'], data=[]]
except socket.error:
self.disconnect('Connection reset by peer.') # depends on [control=['except'], data=[]]
|
def parse_polygonal_poi(coords, response):
"""
Parse areal POI way polygons from OSM node coords.
Parameters
----------
coords : dict
dict of node IDs and their lat, lon coordinates
response : dict
a way element from the Overpass API JSON response
Returns
-------
dict of POIs containing each's nodes, polygon geometry, and osmid
"""
if 'type' in response and response['type'] == 'way':
nodes = response['nodes']
try:
polygon = Polygon([(coords[node]['lon'], coords[node]['lat']) for node in nodes])
poi = {'nodes': nodes,
'geometry': polygon,
'osmid': response['id']}
if 'tags' in response:
for tag in response['tags']:
poi[tag] = response['tags'][tag]
return poi
except Exception:
log('Polygon has invalid geometry: {}'.format(nodes))
return None
|
def function[parse_polygonal_poi, parameter[coords, response]]:
constant[
Parse areal POI way polygons from OSM node coords.
Parameters
----------
coords : dict
dict of node IDs and their lat, lon coordinates
Returns
-------
dict of POIs containing each's nodes, polygon geometry, and osmid
]
if <ast.BoolOp object at 0x7da1b1ceeb30> begin[:]
variable[nodes] assign[=] call[name[response]][constant[nodes]]
<ast.Try object at 0x7da1b1cefe50>
return[constant[None]]
|
keyword[def] identifier[parse_polygonal_poi] ( identifier[coords] , identifier[response] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[response] keyword[and] identifier[response] [ literal[string] ]== literal[string] :
identifier[nodes] = identifier[response] [ literal[string] ]
keyword[try] :
identifier[polygon] = identifier[Polygon] ([( identifier[coords] [ identifier[node] ][ literal[string] ], identifier[coords] [ identifier[node] ][ literal[string] ]) keyword[for] identifier[node] keyword[in] identifier[nodes] ])
identifier[poi] ={ literal[string] : identifier[nodes] ,
literal[string] : identifier[polygon] ,
literal[string] : identifier[response] [ literal[string] ]}
keyword[if] literal[string] keyword[in] identifier[response] :
keyword[for] identifier[tag] keyword[in] identifier[response] [ literal[string] ]:
identifier[poi] [ identifier[tag] ]= identifier[response] [ literal[string] ][ identifier[tag] ]
keyword[return] identifier[poi]
keyword[except] identifier[Exception] :
identifier[log] ( literal[string] . identifier[format] ( identifier[nodes] ))
keyword[return] keyword[None]
|
def parse_polygonal_poi(coords, response):
"""
Parse areal POI way polygons from OSM node coords.
Parameters
----------
coords : dict
dict of node IDs and their lat, lon coordinates
response : dict
a way element from the Overpass API JSON response
Returns
-------
dict of POIs containing each's nodes, polygon geometry, and osmid
"""
if 'type' in response and response['type'] == 'way':
nodes = response['nodes']
try:
polygon = Polygon([(coords[node]['lon'], coords[node]['lat']) for node in nodes])
poi = {'nodes': nodes, 'geometry': polygon, 'osmid': response['id']}
if 'tags' in response:
for tag in response['tags']:
poi[tag] = response['tags'][tag] # depends on [control=['for'], data=['tag']] # depends on [control=['if'], data=['response']]
return poi # depends on [control=['try'], data=[]]
except Exception:
log('Polygon has invalid geometry: {}'.format(nodes)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return None
|
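A minimal input for the parser above, assuming `Polygon` comes from shapely and the function is in scope; three nodes are enough for a valid ring:

```python
from shapely.geometry import Polygon  # assumed to be the Polygon used above

coords = {
    1: {'lon': 0.0, 'lat': 0.0},
    2: {'lon': 1.0, 'lat': 0.0},
    3: {'lon': 1.0, 'lat': 1.0},
}
response = {'type': 'way', 'id': 42, 'nodes': [1, 2, 3],
            'tags': {'amenity': 'cafe'}}
poi = parse_polygonal_poi(coords, response)
print(poi['osmid'], poi['amenity'], poi['geometry'].is_valid)
# 42 cafe True
```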
def has_zero_length_fragments(self, min_index=None, max_index=None):
"""
Return ``True`` if the list has at least one interval
with zero length withing ``min_index`` and ``max_index``.
If the latter are not specified, check all intervals.
:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
is bigger than the current number of fragments
:rtype: bool
"""
min_index, max_index = self._check_min_max_indices(min_index, max_index)
zero = [i for i in range(min_index, max_index) if self[i].has_zero_length]
self.log([u"Fragments with zero length: %s", zero])
return (len(zero) > 0)
|
def function[has_zero_length_fragments, parameter[self, min_index, max_index]]:
constant[
Return ``True`` if the list has at least one interval
with zero length within ``min_index`` and ``max_index``.
If the latter are not specified, check all intervals.
:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
is bigger than the current number of fragments
:rtype: bool
]
<ast.Tuple object at 0x7da1b1525300> assign[=] call[name[self]._check_min_max_indices, parameter[name[min_index], name[max_index]]]
variable[zero] assign[=] <ast.ListComp object at 0x7da1b1524af0>
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b1524be0>, <ast.Name object at 0x7da1b15251b0>]]]]
return[compare[call[name[len], parameter[name[zero]]] greater[>] constant[0]]]
|
keyword[def] identifier[has_zero_length_fragments] ( identifier[self] , identifier[min_index] = keyword[None] , identifier[max_index] = keyword[None] ):
literal[string]
identifier[min_index] , identifier[max_index] = identifier[self] . identifier[_check_min_max_indices] ( identifier[min_index] , identifier[max_index] )
identifier[zero] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[min_index] , identifier[max_index] ) keyword[if] identifier[self] [ identifier[i] ]. identifier[has_zero_length] ]
identifier[self] . identifier[log] ([ literal[string] , identifier[zero] ])
keyword[return] ( identifier[len] ( identifier[zero] )> literal[int] )
|
def has_zero_length_fragments(self, min_index=None, max_index=None):
"""
Return ``True`` if the list has at least one interval
with zero length within ``min_index`` and ``max_index``.
If the latter are not specified, check all intervals.
:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
is bigger than the current number of fragments
:rtype: bool
"""
(min_index, max_index) = self._check_min_max_indices(min_index, max_index)
zero = [i for i in range(min_index, max_index) if self[i].has_zero_length]
self.log([u'Fragments with zero length: %s', zero])
return len(zero) > 0
|
def parse_data_line(self, sline):
"""
Parses the data line and builds the dictionary.
:param sline: a split data line to parse
:returns: the number of rows to jump before parsing the next data line, or the error code -1
"""
# if fewer values are found than headers, it's an error
if len(sline) != len(self._columns):
self.err("One data line has the wrong number of items")
return -1
rawdict = {}
for idx, result in enumerate(sline):
rawdict[self._columns[idx]] = result
# Getting resid
resid = rawdict['Sample name']
del rawdict['Sample name']
# Getting date
rawdict['DateTime'] = self.csvDate2BikaDate(rawdict['Date'], rawdict['Time'])
del rawdict['Date']
del rawdict['Time']
# Getting remarks
rawdict['Remarks'] = rawdict['Remark']
del rawdict['Remark']
# Getting errors
rawdict['Error'] = rawdict['Error/Warning']
if rawdict['Error/Warning']:
self.warn('Analysis warn', numline=self._numline)
del rawdict['Error/Warning']
rawdict['DefaultResult'] = 'Concentration'
self._addRawResult(resid,
{rawdict['Parameter'].replace(' ', ''): rawdict},
False)
return 0
|
def function[parse_data_line, parameter[self, sline]]:
constant[
Parses the data line and builds the dictionary.
:param sline: a split data line to parse
:returns: the number of rows to jump before parsing the next data line, or the error code -1
]
if compare[call[name[len], parameter[name[sline]]] not_equal[!=] call[name[len], parameter[name[self]._columns]]] begin[:]
call[name[self].err, parameter[constant[One data line has the wrong number of items]]]
return[<ast.UnaryOp object at 0x7da18eb55f00>]
variable[rawdict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18eb54eb0>, <ast.Name object at 0x7da18eb55d20>]]] in starred[call[name[enumerate], parameter[name[sline]]]] begin[:]
call[name[rawdict]][call[name[self]._columns][name[idx]]] assign[=] name[result]
variable[resid] assign[=] call[name[rawdict]][constant[Sample name]]
<ast.Delete object at 0x7da18eb57040>
call[name[rawdict]][constant[DateTime]] assign[=] call[name[self].csvDate2BikaDate, parameter[call[name[rawdict]][constant[Date]], call[name[rawdict]][constant[Time]]]]
<ast.Delete object at 0x7da18eb563e0>
<ast.Delete object at 0x7da18eb54e20>
call[name[rawdict]][constant[Remarks]] assign[=] call[name[rawdict]][constant[Remark]]
<ast.Delete object at 0x7da18eb55420>
call[name[rawdict]][constant[Error]] assign[=] call[name[rawdict]][constant[Error/Warning]]
if call[name[rawdict]][constant[Error/Warning]] begin[:]
call[name[self].warn, parameter[constant[Analysis warn]]]
<ast.Delete object at 0x7da18eb56f20>
call[name[rawdict]][constant[DefaultResult]] assign[=] constant[Concentration]
call[name[self]._addRawResult, parameter[name[resid], dictionary[[<ast.Call object at 0x7da1b1d39030>], [<ast.Name object at 0x7da1b1d650c0>]], constant[False]]]
return[constant[0]]
|
keyword[def] identifier[parse_data_line] ( identifier[self] , identifier[sline] ):
literal[string]
keyword[if] identifier[len] ( identifier[sline] )!= identifier[len] ( identifier[self] . identifier[_columns] ):
identifier[self] . identifier[err] ( literal[string] )
keyword[return] - literal[int]
identifier[rawdict] ={}
keyword[for] identifier[idx] , identifier[result] keyword[in] identifier[enumerate] ( identifier[sline] ):
identifier[rawdict] [ identifier[self] . identifier[_columns] [ identifier[idx] ]]= identifier[result]
identifier[resid] = identifier[rawdict] [ literal[string] ]
keyword[del] identifier[rawdict] [ literal[string] ]
identifier[rawdict] [ literal[string] ]= identifier[self] . identifier[csvDate2BikaDate] ( identifier[rawdict] [ literal[string] ], identifier[rawdict] [ literal[string] ])
keyword[del] identifier[rawdict] [ literal[string] ]
keyword[del] identifier[rawdict] [ literal[string] ]
identifier[rawdict] [ literal[string] ]= identifier[rawdict] [ literal[string] ]
keyword[del] identifier[rawdict] [ literal[string] ]
identifier[rawdict] [ literal[string] ]= identifier[rawdict] [ literal[string] ]
keyword[if] identifier[rawdict] [ literal[string] ]:
identifier[self] . identifier[warn] ( literal[string] , identifier[numline] = identifier[self] . identifier[_numline] )
keyword[del] identifier[rawdict] [ literal[string] ]
identifier[rawdict] [ literal[string] ]= literal[string]
identifier[self] . identifier[_addRawResult] ( identifier[resid] ,
{ identifier[rawdict] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ): identifier[rawdict] },
keyword[False] )
keyword[return] literal[int]
|
def parse_data_line(self, sline):
"""
Parses the data line and builds the dictionary.
:param sline: a split data line to parse
:returns: the number of rows to jump before parsing the next data line, or the error code -1
"""
# if fewer values are found than headers, it's an error
if len(sline) != len(self._columns):
self.err('One data line has the wrong number of items')
return -1 # depends on [control=['if'], data=[]]
rawdict = {}
for (idx, result) in enumerate(sline):
rawdict[self._columns[idx]] = result # depends on [control=['for'], data=[]]
# Getting resid
resid = rawdict['Sample name']
del rawdict['Sample name']
# Getting date
rawdict['DateTime'] = self.csvDate2BikaDate(rawdict['Date'], rawdict['Time'])
del rawdict['Date']
del rawdict['Time']
# Getting remarks
rawdict['Remarks'] = rawdict['Remark']
del rawdict['Remark']
# Getting errors
rawdict['Error'] = rawdict['Error/Warning']
if rawdict['Error/Warning']:
self.warn('Analysis warn', numline=self._numline) # depends on [control=['if'], data=[]]
del rawdict['Error/Warning']
rawdict['DefaultResult'] = 'Concentration'
self._addRawResult(resid, {rawdict['Parameter'].replace(' ', ''): rawdict}, False)
return 0
|
def MAXSIDE(a, b):
"""maxside: Sort pack by maximum sides"""
return cmp(max(b[0], b[1]), max(a[0], a[1])) or cmp(min(b[0], b[1]), min(a[0], a[1])) or cmp(b[1], a[1]) or cmp(b[0], a[0])
|
def function[MAXSIDE, parameter[a, b]]:
constant[maxside: Sort pack by maximum sides]
return[<ast.BoolOp object at 0x7da1b0d018d0>]
|
keyword[def] identifier[MAXSIDE] ( identifier[a] , identifier[b] ):
literal[string]
keyword[return] identifier[cmp] ( identifier[max] ( identifier[b] [ literal[int] ], identifier[b] [ literal[int] ]), identifier[max] ( identifier[a] [ literal[int] ], identifier[a] [ literal[int] ])) keyword[or] identifier[cmp] ( identifier[min] ( identifier[b] [ literal[int] ], identifier[b] [ literal[int] ]), identifier[min] ( identifier[a] [ literal[int] ], identifier[a] [ literal[int] ])) keyword[or] identifier[cmp] ( identifier[b] [ literal[int] ], identifier[a] [ literal[int] ]) keyword[or] identifier[cmp] ( identifier[b] [ literal[int] ], identifier[a] [ literal[int] ])
|
def MAXSIDE(a, b):
"""maxside: Sort pack by maximum sides"""
return cmp(max(b[0], b[1]), max(a[0], a[1])) or cmp(min(b[0], b[1]), min(a[0], a[1])) or cmp(b[1], a[1]) or cmp(b[0], a[0])
|
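`cmp` was removed in Python 3; the same descending (max side, min side, height, width) ordering can be expressed as a sort key:

```python
def maxside_key(rect):
    w, h = rect[0], rect[1]
    return (max(w, h), min(w, h), h, w)

rects = [(3, 5), (6, 2), (4, 4)]
print(sorted(rects, key=maxside_key, reverse=True))
# [(6, 2), (3, 5), (4, 4)]: largest maximum side first
```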
def DeserializeFromBufer(buffer, offset=0):
"""
Deserialize object instance from the specified buffer.
Args:
buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
offset: UNUSED
Returns:
Transaction:
"""
mstream = StreamManager.GetStream(buffer)
reader = BinaryReader(mstream)
tx = Transaction.DeserializeFrom(reader)
StreamManager.ReleaseStream(mstream)
return tx
|
def function[DeserializeFromBufer, parameter[buffer, offset]]:
constant[
Deserialize object instance from the specified buffer.
Args:
buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
offset: UNUSED
Returns:
Transaction:
]
variable[mstream] assign[=] call[name[StreamManager].GetStream, parameter[name[buffer]]]
variable[reader] assign[=] call[name[BinaryReader], parameter[name[mstream]]]
variable[tx] assign[=] call[name[Transaction].DeserializeFrom, parameter[name[reader]]]
call[name[StreamManager].ReleaseStream, parameter[name[mstream]]]
return[name[tx]]
|
keyword[def] identifier[DeserializeFromBufer] ( identifier[buffer] , identifier[offset] = literal[int] ):
literal[string]
identifier[mstream] = identifier[StreamManager] . identifier[GetStream] ( identifier[buffer] )
identifier[reader] = identifier[BinaryReader] ( identifier[mstream] )
identifier[tx] = identifier[Transaction] . identifier[DeserializeFrom] ( identifier[reader] )
identifier[StreamManager] . identifier[ReleaseStream] ( identifier[mstream] )
keyword[return] identifier[tx]
|
def DeserializeFromBufer(buffer, offset=0):
"""
Deserialize object instance from the specified buffer.
Args:
buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
offset: UNUSED
Returns:
Transaction:
"""
mstream = StreamManager.GetStream(buffer)
reader = BinaryReader(mstream)
tx = Transaction.DeserializeFrom(reader)
StreamManager.ReleaseStream(mstream)
return tx
|
def create_records(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
"""
Create a list of records from the marcxml description.
:returns: a list of objects initiated by the function create_record().
Please see that function's docstring.
"""
# Use the DOTALL flag to include newlines.
regex = re.compile('<record.*?>.*?</record>', re.DOTALL)
record_xmls = regex.findall(marcxml)
return [create_record(record_xml, verbose=verbose, correct=correct,
parser=parser, keep_singletons=keep_singletons)
for record_xml in record_xmls]
|
def function[create_records, parameter[marcxml, verbose, correct, parser, keep_singletons]]:
constant[
Create a list of records from the marcxml description.
:returns: a list of objects initiated by the function create_record().
Please see that function's docstring.
]
variable[regex] assign[=] call[name[re].compile, parameter[constant[<record.*?>.*?</record>], name[re].DOTALL]]
variable[record_xmls] assign[=] call[name[regex].findall, parameter[name[marcxml]]]
return[<ast.ListComp object at 0x7da18c4ccd90>]
|
keyword[def] identifier[create_records] ( identifier[marcxml] , identifier[verbose] = identifier[CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL] ,
identifier[correct] = identifier[CFG_BIBRECORD_DEFAULT_CORRECT] , identifier[parser] = literal[string] ,
identifier[keep_singletons] = identifier[CFG_BIBRECORD_KEEP_SINGLETONS] ):
literal[string]
identifier[regex] = identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[DOTALL] )
identifier[record_xmls] = identifier[regex] . identifier[findall] ( identifier[marcxml] )
keyword[return] [ identifier[create_record] ( identifier[record_xml] , identifier[verbose] = identifier[verbose] , identifier[correct] = identifier[correct] ,
identifier[parser] = identifier[parser] , identifier[keep_singletons] = identifier[keep_singletons] )
keyword[for] identifier[record_xml] keyword[in] identifier[record_xmls] ]
|
def create_records(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL, correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='', keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
"""
Create a list of records from the marcxml description.
:returns: a list of objects initiated by the function create_record().
Please see that function's docstring.
"""
# Use the DOTALL flag to include newlines.
regex = re.compile('<record.*?>.*?</record>', re.DOTALL)
record_xmls = regex.findall(marcxml)
return [create_record(record_xml, verbose=verbose, correct=correct, parser=parser, keep_singletons=keep_singletons) for record_xml in record_xmls]
|
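The record splitter standalone: `re.DOTALL` lets the dot cross newlines inside a record, and the lazy quantifiers stop each match at the first closing tag:

```python
import re

regex = re.compile('<record.*?>.*?</record>', re.DOTALL)
marcxml = ('<collection><record id="1">\nA</record>'
           '<record id="2">B</record></collection>')
print(regex.findall(marcxml))
# ['<record id="1">\nA</record>', '<record id="2">B</record>']
```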
def StartClients(cls, hunt_id, client_ids, token=None):
"""This method is called by the foreman for each client it discovers.
Note that this function is performance sensitive since it is called by the
foreman for every client which needs to be scheduled.
Args:
hunt_id: The hunt to schedule.
client_ids: List of clients that should be added to the hunt.
token: An optional access token to use.
"""
token = token or access_control.ACLToken(username="Hunt", reason="hunting")
with queue_manager.QueueManager(token=token) as flow_manager:
for client_id in client_ids:
# Now we construct a special response which will be sent to the hunt
# flow. Randomize the request_id so we do not overwrite other messages
# in the queue.
state = rdf_flow_runner.RequestState(
id=random.UInt32(),
session_id=hunt_id,
client_id=client_id,
next_state="AddClient")
# Queue the new request.
flow_manager.QueueRequest(state)
# Send a response.
msg = rdf_flows.GrrMessage(
session_id=hunt_id,
request_id=state.id,
response_id=1,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdf_flows.GrrMessage.Type.STATUS,
payload=rdf_flows.GrrStatus())
flow_manager.QueueResponse(msg)
# And notify the worker about it.
flow_manager.QueueNotification(session_id=hunt_id)
|
def function[StartClients, parameter[cls, hunt_id, client_ids, token]]:
constant[This method is called by the foreman for each client it discovers.
Note that this function is performance sensitive since it is called by the
foreman for every client which needs to be scheduled.
Args:
hunt_id: The hunt to schedule.
client_ids: List of clients that should be added to the hunt.
token: An optional access token to use.
]
variable[token] assign[=] <ast.BoolOp object at 0x7da1b1b04340>
with call[name[queue_manager].QueueManager, parameter[]] begin[:]
for taget[name[client_id]] in starred[name[client_ids]] begin[:]
variable[state] assign[=] call[name[rdf_flow_runner].RequestState, parameter[]]
call[name[flow_manager].QueueRequest, parameter[name[state]]]
variable[msg] assign[=] call[name[rdf_flows].GrrMessage, parameter[]]
call[name[flow_manager].QueueResponse, parameter[name[msg]]]
call[name[flow_manager].QueueNotification, parameter[]]
|
keyword[def] identifier[StartClients] ( identifier[cls] , identifier[hunt_id] , identifier[client_ids] , identifier[token] = keyword[None] ):
literal[string]
identifier[token] = identifier[token] keyword[or] identifier[access_control] . identifier[ACLToken] ( identifier[username] = literal[string] , identifier[reason] = literal[string] )
keyword[with] identifier[queue_manager] . identifier[QueueManager] ( identifier[token] = identifier[token] ) keyword[as] identifier[flow_manager] :
keyword[for] identifier[client_id] keyword[in] identifier[client_ids] :
identifier[state] = identifier[rdf_flow_runner] . identifier[RequestState] (
identifier[id] = identifier[random] . identifier[UInt32] (),
identifier[session_id] = identifier[hunt_id] ,
identifier[client_id] = identifier[client_id] ,
identifier[next_state] = literal[string] )
identifier[flow_manager] . identifier[QueueRequest] ( identifier[state] )
identifier[msg] = identifier[rdf_flows] . identifier[GrrMessage] (
identifier[session_id] = identifier[hunt_id] ,
identifier[request_id] = identifier[state] . identifier[id] ,
identifier[response_id] = literal[int] ,
identifier[auth_state] = identifier[rdf_flows] . identifier[GrrMessage] . identifier[AuthorizationState] . identifier[AUTHENTICATED] ,
identifier[type] = identifier[rdf_flows] . identifier[GrrMessage] . identifier[Type] . identifier[STATUS] ,
identifier[payload] = identifier[rdf_flows] . identifier[GrrStatus] ())
identifier[flow_manager] . identifier[QueueResponse] ( identifier[msg] )
identifier[flow_manager] . identifier[QueueNotification] ( identifier[session_id] = identifier[hunt_id] )
|
def StartClients(cls, hunt_id, client_ids, token=None):
"""This method is called by the foreman for each client it discovers.
Note that this function is performance sensitive since it is called by the
foreman for every client which needs to be scheduled.
Args:
hunt_id: The hunt to schedule.
client_ids: List of clients that should be added to the hunt.
token: An optional access token to use.
"""
token = token or access_control.ACLToken(username='Hunt', reason='hunting')
with queue_manager.QueueManager(token=token) as flow_manager:
for client_id in client_ids:
# Now we construct a special response which will be sent to the hunt
# flow. Randomize the request_id so we do not overwrite other messages
# in the queue.
state = rdf_flow_runner.RequestState(id=random.UInt32(), session_id=hunt_id, client_id=client_id, next_state='AddClient')
# Queue the new request.
flow_manager.QueueRequest(state)
# Send a response.
msg = rdf_flows.GrrMessage(session_id=hunt_id, request_id=state.id, response_id=1, auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED, type=rdf_flows.GrrMessage.Type.STATUS, payload=rdf_flows.GrrStatus())
flow_manager.QueueResponse(msg)
# And notify the worker about it.
flow_manager.QueueNotification(session_id=hunt_id) # depends on [control=['for'], data=['client_id']] # depends on [control=['with'], data=['flow_manager']]
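
A hedged scheduling sketch: HuntFlow is a hypothetical stand-in for the class that defines this classmethod, and both identifiers below are illustrative placeholders rather than real GRR ids.

hunt_id = "aff4:/hunts/H:11223344"            # placeholder hunt session id
clients = ["C.%016x" % i for i in range(3)]   # placeholder client ids
# HuntFlow.StartClients(hunt_id, clients)     # token defaults to an ACLToken
#                                             # with username "Hunt"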
|
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg)
|
def function[_handle_wrong_field, parameter[cls, field_name, field_type]]:
constant[Raise an exception whenever an attempt is made to set or retrieve
an invalid attribute with the given name on this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
]
if compare[name[field_type] equal[==] name[ATTR_TYPE_READ]] begin[:]
variable[field_type] assign[=] constant[readable]
variable[msg] assign[=] call[constant[{} has no {} attribute "{}"].format, parameter[name[cls].__name__, name[field_type], name[field_name]]]
call[name[_logger].error, parameter[name[msg]]]
<ast.Raise object at 0x7da18f813e20>
|
keyword[def] identifier[_handle_wrong_field] ( identifier[cls] , identifier[field_name] , identifier[field_type] ):
literal[string]
keyword[if] identifier[field_type] == identifier[ATTR_TYPE_READ] :
identifier[field_type] = literal[string]
keyword[elif] identifier[field_type] == identifier[ATTR_TYPE_WRITE] :
identifier[field_type] = literal[string]
keyword[elif] identifier[field_type] == identifier[ATTR_TYPE_URL] :
identifier[field_type] = literal[string]
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] (
identifier[field_type]
))
identifier[msg] = literal[string] . identifier[format] (
identifier[cls] . identifier[__name__] ,
identifier[field_type] ,
identifier[field_name]
)
identifier[_logger] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[AttributeError] ( identifier[msg] )
|
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable' # depends on [control=['if'], data=['field_type']]
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable' # depends on [control=['if'], data=['field_type']]
elif field_type == ATTR_TYPE_URL:
field_type = 'URL' # depends on [control=['if'], data=['field_type']]
else:
raise AttributeError('Invalid attribute type: {}'.format(field_type))
msg = '{} has no {} attribute "{}"'.format(cls.__name__, field_type, field_name)
_logger.error(msg)
raise AttributeError(msg)
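
A sketch of how a model class might funnel unknown attribute reads into the handler above; it assumes _handle_wrong_field, ATTR_TYPE_READ, and _logger are available from this module, while ExampleModel itself is hypothetical.

class ExampleModel(object):
    # Reuse the module-level handler; it receives the class as `cls`.
    _handle_wrong_field = classmethod(_handle_wrong_field)

    def __getattr__(self, field_name):
        # Any attribute missing from the instance is reported as unreadable.
        self._handle_wrong_field(field_name, ATTR_TYPE_READ)

# ExampleModel().bogus -> AttributeError: ExampleModel has no readable attribute "bogus"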
|
def parse_psimitab(content, fmt='tab27'):
"""https://code.google.com/archive/p/psimi/wikis/PsimiTab27Format.wiki
"""
columns = [
'Unique identifier for interactor A',
'Unique identifier for interactor B',
'Alternative identifier for interactor A',
'Alternative identifier for interactor B',
'Aliases for A',
'Aliases for B',
'Interaction detection methods',
'First author',
'Identifier of the publication',
'NCBI Taxonomy identifier for interactor A',
'NCBI Taxonomy identifier for interactor B',
'Interaction types',
'Source databases',
'Interaction identifier(s)',
'Confidence score']
columns += [
'Complex expansion',
'Biological role A', 'Biological role B',
'Experimental role A', 'Experimental role B',
'Interactor type A', 'Interactor type B',
'Xref for interactor A', 'Xref for interactor B',
'Xref for the interaction',
'Annotations for interactor A', 'Annotations for interactor B',
'Annotations for the interaction',
'NCBI Taxonomy identifier for the host organism',
'Parameters of the interaction',
'Creation date', 'Update date',
'Checksum for the interactor A', 'Checksum for the interactor B',
'Checksum for the interaction',
'negative',
'Feature(s) for interactor A', 'Feature(s) for interactor B',
'Stoichiometry for interactor A', 'Stoichiometry for interactor B',
'Participant identification method for interactor A',
'Participant identification method for interactor B'
]
if fmt == 'tab25':
columns = columns[: 15]
rexp = re.compile(r"(?P<fields>((\"([^\"]|((?<=\\)\"))*\")|([^\t\"])|((?<=\\)\"))+)(\t|$)")
retval = []
for line in content.split('\n'):
line = line.strip()
if line == '' or line[0] == '#':
continue
start = 0
tmp = []
for mobj in rexp.finditer(line):
if mobj.start() != start:
print(repr(line))
assert mobj.start() == start
start = mobj.end()
tmp.append(mobj.group('fields'))
assert len(tmp) == len(columns)
retval.append(dict(zip(columns, tmp)))
return retval
|
def function[parse_psimitab, parameter[content, fmt]]:
constant[https://code.google.com/archive/p/psimi/wikis/PsimiTab27Format.wiki
]
variable[columns] assign[=] list[[<ast.Constant object at 0x7da1b0dc0250>, <ast.Constant object at 0x7da1b0dc1de0>, <ast.Constant object at 0x7da1b0dc1540>, <ast.Constant object at 0x7da1b0dc0160>, <ast.Constant object at 0x7da1b0dc06d0>, <ast.Constant object at 0x7da1b0dc1a80>, <ast.Constant object at 0x7da1b0dc1660>, <ast.Constant object at 0x7da1b0dc1180>, <ast.Constant object at 0x7da1b0dc10c0>, <ast.Constant object at 0x7da1b0dc1630>, <ast.Constant object at 0x7da1b0dc1480>, <ast.Constant object at 0x7da1b0dc0fd0>, <ast.Constant object at 0x7da1b0dc1870>, <ast.Constant object at 0x7da1b0dc0100>, <ast.Constant object at 0x7da1b0dc1240>]]
<ast.AugAssign object at 0x7da1b0dc0370>
if compare[name[fmt] equal[==] constant[tab25]] begin[:]
variable[columns] assign[=] call[name[columns]][<ast.Slice object at 0x7da1b0d62d70>]
variable[rexp] assign[=] call[name[re].compile, parameter[constant[(?P<fields>((\"([^\"]|((?<=\\)\"))*\")|([^\t\"])|((?<=\\)\"))+)(\t|$)]]]
variable[retval] assign[=] list[[]]
for taget[name[line]] in starred[call[name[content].split, parameter[constant[
]]]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b0d62dd0> begin[:]
continue
variable[start] assign[=] constant[0]
variable[tmp] assign[=] list[[]]
for taget[name[mobj]] in starred[call[name[rexp].finditer, parameter[name[line]]]] begin[:]
if compare[call[name[mobj].start, parameter[]] not_equal[!=] name[start]] begin[:]
call[name[print], parameter[call[name[repr], parameter[name[line]]]]]
assert[compare[call[name[mobj].start, parameter[]] equal[==] name[start]]]
variable[start] assign[=] call[name[mobj].end, parameter[]]
call[name[tmp].append, parameter[call[name[mobj].group, parameter[constant[fields]]]]]
assert[compare[call[name[len], parameter[name[tmp]]] equal[==] call[name[len], parameter[name[columns]]]]]
call[name[retval].append, parameter[call[name[dict], parameter[call[name[zip], parameter[name[columns], name[tmp]]]]]]]
return[name[retval]]
|
keyword[def] identifier[parse_psimitab] ( identifier[content] , identifier[fmt] = literal[string] ):
literal[string]
identifier[columns] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
identifier[columns] +=[
literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] ,
literal[string] , literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] ,
literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] ,
literal[string]
]
keyword[if] identifier[fmt] == literal[string] :
identifier[columns] = identifier[columns] [: literal[int] ]
identifier[rexp] = identifier[re] . identifier[compile] ( literal[string] )
identifier[retval] =[]
keyword[for] identifier[line] keyword[in] identifier[content] . identifier[split] ( literal[string] ):
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] == literal[string] keyword[or] identifier[line] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[start] = literal[int]
identifier[tmp] =[]
keyword[for] identifier[mobj] keyword[in] identifier[rexp] . identifier[finditer] ( identifier[line] ):
keyword[if] identifier[mobj] . identifier[start] ()!= identifier[start] :
identifier[print] ( identifier[repr] ( identifier[line] ))
keyword[assert] identifier[mobj] . identifier[start] ()== identifier[start]
identifier[start] = identifier[mobj] . identifier[end] ()
identifier[tmp] . identifier[append] ( identifier[mobj] . identifier[group] ( literal[string] ))
keyword[assert] identifier[len] ( identifier[tmp] )== identifier[len] ( identifier[columns] )
identifier[retval] . identifier[append] ( identifier[dict] ( identifier[zip] ( identifier[columns] , identifier[tmp] )))
keyword[return] identifier[retval]
|
def parse_psimitab(content, fmt='tab27'):
"""https://code.google.com/archive/p/psimi/wikis/PsimiTab27Format.wiki
"""
columns = ['Unique identifier for interactor A', 'Unique identifier for interactor B', 'Alternative identifier for interactor A', 'Alternative identifier for interactor B', 'Aliases for A', 'Aliases for B', 'Interaction detection methods', 'First author', 'Identifier of the publication', 'NCBI Taxonomy identifier for interactor A', 'NCBI Taxonomy identifier for interactor B', 'Interaction types', 'Source databases', 'Interaction identifier(s)', 'Confidence score']
columns += ['Complex expansion', 'Biological role A', 'Biological role B', 'Experimental role A', 'Experimental role B', 'Interactor type A', 'Interactor type B', 'Xref for interactor A', 'Xref for interactor B', 'Xref for the interaction', 'Annotations for interactor A', 'Annotations for interactor B', 'Annotations for the interaction', 'NCBI Taxonomy identifier for the host organism', 'Parameters of the interaction', 'Creation date', 'Update date', 'Checksum for the interactor A', 'Checksum for the interactor B', 'Checksum for the interaction', 'negative', 'Feature(s) for interactor A', 'Feature(s) for interactor B', 'Stoichiometry for interactor A', 'Stoichiometry for interactor B', 'Participant identification method for interactor A', 'Participant identification method for interactor B']
if fmt == 'tab25':
columns = columns[:15] # depends on [control=['if'], data=[]]
rexp = re.compile('(?P<fields>((\\"([^\\"]|((?<=\\\\)\\"))*\\")|([^\\t\\"])|((?<=\\\\)\\"))+)(\\t|$)')
retval = []
for line in content.split('\n'):
line = line.strip()
if line == '' or line[0] == '#':
continue # depends on [control=['if'], data=[]]
start = 0
tmp = []
for mobj in rexp.finditer(line):
if mobj.start() != start:
print(repr(line)) # depends on [control=['if'], data=[]]
assert mobj.start() == start
start = mobj.end()
tmp.append(mobj.group('fields')) # depends on [control=['for'], data=['mobj']]
assert len(tmp) == len(columns)
retval.append(dict(zip(columns, tmp))) # depends on [control=['for'], data=['line']]
return retval
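
A small self-check of the tab25 branch; the field values are synthetic and exist only to exercise the regex and the column zipping.

line = '\t'.join('f%d' % i for i in range(15))   # 15 synthetic tab25 fields
rows = parse_psimitab(line, fmt='tab25')
assert len(rows) == 1
assert rows[0]['Confidence score'] == 'f14'      # 15th tab25 column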
|
def __SetBaseHeaders(self, http_request, client):
"""Fill in the basic headers on http_request."""
# TODO(craigcitro): Make the default a little better here, and
# include the apitools version.
user_agent = client.user_agent or 'apitools-client/1.0'
http_request.headers['user-agent'] = user_agent
http_request.headers['accept'] = 'application/json'
http_request.headers['accept-encoding'] = 'gzip, deflate'
|
def function[__SetBaseHeaders, parameter[self, http_request, client]]:
constant[Fill in the basic headers on http_request.]
variable[user_agent] assign[=] <ast.BoolOp object at 0x7da1b07f9750>
call[name[http_request].headers][constant[user-agent]] assign[=] name[user_agent]
call[name[http_request].headers][constant[accept]] assign[=] constant[application/json]
call[name[http_request].headers][constant[accept-encoding]] assign[=] constant[gzip, deflate]
|
keyword[def] identifier[__SetBaseHeaders] ( identifier[self] , identifier[http_request] , identifier[client] ):
literal[string]
identifier[user_agent] = identifier[client] . identifier[user_agent] keyword[or] literal[string]
identifier[http_request] . identifier[headers] [ literal[string] ]= identifier[user_agent]
identifier[http_request] . identifier[headers] [ literal[string] ]= literal[string]
identifier[http_request] . identifier[headers] [ literal[string] ]= literal[string]
|
def __SetBaseHeaders(self, http_request, client):
"""Fill in the basic headers on http_request."""
# TODO(craigcitro): Make the default a little better here, and
# include the apitools version.
user_agent = client.user_agent or 'apitools-client/1.0'
http_request.headers['user-agent'] = user_agent
http_request.headers['accept'] = 'application/json'
http_request.headers['accept-encoding'] = 'gzip, deflate'
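
For reference, a hedged sketch mirroring the defaulting behavior above; expected_base_headers is a hypothetical helper, since the defining class (needed to call the name-mangled private method) is not shown in this record.

def expected_base_headers(user_agent=None):
    # Mirrors the fallback logic of __SetBaseHeaders above.
    return {
        'user-agent': user_agent or 'apitools-client/1.0',
        'accept': 'application/json',
        'accept-encoding': 'gzip, deflate',
    }

assert expected_base_headers()['user-agent'] == 'apitools-client/1.0'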
|
def setup_prjs_signals(self, ):
"""Setup the signals for the projects page
:returns: None
:rtype: None
:raises: None
"""
log.debug("Setting up projects page signals.")
self.prjs_prj_view_pb.clicked.connect(self.prjs_view_prj)
self.prjs_prj_create_pb.clicked.connect(self.prjs_create_prj)
|
def function[setup_prjs_signals, parameter[self]]:
constant[Setup the signals for the projects page
:returns: None
:rtype: None
:raises: None
]
call[name[log].debug, parameter[constant[Setting up projects page signals.]]]
call[name[self].prjs_prj_view_pb.clicked.connect, parameter[name[self].prjs_view_prj]]
call[name[self].prjs_prj_create_pb.clicked.connect, parameter[name[self].prjs_create_prj]]
|
keyword[def] identifier[setup_prjs_signals] ( identifier[self] ,):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[prjs_prj_view_pb] . identifier[clicked] . identifier[connect] ( identifier[self] . identifier[prjs_view_prj] )
identifier[self] . identifier[prjs_prj_create_pb] . identifier[clicked] . identifier[connect] ( identifier[self] . identifier[prjs_create_prj] )
|
def setup_prjs_signals(self):
"""Setup the signals for the projects page
:returns: None
:rtype: None
:raises: None
"""
log.debug('Setting up projects page signals.')
self.prjs_prj_view_pb.clicked.connect(self.prjs_view_prj)
self.prjs_prj_create_pb.clicked.connect(self.prjs_create_prj)
|
def split_candidates(candsfile, featind1, featind2, candsfile1, candsfile2):
""" Split features from one candsfile into two new candsfiles
featind1/2 is list of indices to take from d['features'].
New features and updated state dict go to candsfile1/2.
"""
with open(candsfile, 'rb') as pkl:
d = pickle.load(pkl)
cands = pickle.load(pkl)
features = d['features']
d1 = d.copy()
d2 = d.copy()
d1['features'] = [features[i] for i in featind1]
d2['features'] = [features[i] for i in featind2]
cands1 = {}
cands2 = {}
for key in cands:
cands1[key] = tuple([cands[key][i] for i in featind1])
cands2[key] = tuple([cands[key][i] for i in featind2])
with open(candsfile1, 'wb') as pkl:
pickle.dump(d1, pkl, protocol=2)
pickle.dump(cands1, pkl, protocol=2)
with open(candsfile2, 'wb') as pkl:
pickle.dump(d2, pkl, protocol=2)
pickle.dump(cands2, pkl, protocol=2)
|
def function[split_candidates, parameter[candsfile, featind1, featind2, candsfile1, candsfile2]]:
constant[ Split features from one candsfile into two new candsfiles
featind1/2 is list of indices to take from d['features'].
New features and updated state dict go to candsfile1/2.
]
with call[name[open], parameter[name[candsfile], constant[rb]]] begin[:]
variable[d] assign[=] call[name[pickle].load, parameter[name[pkl]]]
variable[cands] assign[=] call[name[pickle].load, parameter[name[pkl]]]
variable[features] assign[=] call[name[d]][constant[features]]
variable[d1] assign[=] call[name[d].copy, parameter[]]
variable[d2] assign[=] call[name[d].copy, parameter[]]
call[name[d1]][constant[features]] assign[=] <ast.ListComp object at 0x7da1b2525870>
call[name[d2]][constant[features]] assign[=] <ast.ListComp object at 0x7da1b2525f90>
variable[cands1] assign[=] dictionary[[], []]
variable[cands2] assign[=] dictionary[[], []]
for taget[name[key]] in starred[name[cands]] begin[:]
call[name[cands1]][name[key]] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da1b2524520>]]
call[name[cands2]][name[key]] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da1b25250c0>]]
with call[name[open], parameter[name[candsfile1], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[d1], name[pkl]]]
call[name[pickle].dump, parameter[name[cands1], name[pkl]]]
with call[name[open], parameter[name[candsfile2], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[d2], name[pkl]]]
call[name[pickle].dump, parameter[name[cands2], name[pkl]]]
|
keyword[def] identifier[split_candidates] ( identifier[candsfile] , identifier[featind1] , identifier[featind2] , identifier[candsfile1] , identifier[candsfile2] ):
literal[string]
keyword[with] identifier[open] ( identifier[candsfile] , literal[string] ) keyword[as] identifier[pkl] :
identifier[d] = identifier[pickle] . identifier[load] ( identifier[pkl] )
identifier[cands] = identifier[pickle] . identifier[load] ( identifier[pkl] )
identifier[features] = identifier[d] [ literal[string] ]
identifier[d1] = identifier[d] . identifier[copy] ()
identifier[d2] = identifier[d] . identifier[copy] ()
identifier[d1] [ literal[string] ]=[ identifier[features] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[featind1] ]
identifier[d2] [ literal[string] ]=[ identifier[features] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[featind2] ]
identifier[cands1] ={}
identifier[cands2] ={}
keyword[for] identifier[key] keyword[in] identifier[cands] :
identifier[cands1] [ identifier[key] ]= identifier[tuple] ([ identifier[cands] [ identifier[key] ][ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[featind1] ])
identifier[cands2] [ identifier[key] ]= identifier[tuple] ([ identifier[cands] [ identifier[key] ][ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[featind2] ])
keyword[with] identifier[open] ( identifier[candsfile1] , literal[string] ) keyword[as] identifier[pkl] :
identifier[pickle] . identifier[dump] ( identifier[d1] , identifier[pkl] , identifier[protocol] = literal[int] )
identifier[pickle] . identifier[dump] ( identifier[cands1] , identifier[pkl] , identifier[protocol] = literal[int] )
keyword[with] identifier[open] ( identifier[candsfile2] , literal[string] ) keyword[as] identifier[pkl] :
identifier[pickle] . identifier[dump] ( identifier[d2] , identifier[pkl] , identifier[protocol] = literal[int] )
identifier[pickle] . identifier[dump] ( identifier[cands2] , identifier[pkl] , identifier[protocol] = literal[int] )
|
def split_candidates(candsfile, featind1, featind2, candsfile1, candsfile2):
""" Split features from one candsfile into two new candsfiles
featind1/2 is list of indices to take from d['features'].
New features and updated state dict go to candsfile1/2.
"""
with open(candsfile, 'rb') as pkl:
d = pickle.load(pkl)
cands = pickle.load(pkl) # depends on [control=['with'], data=['pkl']]
features = d['features']
d1 = d.copy()
d2 = d.copy()
d1['features'] = [features[i] for i in featind1]
d2['features'] = [features[i] for i in featind2]
cands1 = {}
cands2 = {}
for key in cands:
cands1[key] = tuple([cands[key][i] for i in featind1])
cands2[key] = tuple([cands[key][i] for i in featind2]) # depends on [control=['for'], data=['key']]
with open(candsfile1, 'wb') as pkl:
pickle.dump(d1, pkl, protocol=2)
pickle.dump(cands1, pkl, protocol=2) # depends on [control=['with'], data=['pkl']]
with open(candsfile2, 'wb') as pkl:
pickle.dump(d2, pkl, protocol=2)
pickle.dump(cands2, pkl, protocol=2) # depends on [control=['with'], data=['pkl']]
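
A hypothetical end-to-end sketch: build a tiny candsfile, then split the first two features from the remaining three. The feature names, keys, and file names are all illustrative placeholders.

import pickle

d = {'features': ['snr', 'dm', 'time', 'l1', 'm1']}      # illustrative names
cands = {('scan0', 42): (8.5, 55.0, 1.23, 0.01, -0.02)}  # one fake candidate
with open('cands_all.pkl', 'wb') as pkl:
    pickle.dump(d, pkl, protocol=2)
    pickle.dump(cands, pkl, protocol=2)
split_candidates('cands_all.pkl', [0, 1], [2, 3, 4],
                 'cands_basic.pkl', 'cands_extra.pkl')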
|
def deploy_template(access_token, subscription_id, resource_group, deployment_name, template,
parameters):
'''Deploy a template referenced by a JSON string, with parameters as a JSON string.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
deployment_name (str): A name you give to the deployment.
template (str): String representation of a JSON template body.
parameters (str): String representation of a JSON template parameters body.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.Resources/deployments/', deployment_name,
'?api-version=', DEPLOYMENTS_API])
properties = {'template': template}
properties['mode'] = 'Incremental'
properties['parameters'] = parameters
template_body = {'properties': properties}
body = json.dumps(template_body)
return do_put(endpoint, body, access_token)
|
def function[deploy_template, parameter[access_token, subscription_id, resource_group, deployment_name, template, parameters]]:
constant[Deploy a template referenced by a JSON string, with parameters as a JSON string.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
deployment_name (str): A name you give to the deployment.
template (str): String representation of a JSON template body.
parameters (str): String representation of a JSON template parameters body.
Returns:
HTTP response.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b05aece0>, <ast.Constant object at 0x7da1b05aef80>, <ast.Name object at 0x7da1b05af040>, <ast.Constant object at 0x7da1b05af280>, <ast.Name object at 0x7da1b05af820>, <ast.Constant object at 0x7da1b05af880>, <ast.Name object at 0x7da1b05af910>, <ast.Constant object at 0x7da1b05af6d0>, <ast.Name object at 0x7da1b05aef50>]]]]
variable[properties] assign[=] dictionary[[<ast.Constant object at 0x7da1b05af310>], [<ast.Name object at 0x7da1b05af3d0>]]
call[name[properties]][constant[mode]] assign[=] constant[Incremental]
call[name[properties]][constant[parameters]] assign[=] name[parameters]
variable[template_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b05af460>], [<ast.Name object at 0x7da1b05af250>]]
variable[body] assign[=] call[name[json].dumps, parameter[name[template_body]]]
return[call[name[do_put], parameter[name[endpoint], name[body], name[access_token]]]]
|
keyword[def] identifier[deploy_template] ( identifier[access_token] , identifier[subscription_id] , identifier[resource_group] , identifier[deployment_name] , identifier[template] ,
identifier[parameters] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[resource_group] ,
literal[string] , identifier[deployment_name] ,
literal[string] , identifier[DEPLOYMENTS_API] ])
identifier[properties] ={ literal[string] : identifier[template] }
identifier[properties] [ literal[string] ]= literal[string]
identifier[properties] [ literal[string] ]= identifier[parameters]
identifier[template_body] ={ literal[string] : identifier[properties] }
identifier[body] = identifier[json] . identifier[dumps] ( identifier[template_body] )
keyword[return] identifier[do_put] ( identifier[endpoint] , identifier[body] , identifier[access_token] )
|
def deploy_template(access_token, subscription_id, resource_group, deployment_name, template, parameters):
"""Deploy a template referenced by a JSON string, with parameters as a JSON string.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
deployment_name (str): A name you give to the deployment.
template (str): String representation of a JSON template body.
parameters (str): String representation of a JSON template parameters body.
Returns:
HTTP response.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API])
properties = {'template': template}
properties['mode'] = 'Incremental'
properties['parameters'] = parameters
template_body = {'properties': properties}
body = json.dumps(template_body)
return do_put(endpoint, body, access_token)
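
A hypothetical invocation sketch; every value below is a placeholder, and the commented call would issue a real PUT to Azure Resource Manager via do_put.

import json

access_token = '<valid-azure-bearer-token>'          # placeholder
template = json.dumps({'resources': []})             # minimal illustrative body
parameters = json.dumps({})
# resp = deploy_template(access_token,
#                        '00000000-0000-0000-0000-000000000000',  # subscription
#                        'my-resource-group', 'deployment-1',
#                        template, parameters)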
|
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
# Headers are case-insensitive. Probably a better way to do this.
match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split(".")[1]
|
def function[get_endpoint_name, parameter[self, headers]]:
constant[Parses request headers and extracts part of the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
]
variable[match] assign[=] <ast.BoolOp object at 0x7da1b1953b80>
if name[match] begin[:]
return[call[call[name[match].split, parameter[constant[.]]]][constant[1]]]
|
keyword[def] identifier[get_endpoint_name] ( identifier[self] , identifier[headers] ):
literal[string]
identifier[match] = identifier[headers] . identifier[get] ( literal[string] ) keyword[or] identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[match] :
keyword[return] identifier[match] . identifier[split] ( literal[string] )[ literal[int] ]
|
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
# Headers are case-insensitive. Probably a better way to do this.
match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split('.')[1] # depends on [control=['if'], data=[]]
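
A quick self-check of the header parsing; passing None for self works because the method never touches it.

headers = {'X-Amz-Target': 'DynamoDB_20111205.ListTables'}
assert get_endpoint_name(None, headers) == 'ListTables'
assert get_endpoint_name(None, {'x-amz-target': 'DynamoDB_20111205.PutItem'}) == 'PutItem'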
|
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool
|
def function[runPowerNotificationsThread, parameter[self]]:
constant[Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop.]
variable[pool] assign[=] call[call[name[NSAutoreleasePool].alloc, parameter[]].init, parameter[]]
def function[on_power_source_notification, parameter[context]]:
with name[self]._lock begin[:]
for taget[name[weak_observer]] in starred[name[self]._weak_observers] begin[:]
variable[observer] assign[=] call[name[weak_observer], parameter[]]
if name[observer] begin[:]
call[name[observer].on_power_source_notification, parameter[]]
name[self]._source assign[=] call[name[IOPSNotificationCreateRunLoopSource], parameter[name[on_power_source_notification], constant[None]]]
call[name[CFRunLoopAddSource], parameter[call[call[name[NSRunLoop].currentRunLoop, parameter[]].getCFRunLoop, parameter[]], name[self]._source, name[kCFRunLoopDefaultMode]]]
while <ast.UnaryOp object at 0x7da1b11bf040> begin[:]
call[call[name[NSRunLoop].currentRunLoop, parameter[]].runMode_beforeDate_, parameter[name[NSDefaultRunLoopMode], call[name[NSDate].distantFuture, parameter[]]]]
<ast.Delete object at 0x7da1b11bf8b0>
|
keyword[def] identifier[runPowerNotificationsThread] ( identifier[self] ):
literal[string]
identifier[pool] = identifier[NSAutoreleasePool] . identifier[alloc] (). identifier[init] ()
@ identifier[objc] . identifier[callbackFor] ( identifier[IOPSNotificationCreateRunLoopSource] )
keyword[def] identifier[on_power_source_notification] ( identifier[context] ):
keyword[with] identifier[self] . identifier[_lock] :
keyword[for] identifier[weak_observer] keyword[in] identifier[self] . identifier[_weak_observers] :
identifier[observer] = identifier[weak_observer] ()
keyword[if] identifier[observer] :
identifier[observer] . identifier[on_power_source_notification] ()
identifier[self] . identifier[_source] = identifier[IOPSNotificationCreateRunLoopSource] ( identifier[on_power_source_notification] , keyword[None] )
identifier[CFRunLoopAddSource] ( identifier[NSRunLoop] . identifier[currentRunLoop] (). identifier[getCFRunLoop] (), identifier[self] . identifier[_source] , identifier[kCFRunLoopDefaultMode] )
keyword[while] keyword[not] identifier[NSThread] . identifier[currentThread] (). identifier[isCancelled] ():
identifier[NSRunLoop] . identifier[currentRunLoop] (). identifier[runMode_beforeDate_] ( identifier[NSDefaultRunLoopMode] , identifier[NSDate] . identifier[distantFuture] ())
keyword[del] identifier[pool]
|
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['weak_observer']] # depends on [control=['with'], data=[]]
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture()) # depends on [control=['while'], data=[]]
del pool
|
def get_data_coeficientes_perfilado_2017(force_download=False):
"""Extrae la información de las dos hojas del Excel proporcionado por REE
con los perfiles iniciales para 2017.
:param force_download: Descarga el fichero 'raw' del servidor, en vez de acudir a la copia local.
:return: perfiles_2017, coefs_alpha_beta_gamma
:rtype: tuple
"""
path_perfs = os.path.join(STORAGE_DIR, 'perfiles_consumo_2017.h5')
if force_download or not os.path.exists(path_perfs):
# Profiling coefficients and reference demand (sheet 1)
cols_sheet1 = ['Mes', 'Día', 'Hora',
'Pa,0m,d,h', 'Pb,0m,d,h', 'Pc,0m,d,h', 'Pd,0m,d,h', 'Demanda de Referencia 2017 (MW)']
perfs_2017 = pd.read_excel(URL_PERFILES_2017, header=None, skiprows=[0, 1], names=cols_sheet1)
perfs_2017['ts'] = pd.DatetimeIndex(start='2017-01-01', freq='H', tz=TZ, end='2017-12-31 23:59')
perfs_2017 = perfs_2017.set_index('ts').drop(['Mes', 'Día', 'Hora'], axis=1)
# Alpha, beta, gamma coefficients (sheet 2):
coefs_alpha_beta_gamma = pd.read_excel(URL_PERFILES_2017, sheetname=1)
print('Writing 2017 profiles to disk at {}'.format(path_perfs))
with pd.HDFStore(path_perfs, 'w') as st:
st.put('coefs', coefs_alpha_beta_gamma)
st.put('perfiles', perfs_2017)
print('HDFStore size: {:.3f} KB'.format(os.path.getsize(path_perfs) / 1000))
else:
with pd.HDFStore(path_perfs, 'r') as st:
coefs_alpha_beta_gamma = st['coefs']
perfs_2017 = st['perfiles']
return perfs_2017, coefs_alpha_beta_gamma
|
def function[get_data_coeficientes_perfilado_2017, parameter[force_download]]:
constant[Extract the information from the two sheets of the Excel file provided by REE
with the initial consumption profiles for 2017.
:param force_download: Download the 'raw' file from the server instead of using the local copy.
:return: perfiles_2017, coefs_alpha_beta_gamma
:rtype: tuple
]
variable[path_perfs] assign[=] call[name[os].path.join, parameter[name[STORAGE_DIR], constant[perfiles_consumo_2017.h5]]]
if <ast.BoolOp object at 0x7da1afea9c00> begin[:]
variable[cols_sheet1] assign[=] list[[<ast.Constant object at 0x7da1afeabe80>, <ast.Constant object at 0x7da1afea8ee0>, <ast.Constant object at 0x7da1afea9330>, <ast.Constant object at 0x7da1afeabd60>, <ast.Constant object at 0x7da1afea9f60>, <ast.Constant object at 0x7da1afea8520>, <ast.Constant object at 0x7da1afea9b10>, <ast.Constant object at 0x7da1afea90c0>]]
variable[perfs_2017] assign[=] call[name[pd].read_excel, parameter[name[URL_PERFILES_2017]]]
call[name[perfs_2017]][constant[ts]] assign[=] call[name[pd].DatetimeIndex, parameter[]]
variable[perfs_2017] assign[=] call[call[name[perfs_2017].set_index, parameter[constant[ts]]].drop, parameter[list[[<ast.Constant object at 0x7da1afeab6d0>, <ast.Constant object at 0x7da1afea9150>, <ast.Constant object at 0x7da1afea8b80>]]]]
variable[coefs_alpha_beta_gamma] assign[=] call[name[pd].read_excel, parameter[name[URL_PERFILES_2017]]]
call[name[print], parameter[call[constant[Writing 2017 profiles to disk at {}].format, parameter[name[path_perfs]]]]]
with call[name[pd].HDFStore, parameter[name[path_perfs], constant[w]]] begin[:]
call[name[st].put, parameter[constant[coefs], name[coefs_alpha_beta_gamma]]]
call[name[st].put, parameter[constant[perfiles], name[perfs_2017]]]
call[name[print], parameter[call[constant[HDFStore size: {:.3f} KB].format, parameter[binary_operation[call[name[os].path.getsize, parameter[name[path_perfs]]] / constant[1000]]]]]]
return[tuple[[<ast.Name object at 0x7da18bccb9d0>, <ast.Name object at 0x7da18bcc8a60>]]]
|
keyword[def] identifier[get_data_coeficientes_perfilado_2017] ( identifier[force_download] = keyword[False] ):
literal[string]
identifier[path_perfs] = identifier[os] . identifier[path] . identifier[join] ( identifier[STORAGE_DIR] , literal[string] )
keyword[if] identifier[force_download] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path_perfs] ):
identifier[cols_sheet1] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[perfs_2017] = identifier[pd] . identifier[read_excel] ( identifier[URL_PERFILES_2017] , identifier[header] = keyword[None] , identifier[skiprows] =[ literal[int] , literal[int] ], identifier[names] = identifier[cols_sheet1] )
identifier[perfs_2017] [ literal[string] ]= identifier[pd] . identifier[DatetimeIndex] ( identifier[start] = literal[string] , identifier[freq] = literal[string] , identifier[tz] = identifier[TZ] , identifier[end] = literal[string] )
identifier[perfs_2017] = identifier[perfs_2017] . identifier[set_index] ( literal[string] ). identifier[drop] ([ literal[string] , literal[string] , literal[string] ], identifier[axis] = literal[int] )
identifier[coefs_alpha_beta_gamma] = identifier[pd] . identifier[read_excel] ( identifier[URL_PERFILES_2017] , identifier[sheetname] = literal[int] )
identifier[print] ( literal[string] . identifier[format] ( identifier[path_perfs] ))
keyword[with] identifier[pd] . identifier[HDFStore] ( identifier[path_perfs] , literal[string] ) keyword[as] identifier[st] :
identifier[st] . identifier[put] ( literal[string] , identifier[coefs_alpha_beta_gamma] )
identifier[st] . identifier[put] ( literal[string] , identifier[perfs_2017] )
identifier[print] ( literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[getsize] ( identifier[path_perfs] )/ literal[int] ))
keyword[else] :
keyword[with] identifier[pd] . identifier[HDFStore] ( identifier[path_perfs] , literal[string] ) keyword[as] identifier[st] :
identifier[coefs_alpha_beta_gamma] = identifier[st] [ literal[string] ]
identifier[perfs_2017] = identifier[st] [ literal[string] ]
keyword[return] identifier[perfs_2017] , identifier[coefs_alpha_beta_gamma]
|
def get_data_coeficientes_perfilado_2017(force_download=False):
"""Extrae la información de las dos hojas del Excel proporcionado por REE
con los perfiles iniciales para 2017.
:param force_download: Descarga el fichero 'raw' del servidor, en vez de acudir a la copia local.
:return: perfiles_2017, coefs_alpha_beta_gamma
:rtype: tuple
"""
path_perfs = os.path.join(STORAGE_DIR, 'perfiles_consumo_2017.h5')
if force_download or not os.path.exists(path_perfs):
# Profiling coefficients and reference demand (sheet 1)
cols_sheet1 = ['Mes', 'Día', 'Hora', 'Pa,0m,d,h', 'Pb,0m,d,h', 'Pc,0m,d,h', 'Pd,0m,d,h', 'Demanda de Referencia 2017 (MW)']
perfs_2017 = pd.read_excel(URL_PERFILES_2017, header=None, skiprows=[0, 1], names=cols_sheet1)
perfs_2017['ts'] = pd.DatetimeIndex(start='2017-01-01', freq='H', tz=TZ, end='2017-12-31 23:59')
perfs_2017 = perfs_2017.set_index('ts').drop(['Mes', 'Día', 'Hora'], axis=1)
# Alpha, beta, gamma coefficients (sheet 2):
coefs_alpha_beta_gamma = pd.read_excel(URL_PERFILES_2017, sheetname=1)
print('Writing 2017 profiles to disk at {}'.format(path_perfs))
with pd.HDFStore(path_perfs, 'w') as st:
st.put('coefs', coefs_alpha_beta_gamma)
st.put('perfiles', perfs_2017) # depends on [control=['with'], data=['st']]
print('HDFStore size: {:.3f} KB'.format(os.path.getsize(path_perfs) / 1000)) # depends on [control=['if'], data=[]]
else:
with pd.HDFStore(path_perfs, 'r') as st:
coefs_alpha_beta_gamma = st['coefs']
perfs_2017 = st['perfiles'] # depends on [control=['with'], data=['st']]
return (perfs_2017, coefs_alpha_beta_gamma)
|
def _DeepCopy(self, obj):
"""Creates an object copy by serializing/deserializing it.
RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
hard to catch bugs.
Args:
obj: RDFValue to be copied.
Returns:
A deep copy of the passed RDFValue.
"""
precondition.AssertType(obj, rdfvalue.RDFValue)
return obj.__class__.FromSerializedString(obj.SerializeToString())
|
def function[_DeepCopy, parameter[self, obj]]:
constant[Creates an object copy by serializing/deserializing it.
RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
hard to catch bugs.
Args:
obj: RDFValue to be copied.
Returns:
A deep copy of the passed RDFValue.
]
call[name[precondition].AssertType, parameter[name[obj], name[rdfvalue].RDFValue]]
return[call[name[obj].__class__.FromSerializedString, parameter[call[name[obj].SerializeToString, parameter[]]]]]
|
keyword[def] identifier[_DeepCopy] ( identifier[self] , identifier[obj] ):
literal[string]
identifier[precondition] . identifier[AssertType] ( identifier[obj] , identifier[rdfvalue] . identifier[RDFValue] )
keyword[return] identifier[obj] . identifier[__class__] . identifier[FromSerializedString] ( identifier[obj] . identifier[SerializeToString] ())
|
def _DeepCopy(self, obj):
"""Creates an object copy by serializing/deserializing it.
RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
hard to catch bugs.
Args:
obj: RDFValue to be copied.
Returns:
A deep copy of the passed RDFValue.
"""
precondition.AssertType(obj, rdfvalue.RDFValue)
return obj.__class__.FromSerializedString(obj.SerializeToString())
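
The round-trip below illustrates the contract this relies on: mutating the clone's repeated data must not leak back into the original. MiniValue is a hypothetical stand-in for an RDFValue, not a GRR class.

class MiniValue(object):
    # Hypothetical stand-in mimicking the serialize/parse contract.
    def __init__(self, items=None):
        self.items = list(items or [])

    def SerializeToString(self):
        return ','.join(self.items)

    @classmethod
    def FromSerializedString(cls, data):
        return cls(data.split(',') if data else [])

original = MiniValue(['a', 'b'])
clone = MiniValue.FromSerializedString(original.SerializeToString())
clone.items.append('c')
assert original.items == ['a', 'b']  # the original is untouched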
|
def _fetch(self, resource, params):
"""Fetch a resource.
:param resource: resource to get
:param params: dict with the HTTP parameters needed to get
the given resource
"""
url = self.URL % {'resource': resource}
params[self.PTOKEN] = self.api_token
logger.debug("Slack client requests: %s params: %s",
resource, str(params))
r = self.fetch(url, payload=params)
# Check for possible API errors
result = r.json()
if not result['ok']:
raise SlackClientError(error=result['error'])
return r.text
|
def function[_fetch, parameter[self, resource, params]]:
constant[Fetch a resource.
:param resource: resource to get
:param params: dict with the HTTP parameters needed to get
the given resource
]
variable[url] assign[=] binary_operation[name[self].URL <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b02f3b50>], [<ast.Name object at 0x7da1b02f33d0>]]]
call[name[params]][name[self].PTOKEN] assign[=] name[self].api_token
call[name[logger].debug, parameter[constant[Slack client requests: %s params: %s], name[resource], call[name[str], parameter[name[params]]]]]
variable[r] assign[=] call[name[self].fetch, parameter[name[url]]]
variable[result] assign[=] call[name[r].json, parameter[]]
if <ast.UnaryOp object at 0x7da1b02f37c0> begin[:]
<ast.Raise object at 0x7da1b02f3430>
return[name[r].text]
|
keyword[def] identifier[_fetch] ( identifier[self] , identifier[resource] , identifier[params] ):
literal[string]
identifier[url] = identifier[self] . identifier[URL] %{ literal[string] : identifier[resource] }
identifier[params] [ identifier[self] . identifier[PTOKEN] ]= identifier[self] . identifier[api_token]
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[resource] , identifier[str] ( identifier[params] ))
identifier[r] = identifier[self] . identifier[fetch] ( identifier[url] , identifier[payload] = identifier[params] )
identifier[result] = identifier[r] . identifier[json] ()
keyword[if] keyword[not] identifier[result] [ literal[string] ]:
keyword[raise] identifier[SlackClientError] ( identifier[error] = identifier[result] [ literal[string] ])
keyword[return] identifier[r] . identifier[text]
|
def _fetch(self, resource, params):
"""Fetch a resource.
:param resource: resource to get
:param params: dict with the HTTP parameters needed to get
the given resource
"""
url = self.URL % {'resource': resource}
params[self.PTOKEN] = self.api_token
logger.debug('Slack client requests: %s params: %s', resource, str(params))
r = self.fetch(url, payload=params)
# Check for possible API errors
result = r.json()
if not result['ok']:
raise SlackClientError(error=result['error']) # depends on [control=['if'], data=[]]
return r.text
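
A hedged usage sketch; 'users.list' and the limit parameter are illustrative Slack Web API values, and client is assumed to be an already-constructed instance of the class that defines _fetch.

def fetch_users(client, limit=200):
    # `client` is assumed to be an already-authenticated instance of the
    # class defining _fetch; raises SlackClientError when the API reports
    # an error.
    return client._fetch('users.list', {'limit': limit})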
|
def K(self, parm):
""" Returns the Gram Matrix
Parameters
----------
parm : np.ndarray
Parameters for the Gram Matrix
Returns
----------
- Gram Matrix (np.ndarray)
"""
return Periodic_K_matrix(self.X, parm) + np.identity(self.X.shape[0])*(10**-10)
|
def function[K, parameter[self, parm]]:
constant[ Returns the Gram Matrix
Parameters
----------
parm : np.ndarray
Parameters for the Gram Matrix
Returns
----------
- Gram Matrix (np.ndarray)
]
return[binary_operation[call[name[Periodic_K_matrix], parameter[name[self].X, name[parm]]] + binary_operation[call[name[np].identity, parameter[call[name[self].X.shape][constant[0]]]] * binary_operation[constant[10] ** <ast.UnaryOp object at 0x7da20e9b18d0>]]]]
|
keyword[def] identifier[K] ( identifier[self] , identifier[parm] ):
literal[string]
keyword[return] identifier[Periodic_K_matrix] ( identifier[self] . identifier[X] , identifier[parm] )+ identifier[np] . identifier[identity] ( identifier[self] . identifier[X] . identifier[shape] [ literal[int] ])*( literal[int] **- literal[int] )
|
def K(self, parm):
""" Returns the Gram Matrix
Parameters
----------
parm : np.ndarray
Parameters for the Gram Matrix
Returns
----------
- Gram Matrix (np.ndarray)
"""
return Periodic_K_matrix(self.X, parm) + np.identity(self.X.shape[0]) * 10 ** (-10)
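
For context: Periodic_K_matrix presumably evaluates a periodic covariance; the standard form is k(x, x') = \sigma^2 \exp(-2 \sin^2(\pi |x - x'| / p) / \ell^2), though the exact parametrization lives in that helper. The 1e-10 identity term is the usual jitter that keeps the Gram matrix positive definite for Cholesky factorization. A minimal sketch of that assumed kernel:

import numpy as np

def periodic_kernel(X, lengthscale, period, variance):
    # Assumed standard periodic kernel over a 1-D input array X; a sketch,
    # not necessarily the parametrization used by Periodic_K_matrix.
    d = np.abs(X[:, None] - X[None, :])
    return variance * np.exp(-2.0 * np.sin(np.pi * d / period) ** 2 / lengthscale ** 2)

K_demo = periodic_kernel(np.linspace(0.0, 1.0, 5), 0.5, 1.0, 1.0)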
|
def fake2db_sqlite_initiator(self, number_of_rows, name=None, custom=None):
'''Main handler for the operation
'''
rows = number_of_rows
conn = self.database_caller_creator(name)
if custom:
self.custom_db_creator(rows, conn, custom)
conn.close()
sys.exit(0)
self.data_filler_simple_registration(rows, conn)
self.data_filler_detailed_registration(rows, conn)
self.data_filler_company(rows, conn)
self.data_filler_user_agent(rows, conn)
self.data_filler_customer(rows, conn)
conn.close()
|
def function[fake2db_sqlite_initiator, parameter[self, number_of_rows, name, custom]]:
constant[Main handler for the operation
]
variable[rows] assign[=] name[number_of_rows]
variable[conn] assign[=] call[name[self].database_caller_creator, parameter[name[name]]]
if name[custom] begin[:]
call[name[self].custom_db_creator, parameter[name[rows], name[conn], name[custom]]]
call[name[conn].close, parameter[]]
call[name[sys].exit, parameter[constant[0]]]
call[name[self].data_filler_simple_registration, parameter[name[rows], name[conn]]]
call[name[self].data_filler_detailed_registration, parameter[name[rows], name[conn]]]
call[name[self].data_filler_company, parameter[name[rows], name[conn]]]
call[name[self].data_filler_user_agent, parameter[name[rows], name[conn]]]
call[name[self].data_filler_customer, parameter[name[rows], name[conn]]]
call[name[conn].close, parameter[]]
|
keyword[def] identifier[fake2db_sqlite_initiator] ( identifier[self] , identifier[number_of_rows] , identifier[name] = keyword[None] , identifier[custom] = keyword[None] ):
literal[string]
identifier[rows] = identifier[number_of_rows]
identifier[conn] = identifier[self] . identifier[database_caller_creator] ( identifier[name] )
keyword[if] identifier[custom] :
identifier[self] . identifier[custom_db_creator] ( identifier[rows] , identifier[conn] , identifier[custom] )
identifier[conn] . identifier[close] ()
identifier[sys] . identifier[exit] ( literal[int] )
identifier[self] . identifier[data_filler_simple_registration] ( identifier[rows] , identifier[conn] )
identifier[self] . identifier[data_filler_detailed_registration] ( identifier[rows] , identifier[conn] )
identifier[self] . identifier[data_filler_company] ( identifier[rows] , identifier[conn] )
identifier[self] . identifier[data_filler_user_agent] ( identifier[rows] , identifier[conn] )
identifier[self] . identifier[data_filler_customer] ( identifier[rows] , identifier[conn] )
identifier[conn] . identifier[close] ()
|
def fake2db_sqlite_initiator(self, number_of_rows, name=None, custom=None):
"""Main handler for the operation
"""
rows = number_of_rows
conn = self.database_caller_creator(name)
if custom:
self.custom_db_creator(rows, conn, custom)
conn.close()
sys.exit(0) # depends on [control=['if'], data=[]]
self.data_filler_simple_registration(rows, conn)
self.data_filler_detailed_registration(rows, conn)
self.data_filler_company(rows, conn)
self.data_filler_user_agent(rows, conn)
self.data_filler_customer(rows, conn)
conn.close()
|
def gaussian_filter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = float(sigma[ax])
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1, ] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax], None, None)
filtered = filtered[tuple(sl)]
return filtered + baseline
|
def function[gaussian_filter, parameter[data, sigma]]:
constant[
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
]
if call[name[np].isscalar, parameter[name[sigma]]] begin[:]
variable[sigma] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da18c4cd7e0>]] * name[data].ndim]
variable[baseline] assign[=] call[name[data].mean, parameter[]]
variable[filtered] assign[=] binary_operation[name[data] - name[baseline]]
for taget[name[ax]] in starred[call[name[range], parameter[name[data].ndim]]] begin[:]
variable[s] assign[=] call[name[float], parameter[call[name[sigma]][name[ax]]]]
if compare[name[s] equal[==] constant[0]] begin[:]
continue
variable[ksize] assign[=] call[name[int], parameter[binary_operation[name[s] * constant[6]]]]
variable[x] assign[=] call[name[np].arange, parameter[<ast.UnaryOp object at 0x7da18c4cc730>, name[ksize]]]
variable[kernel] assign[=] call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da18c4ce260> / binary_operation[constant[2] * binary_operation[name[s] ** constant[2]]]]]]
variable[kshape] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18c4ccfd0>]] * name[data].ndim]
call[name[kshape]][name[ax]] assign[=] call[name[len], parameter[name[kernel]]]
variable[kernel] assign[=] call[name[kernel].reshape, parameter[name[kshape]]]
variable[shape] assign[=] binary_operation[call[name[data].shape][name[ax]] + name[ksize]]
variable[scale] assign[=] binary_operation[constant[1.0] / binary_operation[call[name[abs], parameter[name[s]]] * binary_operation[binary_operation[constant[2] * name[np].pi] ** constant[0.5]]]]
variable[filtered] assign[=] binary_operation[name[scale] * call[name[np].fft.irfft, parameter[binary_operation[call[name[np].fft.rfft, parameter[name[filtered], name[shape]]] * call[name[np].fft.rfft, parameter[name[kernel], name[shape]]]]]]]
variable[sl] assign[=] binary_operation[list[[<ast.Call object at 0x7da18c4cfb50>]] * name[data].ndim]
call[name[sl]][name[ax]] assign[=] call[name[slice], parameter[binary_operation[call[name[filtered].shape][name[ax]] - call[name[data].shape][name[ax]]], constant[None], constant[None]]]
variable[filtered] assign[=] call[name[filtered]][call[name[tuple], parameter[name[sl]]]]
return[binary_operation[name[filtered] + name[baseline]]]
|
keyword[def] identifier[gaussian_filter] ( identifier[data] , identifier[sigma] ):
literal[string]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[sigma] ):
identifier[sigma] =( identifier[sigma] ,)* identifier[data] . identifier[ndim]
identifier[baseline] = identifier[data] . identifier[mean] ()
identifier[filtered] = identifier[data] - identifier[baseline]
keyword[for] identifier[ax] keyword[in] identifier[range] ( identifier[data] . identifier[ndim] ):
identifier[s] = identifier[float] ( identifier[sigma] [ identifier[ax] ])
keyword[if] identifier[s] == literal[int] :
keyword[continue]
identifier[ksize] = identifier[int] ( identifier[s] * literal[int] )
identifier[x] = identifier[np] . identifier[arange] (- identifier[ksize] , identifier[ksize] )
identifier[kernel] = identifier[np] . identifier[exp] (- identifier[x] ** literal[int] /( literal[int] * identifier[s] ** literal[int] ))
identifier[kshape] =[ literal[int] ,]* identifier[data] . identifier[ndim]
identifier[kshape] [ identifier[ax] ]= identifier[len] ( identifier[kernel] )
identifier[kernel] = identifier[kernel] . identifier[reshape] ( identifier[kshape] )
identifier[shape] = identifier[data] . identifier[shape] [ identifier[ax] ]+ identifier[ksize]
identifier[scale] = literal[int] /( identifier[abs] ( identifier[s] )*( literal[int] * identifier[np] . identifier[pi] )** literal[int] )
identifier[filtered] = identifier[scale] * identifier[np] . identifier[fft] . identifier[irfft] ( identifier[np] . identifier[fft] . identifier[rfft] ( identifier[filtered] , identifier[shape] , identifier[axis] = identifier[ax] )*
identifier[np] . identifier[fft] . identifier[rfft] ( identifier[kernel] , identifier[shape] , identifier[axis] = identifier[ax] ),
identifier[axis] = identifier[ax] )
identifier[sl] =[ identifier[slice] ( keyword[None] )]* identifier[data] . identifier[ndim]
identifier[sl] [ identifier[ax] ]= identifier[slice] ( identifier[filtered] . identifier[shape] [ identifier[ax] ]- identifier[data] . identifier[shape] [ identifier[ax] ], keyword[None] , keyword[None] )
identifier[filtered] = identifier[filtered] [ identifier[sl] ]
keyword[return] identifier[filtered] + identifier[baseline]
|
def gaussian_filter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim # depends on [control=['if'], data=[]]
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = float(sigma[ax])
if s == 0:
continue # depends on [control=['if'], data=[]]
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x ** 2 / (2 * s ** 2))
kshape = [1] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2 * np.pi) ** 0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * np.fft.rfft(kernel, shape, axis=ax), axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax] - data.shape[ax], None, None)
filtered = filtered[sl] # depends on [control=['for'], data=['ax']]
return filtered + baseline
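
A quick sanity check for the FFT-based filter above is to compare it against the SciPy original it replaces. A minimal sketch, assuming NumPy and SciPy are installed and gaussian_filter is in scope (note that the list-of-slices indexing inside the function requires an older NumPy, or tuple(sl) on recent releases):

import numpy as np
import scipy.ndimage

rng = np.random.default_rng(0)
data = rng.standard_normal((64, 64))

approx = gaussian_filter(data, sigma=2.0)         # FFT-based version above
exact = scipy.ndimage.gaussian_filter(data, 2.0)  # reference implementation
print(np.abs(approx - exact).max())               # approximate agreement only, per the docstring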
|
def has_object_permission(self, request, view, obj):
"""Check object permissions."""
# admins can do anything
if request.user.is_superuser:
return True
# `share` permission is required for editing permissions
if 'permissions' in view.action:
self.perms_map['POST'] = ['%(app_label)s.share_%(model_name)s']
if view.action in ['add_data', 'remove_data']:
self.perms_map['POST'] = ['%(app_label)s.add_%(model_name)s']
if hasattr(view, 'get_queryset'):
queryset = view.get_queryset()
else:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoObjectPermissions on a view that '
'does not set `.queryset` or have a `.get_queryset()` method.'
)
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj) and not AnonymousUser().has_perms(perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in permissions.SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
|
def function[has_object_permission, parameter[self, request, view, obj]]:
constant[Check object permissions.]
if name[request].user.is_superuser begin[:]
return[constant[True]]
if compare[constant[permissions] in name[view].action] begin[:]
call[name[self].perms_map][constant[POST]] assign[=] list[[<ast.Constant object at 0x7da1b1b68190>]]
if compare[name[view].action in list[[<ast.Constant object at 0x7da1b1b6b760>, <ast.Constant object at 0x7da1b1b6a500>]]] begin[:]
call[name[self].perms_map][constant[POST]] assign[=] list[[<ast.Constant object at 0x7da1b1b6af20>]]
if call[name[hasattr], parameter[name[view], constant[get_queryset]]] begin[:]
variable[queryset] assign[=] call[name[view].get_queryset, parameter[]]
assert[compare[name[queryset] is_not constant[None]]]
variable[model_cls] assign[=] name[queryset].model
variable[user] assign[=] name[request].user
variable[perms] assign[=] call[name[self].get_required_object_permissions, parameter[name[request].method, name[model_cls]]]
if <ast.BoolOp object at 0x7da1b1af8c70> begin[:]
if compare[name[request].method in name[permissions].SAFE_METHODS] begin[:]
<ast.Raise object at 0x7da1b1adf640>
variable[read_perms] assign[=] call[name[self].get_required_object_permissions, parameter[constant[GET], name[model_cls]]]
if <ast.UnaryOp object at 0x7da1b1adc490> begin[:]
<ast.Raise object at 0x7da1b1add180>
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[has_object_permission] ( identifier[self] , identifier[request] , identifier[view] , identifier[obj] ):
literal[string]
keyword[if] identifier[request] . identifier[user] . identifier[is_superuser] :
keyword[return] keyword[True]
keyword[if] literal[string] keyword[in] identifier[view] . identifier[action] :
identifier[self] . identifier[perms_map] [ literal[string] ]=[ literal[string] ]
keyword[if] identifier[view] . identifier[action] keyword[in] [ literal[string] , literal[string] ]:
identifier[self] . identifier[perms_map] [ literal[string] ]=[ literal[string] ]
keyword[if] identifier[hasattr] ( identifier[view] , literal[string] ):
identifier[queryset] = identifier[view] . identifier[get_queryset] ()
keyword[else] :
identifier[queryset] = identifier[getattr] ( identifier[view] , literal[string] , keyword[None] )
keyword[assert] identifier[queryset] keyword[is] keyword[not] keyword[None] ,(
literal[string]
literal[string]
)
identifier[model_cls] = identifier[queryset] . identifier[model]
identifier[user] = identifier[request] . identifier[user]
identifier[perms] = identifier[self] . identifier[get_required_object_permissions] ( identifier[request] . identifier[method] , identifier[model_cls] )
keyword[if] keyword[not] identifier[user] . identifier[has_perms] ( identifier[perms] , identifier[obj] ) keyword[and] keyword[not] identifier[AnonymousUser] (). identifier[has_perms] ( identifier[perms] , identifier[obj] ):
keyword[if] identifier[request] . identifier[method] keyword[in] identifier[permissions] . identifier[SAFE_METHODS] :
keyword[raise] identifier[Http404]
identifier[read_perms] = identifier[self] . identifier[get_required_object_permissions] ( literal[string] , identifier[model_cls] )
keyword[if] keyword[not] identifier[user] . identifier[has_perms] ( identifier[read_perms] , identifier[obj] ):
keyword[raise] identifier[Http404]
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def has_object_permission(self, request, view, obj):
"""Check object permissions."""
# admins can do anything
if request.user.is_superuser:
return True # depends on [control=['if'], data=[]]
# `share` permission is required for editing permissions
if 'permissions' in view.action:
self.perms_map['POST'] = ['%(app_label)s.share_%(model_name)s'] # depends on [control=['if'], data=[]]
if view.action in ['add_data', 'remove_data']:
self.perms_map['POST'] = ['%(app_label)s.add_%(model_name)s'] # depends on [control=['if'], data=[]]
if hasattr(view, 'get_queryset'):
queryset = view.get_queryset() # depends on [control=['if'], data=[]]
else:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, 'Cannot apply DjangoObjectPermissions on a view that does not set `.queryset` or have a `.get_queryset()` method.'
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj) and (not AnonymousUser().has_perms(perms, obj)):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in permissions.SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404 # depends on [control=['if'], data=[]]
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404 # depends on [control=['if'], data=[]]
# Has read permissions.
return False # depends on [control=['if'], data=[]]
return True
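
The perms_map entries are Django-style permission templates that DRF expands with the model's app label and name before calling user.has_perms(). A standalone sketch of that expansion ('flow' and 'collection' are made-up values for illustration):

perms_map_entry = ['%(app_label)s.share_%(model_name)s']
kwargs = {'app_label': 'flow', 'model_name': 'collection'}
print([perm % kwargs for perm in perms_map_entry])
# ['flow.share_collection'] -- the codename checked against the object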
|
def check_actors(self):
"""
Checks the actors of the owner. Raises an exception if invalid.
"""
actors = []
for actor in self.owner.actors:
if actor.skip:
continue
actors.append(actor)
if len(actors) == 0:
return
if not self.allow_source and base.is_source(actors[0]):
raise Exception("Actor '" + actors[0].full_name + "' is a source, but no sources allowed!")
for i in xrange(1, len(actors)):
if not isinstance(actors[i], InputConsumer):
raise Exception("Actor does not accept any input: " + actors[i].full_name)
|
def function[check_actors, parameter[self]]:
constant[
Checks the actors of the owner. Raises an exception if invalid.
]
variable[actors] assign[=] list[[]]
for taget[name[actor]] in starred[name[self].owner.actors] begin[:]
if name[actor].skip begin[:]
continue
call[name[actors].append, parameter[name[actor]]]
if compare[call[name[len], parameter[name[actors]]] equal[==] constant[0]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da18dc064d0> begin[:]
<ast.Raise object at 0x7da18dc07cd0>
for taget[name[i]] in starred[call[name[xrange], parameter[constant[1], call[name[len], parameter[name[actors]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da18dc049a0> begin[:]
<ast.Raise object at 0x7da18dc067d0>
|
keyword[def] identifier[check_actors] ( identifier[self] ):
literal[string]
identifier[actors] =[]
keyword[for] identifier[actor] keyword[in] identifier[self] . identifier[owner] . identifier[actors] :
keyword[if] identifier[actor] . identifier[skip] :
keyword[continue]
identifier[actors] . identifier[append] ( identifier[actor] )
keyword[if] identifier[len] ( identifier[actors] )== literal[int] :
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[allow_source] keyword[and] identifier[base] . identifier[is_source] ( identifier[actors] [ literal[int] ]):
keyword[raise] identifier[Exception] ( literal[string] + identifier[actors] [ literal[int] ]. identifier[full_name] + literal[string] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[actors] )):
keyword[if] keyword[not] identifier[isinstance] ( identifier[actors] [ identifier[i] ], identifier[InputConsumer] ):
keyword[raise] identifier[Exception] ( literal[string] + identifier[actors] [ identifier[i] ]. identifier[full_name] )
|
def check_actors(self):
"""
Checks the actors of the owner. Raises an exception if invalid.
"""
actors = []
for actor in self.owner.actors:
if actor.skip:
continue # depends on [control=['if'], data=[]]
actors.append(actor) # depends on [control=['for'], data=['actor']]
if len(actors) == 0:
return # depends on [control=['if'], data=[]]
if not self.allow_source and base.is_source(actors[0]):
raise Exception("Actor '" + actors[0].full_name + "' is a source, but no sources allowed!") # depends on [control=['if'], data=[]]
for i in xrange(1, len(actors)):
if not isinstance(actors[i], InputConsumer):
raise Exception('Actor does not accept any input: ' + actors[i].full_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
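
The same chain rule is easy to exercise standalone. A minimal sketch with stand-in classes (Source and Transformer are hypothetical substitutes for base.is_source and InputConsumer):

class InputConsumer: pass
class Source: pass
class Transformer(InputConsumer): pass

def check_chain(actors, allow_source=True):
    actors = [a for a in actors if not getattr(a, 'skip', False)]
    if not actors:
        return
    if not allow_source and isinstance(actors[0], Source):
        raise Exception('first actor is a source, but no sources allowed')
    for actor in actors[1:]:
        if not isinstance(actor, InputConsumer):
            raise Exception('actor does not accept any input')

check_chain([Source(), Transformer()])  # passes; [Source(), Source()] would raise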
|
def disable_host_flap_detection(self, host):
"""Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
host.flap_detection_enabled = False
# Maybe the host was flapping, if so, stop flapping
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = []
self.send_an_element(host.get_update_status_brok())
|
def function[disable_host_flap_detection, parameter[self, host]]:
constant[Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
]
if name[host].flap_detection_enabled begin[:]
<ast.AugAssign object at 0x7da18f58c9a0>
name[host].flap_detection_enabled assign[=] constant[False]
if name[host].is_flapping begin[:]
name[host].is_flapping assign[=] constant[False]
name[host].flapping_changes assign[=] list[[]]
call[name[self].send_an_element, parameter[call[name[host].get_update_status_brok, parameter[]]]]
|
keyword[def] identifier[disable_host_flap_detection] ( identifier[self] , identifier[host] ):
literal[string]
keyword[if] identifier[host] . identifier[flap_detection_enabled] :
identifier[host] . identifier[modified_attributes] |= identifier[DICT_MODATTR] [ literal[string] ]. identifier[value]
identifier[host] . identifier[flap_detection_enabled] = keyword[False]
keyword[if] identifier[host] . identifier[is_flapping] :
identifier[host] . identifier[is_flapping] = keyword[False]
identifier[host] . identifier[flapping_changes] =[]
identifier[self] . identifier[send_an_element] ( identifier[host] . identifier[get_update_status_brok] ())
|
def disable_host_flap_detection(self, host):
"""Disable flap detection for a host
Format of the line that triggers function call::
DISABLE_HOST_FLAP_DETECTION;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if host.flap_detection_enabled:
host.modified_attributes |= DICT_MODATTR['MODATTR_FLAP_DETECTION_ENABLED'].value
host.flap_detection_enabled = False
# Maybe the host was flapping, if so, stop flapping
if host.is_flapping:
host.is_flapping = False
host.flapping_changes = [] # depends on [control=['if'], data=[]]
self.send_an_element(host.get_update_status_brok()) # depends on [control=['if'], data=[]]
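
The documented trigger is a semicolon-separated external command line. A tiny sketch of what such a line looks like and how the host name splits off (webserver01 is a placeholder):

cmd = 'DISABLE_HOST_FLAP_DETECTION;webserver01'
command, _, host_name = cmd.partition(';')
print(command, host_name)  # DISABLE_HOST_FLAP_DETECTION webserver01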
|
def _parse_path(self):
"""
Parse the storage path in the config.
Returns:
str
"""
if self.engine == ENGINE_DROPBOX:
path = get_dropbox_folder_location()
elif self.engine == ENGINE_GDRIVE:
path = get_google_drive_folder_location()
elif self.engine == ENGINE_COPY:
path = get_copy_folder_location()
elif self.engine == ENGINE_ICLOUD:
path = get_icloud_folder_location()
elif self.engine == ENGINE_BOX:
path = get_box_folder_location()
elif self.engine == ENGINE_FS:
if self._parser.has_option('storage', 'path'):
cfg_path = self._parser.get('storage', 'path')
path = os.path.join(os.environ['HOME'], cfg_path)
else:
raise ConfigError("The required 'path' can't be found while"
" the 'file_system' engine is used.")
return str(path)
|
def function[_parse_path, parameter[self]]:
constant[
Parse the storage path in the config.
Returns:
str
]
if compare[name[self].engine equal[==] name[ENGINE_DROPBOX]] begin[:]
variable[path] assign[=] call[name[get_dropbox_folder_location], parameter[]]
return[call[name[str], parameter[name[path]]]]
|
keyword[def] identifier[_parse_path] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[engine] == identifier[ENGINE_DROPBOX] :
identifier[path] = identifier[get_dropbox_folder_location] ()
keyword[elif] identifier[self] . identifier[engine] == identifier[ENGINE_GDRIVE] :
identifier[path] = identifier[get_google_drive_folder_location] ()
keyword[elif] identifier[self] . identifier[engine] == identifier[ENGINE_COPY] :
identifier[path] = identifier[get_copy_folder_location] ()
keyword[elif] identifier[self] . identifier[engine] == identifier[ENGINE_ICLOUD] :
identifier[path] = identifier[get_icloud_folder_location] ()
keyword[elif] identifier[self] . identifier[engine] == identifier[ENGINE_BOX] :
identifier[path] = identifier[get_box_folder_location] ()
keyword[elif] identifier[self] . identifier[engine] == identifier[ENGINE_FS] :
keyword[if] identifier[self] . identifier[_parser] . identifier[has_option] ( literal[string] , literal[string] ):
identifier[cfg_path] = identifier[self] . identifier[_parser] . identifier[get] ( literal[string] , literal[string] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[environ] [ literal[string] ], identifier[cfg_path] )
keyword[else] :
keyword[raise] identifier[ConfigError] ( literal[string]
literal[string] )
keyword[return] identifier[str] ( identifier[path] )
|
def _parse_path(self):
"""
Parse the storage path in the config.
Returns:
str
"""
if self.engine == ENGINE_DROPBOX:
path = get_dropbox_folder_location() # depends on [control=['if'], data=[]]
elif self.engine == ENGINE_GDRIVE:
path = get_google_drive_folder_location() # depends on [control=['if'], data=[]]
elif self.engine == ENGINE_COPY:
path = get_copy_folder_location() # depends on [control=['if'], data=[]]
elif self.engine == ENGINE_ICLOUD:
path = get_icloud_folder_location() # depends on [control=['if'], data=[]]
elif self.engine == ENGINE_BOX:
path = get_box_folder_location() # depends on [control=['if'], data=[]]
elif self.engine == ENGINE_FS:
if self._parser.has_option('storage', 'path'):
cfg_path = self._parser.get('storage', 'path')
path = os.path.join(os.environ['HOME'], cfg_path) # depends on [control=['if'], data=[]]
else:
raise ConfigError("The required 'path' can't be found while the 'file_system' engine is used.") # depends on [control=['if'], data=[]]
return str(path)
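
For the file_system branch, the [storage] section must carry a path option relative to $HOME. A hedged sketch with a made-up config (the Mackup folder name is illustrative only):

import configparser
import os

parser = configparser.ConfigParser()
parser.read_string('[storage]\nengine = file_system\npath = Mackup\n')

if parser.has_option('storage', 'path'):
    cfg_path = parser.get('storage', 'path')
    print(os.path.join(os.environ.get('HOME', '/home/user'), cfg_path))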
|
def parse_format(self):
"""Check format parameter.
        All format values listed in the specification are lowercase
        alphanumeric values commonly used as file extensions. To leave
opportunity for extension here just do a limited sanity check
on characters and length.
"""
if (self.format is not None and
not re.match(r'''\w{1,20}$''', self.format)):
raise IIIFRequestError(
parameter='format',
text='Bad format parameter')
|
def function[parse_format, parameter[self]]:
constant[Check format parameter.
        All format values listed in the specification are lowercase
        alphanumeric values commonly used as file extensions. To leave
opportunity for extension here just do a limited sanity check
on characters and length.
]
if <ast.BoolOp object at 0x7da1b031cc40> begin[:]
<ast.Raise object at 0x7da1b0466470>
|
keyword[def] identifier[parse_format] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[format] keyword[is] keyword[not] keyword[None] keyword[and]
keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[self] . identifier[format] )):
keyword[raise] identifier[IIIFRequestError] (
identifier[parameter] = literal[string] ,
identifier[text] = literal[string] )
|
def parse_format(self):
"""Check format parameter.
        All format values listed in the specification are lowercase
        alphanumeric values commonly used as file extensions. To leave
opportunity for extension here just do a limited sanity check
on characters and length.
"""
if self.format is not None and (not re.match('\\w{1,20}$', self.format)):
raise IIIFRequestError(parameter='format', text='Bad format parameter') # depends on [control=['if'], data=[]]
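
The sanity check itself is easy to probe in isolation: one to twenty word characters, anchored at the end of the string:

import re

for fmt in ('jpg', 'png', 'tif', 'not/ok', 'x' * 21):
    print(fmt, bool(re.match(r'\w{1,20}$', fmt)))
# jpg/png/tif pass; 'not/ok' and the 21-character name are rejected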
|
def SInt64(value, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
'''Signed 64-bit field'''
return BitField(value, 64, signed=True, min_value=min_value, max_value=max_value, encoder=encoder, fuzzable=fuzzable, name=name, full_range=full_range)
|
def function[SInt64, parameter[value, min_value, max_value, encoder, fuzzable, name, full_range]]:
constant[Signed 64-bit field]
return[call[name[BitField], parameter[name[value], constant[64]]]]
|
keyword[def] identifier[SInt64] ( identifier[value] , identifier[min_value] = keyword[None] , identifier[max_value] = keyword[None] , identifier[encoder] = identifier[ENC_INT_DEFAULT] , identifier[fuzzable] = keyword[True] , identifier[name] = keyword[None] , identifier[full_range] = keyword[False] ):
literal[string]
keyword[return] identifier[BitField] ( identifier[value] , literal[int] , identifier[signed] = keyword[True] , identifier[min_value] = identifier[min_value] , identifier[max_value] = identifier[max_value] , identifier[encoder] = identifier[encoder] , identifier[fuzzable] = identifier[fuzzable] , identifier[name] = identifier[name] , identifier[full_range] = identifier[full_range] )
|
def SInt64(value, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
"""Signed 64-bit field"""
return BitField(value, 64, signed=True, min_value=min_value, max_value=max_value, encoder=encoder, fuzzable=fuzzable, name=name, full_range=full_range)
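
Usage mirrors any other kitty field. A hedged sketch, assuming the kitty fuzzing framework is installed and SInt64 is exported from kitty.model (the import path is an assumption):

from kitty.model import SInt64  # assumed export path

# 64-bit two's-complement field, fuzzable by default
offset = SInt64(value=-1, min_value=-2**16, max_value=2**16, name='offset')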
|
def plugin_counts(self):
"""plugin_counts
    Returns the plugin counts as a dictionary with the last updated info
    if it is available.
"""
ret = {
'total': 0,
}
    # As usual, we need data before we can actually do anything ;)
data = self.raw_query('plugin', 'init')
    # For backwards compatibility purposes, we will be handling this a bit
    # differently than I would like. We are going to check to see if each
    # value exists and override the default value of 0. The only value that
    # I know existed in both 4.2 and 4.4 is pluginCount; the rest aren't
    # listed in the API docs, however they do come back in my experimentation.
ret['total'] = data['pluginCount']
if 'lastUpdates' in data:
for item in ['active', 'passive', 'compliance', 'custom', 'event']:
itemdata = {}
if item in data['lastUpdates']:
itemdata = data['lastUpdates'][item]
if item in data:
itemdata['count'] = data[item]
else:
itemdata['count'] = 0
ret[item] = itemdata
return ret
|
def function[plugin_counts, parameter[self]]:
constant[plugin_counts
    Returns the plugin counts as a dictionary with the last updated info
    if it is available.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2865c00>], [<ast.Constant object at 0x7da1b2865450>]]
variable[data] assign[=] call[name[self].raw_query, parameter[constant[plugin], constant[init]]]
call[name[ret]][constant[total]] assign[=] call[name[data]][constant[pluginCount]]
if compare[constant[lastUpdates] in name[data]] begin[:]
for taget[name[item]] in starred[list[[<ast.Constant object at 0x7da1b28640a0>, <ast.Constant object at 0x7da1b2864dc0>, <ast.Constant object at 0x7da1b2866110>, <ast.Constant object at 0x7da1b2867c70>, <ast.Constant object at 0x7da1b2866ec0>]]] begin[:]
variable[itemdata] assign[=] dictionary[[], []]
if compare[name[item] in call[name[data]][constant[lastUpdates]]] begin[:]
variable[itemdata] assign[=] call[call[name[data]][constant[lastUpdates]]][name[item]]
if compare[name[item] in name[data]] begin[:]
call[name[itemdata]][constant[count]] assign[=] call[name[data]][name[item]]
call[name[ret]][name[item]] assign[=] name[itemdata]
return[name[ret]]
|
keyword[def] identifier[plugin_counts] ( identifier[self] ):
literal[string]
identifier[ret] ={
literal[string] : literal[int] ,
}
identifier[data] = identifier[self] . identifier[raw_query] ( literal[string] , literal[string] )
identifier[ret] [ literal[string] ]= identifier[data] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[data] :
keyword[for] identifier[item] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[itemdata] ={}
keyword[if] identifier[item] keyword[in] identifier[data] [ literal[string] ]:
identifier[itemdata] = identifier[data] [ literal[string] ][ identifier[item] ]
keyword[if] identifier[item] keyword[in] identifier[data] :
identifier[itemdata] [ literal[string] ]= identifier[data] [ identifier[item] ]
keyword[else] :
identifier[itemdata] [ literal[string] ]= literal[int]
identifier[ret] [ identifier[item] ]= identifier[itemdata]
keyword[return] identifier[ret]
|
def plugin_counts(self):
"""plugin_counts
    Returns the plugin counts as a dictionary with the last updated info
    if it is available.
"""
ret = {'total': 0}
    # As usual, we need data before we can actually do anything ;)
data = self.raw_query('plugin', 'init')
    # For backwards compatibility purposes, we will be handling this a bit
    # differently than I would like. We are going to check to see if each
    # value exists and override the default value of 0. The only value that
    # I know existed in both 4.2 and 4.4 is pluginCount; the rest aren't
    # listed in the API docs, however they do come back in my experimentation.
ret['total'] = data['pluginCount']
if 'lastUpdates' in data:
for item in ['active', 'passive', 'compliance', 'custom', 'event']:
itemdata = {}
if item in data['lastUpdates']:
itemdata = data['lastUpdates'][item] # depends on [control=['if'], data=['item']]
if item in data:
itemdata['count'] = data[item] # depends on [control=['if'], data=['item', 'data']]
else:
itemdata['count'] = 0
ret[item] = itemdata # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=['data']]
return ret
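
A hedged usage sketch for pySecurityCenter (the class name, host and credentials are placeholders, not verified against a live SecurityCenter 4 install):

from securitycenter import SecurityCenter4  # assumed class name and module path

sc = SecurityCenter4('sc.example.com')
sc.login('admin', 'password')
counts = sc.plugin_counts()
print(counts['total'], counts.get('active', {}).get('count'))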
|
def run_multiple_processes(args_list: List[List[str]],
die_on_failure: bool = True) -> None:
"""
    Fire up multiple processes, and wait for them to finish.
Args:
args_list: command arguments for each process
die_on_failure: see :func:`wait_for_processes`
"""
for procargs in args_list:
start_process(procargs)
# Wait for them all to finish
wait_for_processes(die_on_failure=die_on_failure)
|
def function[run_multiple_processes, parameter[args_list, die_on_failure]]:
constant[
    Fire up multiple processes, and wait for them to finish.
Args:
args_list: command arguments for each process
die_on_failure: see :func:`wait_for_processes`
]
for taget[name[procargs]] in starred[name[args_list]] begin[:]
call[name[start_process], parameter[name[procargs]]]
call[name[wait_for_processes], parameter[]]
|
keyword[def] identifier[run_multiple_processes] ( identifier[args_list] : identifier[List] [ identifier[List] [ identifier[str] ]],
identifier[die_on_failure] : identifier[bool] = keyword[True] )-> keyword[None] :
literal[string]
keyword[for] identifier[procargs] keyword[in] identifier[args_list] :
identifier[start_process] ( identifier[procargs] )
identifier[wait_for_processes] ( identifier[die_on_failure] = identifier[die_on_failure] )
|
def run_multiple_processes(args_list: List[List[str]], die_on_failure: bool=True) -> None:
"""
    Fire up multiple processes, and wait for them to finish.
Args:
args_list: command arguments for each process
die_on_failure: see :func:`wait_for_processes`
"""
for procargs in args_list:
start_process(procargs) # depends on [control=['for'], data=['procargs']]
# Wait for them all to finish
wait_for_processes(die_on_failure=die_on_failure)
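
Calling it is straightforward: each inner list is one child's argv, and the call blocks until every child exits. A sketch for a POSIX host, assuming start_process and wait_for_processes from the same module are importable alongside it:

run_multiple_processes(
    args_list=[
        ['echo', 'first'],
        ['echo', 'second'],
    ],
    die_on_failure=True,
)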
|
def process_exception(self, request, exception):
"""
Return a redirect response for the :class:`~fluent_contents.extensions.HttpRedirectRequest`
"""
if isinstance(exception, HttpRedirectRequest):
return HttpResponseRedirect(exception.url, status=exception.status)
else:
return None
|
def function[process_exception, parameter[self, request, exception]]:
constant[
Return a redirect response for the :class:`~fluent_contents.extensions.HttpRedirectRequest`
]
if call[name[isinstance], parameter[name[exception], name[HttpRedirectRequest]]] begin[:]
return[call[name[HttpResponseRedirect], parameter[name[exception].url]]]
|
keyword[def] identifier[process_exception] ( identifier[self] , identifier[request] , identifier[exception] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[exception] , identifier[HttpRedirectRequest] ):
keyword[return] identifier[HttpResponseRedirect] ( identifier[exception] . identifier[url] , identifier[status] = identifier[exception] . identifier[status] )
keyword[else] :
keyword[return] keyword[None]
|
def process_exception(self, request, exception):
"""
Return a redirect response for the :class:`~fluent_contents.extensions.HttpRedirectRequest`
"""
if isinstance(exception, HttpRedirectRequest):
return HttpResponseRedirect(exception.url, status=exception.status) # depends on [control=['if'], data=[]]
else:
return None
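
For the hook to fire, the class has to be registered as Django middleware. A sketch of the settings entry (the dotted path is a hypothetical example, not the package's documented one):

MIDDLEWARE = [
    # ...
    'fluent_contents.middleware.HttpRedirectRequestMiddleware',  # hypothetical path
]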
|
def popitem(self):
"""remove the next prioritized [key, val, priority] and return it"""
pq = self.pq
while pq:
priority, key, val = heapq.heappop(pq)
if val is None:
self.removed_count -= 1
else:
del self.item_finder[key]
return key, val, priority
raise KeyError("pop from an empty priority queue")
|
def function[popitem, parameter[self]]:
constant[remove the next prioritized [key, val, priority] and return it]
variable[pq] assign[=] name[self].pq
while name[pq] begin[:]
<ast.Tuple object at 0x7da20c6e4ca0> assign[=] call[name[heapq].heappop, parameter[name[pq]]]
if compare[name[val] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c6e7820>
<ast.Raise object at 0x7da20c6e45e0>
|
keyword[def] identifier[popitem] ( identifier[self] ):
literal[string]
identifier[pq] = identifier[self] . identifier[pq]
keyword[while] identifier[pq] :
identifier[priority] , identifier[key] , identifier[val] = identifier[heapq] . identifier[heappop] ( identifier[pq] )
keyword[if] identifier[val] keyword[is] keyword[None] :
identifier[self] . identifier[removed_count] -= literal[int]
keyword[else] :
keyword[del] identifier[self] . identifier[item_finder] [ identifier[key] ]
keyword[return] identifier[key] , identifier[val] , identifier[priority]
keyword[raise] identifier[KeyError] ( literal[string] )
|
def popitem(self):
"""remove the next prioritized [key, val, priority] and return it"""
pq = self.pq
while pq:
(priority, key, val) = heapq.heappop(pq)
if val is None:
self.removed_count -= 1 # depends on [control=['if'], data=[]]
else:
del self.item_finder[key]
return (key, val, priority) # depends on [control=['while'], data=[]]
raise KeyError('pop from an empty priority queue')
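
The method leans on the classic heapq lazy-deletion recipe: removed entries keep their heap slot with val set to None, and popitem skips those tombstones while decrementing the bookkeeping counter. A standalone sketch of the same pattern:

import heapq

pq, item_finder, removed_count = [], {}, 0

def push(key, val, priority):
    entry = [priority, key, val]
    item_finder[key] = entry
    heapq.heappush(pq, entry)

def remove(key):
    global removed_count
    item_finder.pop(key)[2] = None  # tombstone: mark dead in place
    removed_count += 1

push('a', 1, priority=5)
push('b', 2, priority=3)
remove('b')

while pq:
    priority, key, val = heapq.heappop(pq)
    if val is None:            # skip the tombstone left behind for 'b'
        removed_count -= 1
        continue
    print(key, val, priority)  # -> a 1 5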
|
def on_backward_begin(self, smooth_loss:Tensor, **kwargs:Any)->None:
"Record the loss before any other callback has a chance to modify it."
self.losses.append(smooth_loss)
if self.pbar is not None and hasattr(self.pbar,'child'):
self.pbar.child.comment = f'{smooth_loss:.4f}'
|
def function[on_backward_begin, parameter[self, smooth_loss]]:
constant[Record the loss before any other callback has a chance to modify it.]
call[name[self].losses.append, parameter[name[smooth_loss]]]
if <ast.BoolOp object at 0x7da20e9b2a10> begin[:]
name[self].pbar.child.comment assign[=] <ast.JoinedStr object at 0x7da20e9b0670>
|
keyword[def] identifier[on_backward_begin] ( identifier[self] , identifier[smooth_loss] : identifier[Tensor] ,** identifier[kwargs] : identifier[Any] )-> keyword[None] :
literal[string]
identifier[self] . identifier[losses] . identifier[append] ( identifier[smooth_loss] )
keyword[if] identifier[self] . identifier[pbar] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[self] . identifier[pbar] , literal[string] ):
identifier[self] . identifier[pbar] . identifier[child] . identifier[comment] = literal[string]
|
def on_backward_begin(self, smooth_loss: Tensor, **kwargs: Any) -> None:
"""Record the loss before any other callback has a chance to modify it."""
self.losses.append(smooth_loss)
if self.pbar is not None and hasattr(self.pbar, 'child'):
self.pbar.child.comment = f'{smooth_loss:.4f}' # depends on [control=['if'], data=[]]
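
The progress-bar comment relies on a 0-dim tensor accepting float-style format specs, which is easy to verify on its own (assuming PyTorch is installed):

import torch

smooth_loss = torch.tensor(0.123456)
print(f'{smooth_loss:.4f}')  # 0.1235 -- exactly what lands in the bar comment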
|
def add_path(self, nodes, **attr):
"""In replacement for Deprecated add_path method"""
if nx.__version__[0] == "1":
return super().add_path(nodes, **attr)
else:
return nx.add_path(self, nodes, **attr)
|
def function[add_path, parameter[self, nodes]]:
    constant[Replacement for the deprecated add_path method]
if compare[call[name[nx].__version__][constant[0]] equal[==] constant[1]] begin[:]
return[call[call[name[super], parameter[]].add_path, parameter[name[nodes]]]]
|
keyword[def] identifier[add_path] ( identifier[self] , identifier[nodes] ,** identifier[attr] ):
literal[string]
keyword[if] identifier[nx] . identifier[__version__] [ literal[int] ]== literal[string] :
keyword[return] identifier[super] (). identifier[add_path] ( identifier[nodes] ,** identifier[attr] )
keyword[else] :
keyword[return] identifier[nx] . identifier[add_path] ( identifier[self] , identifier[nodes] ,** identifier[attr] )
|
def add_path(self, nodes, **attr):
"""In replacement for Deprecated add_path method"""
if nx.__version__[0] == '1':
return super().add_path(nodes, **attr) # depends on [control=['if'], data=[]]
else:
return nx.add_path(self, nodes, **attr)
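
Under networkx 2.x the shim delegates to the module-level function, so call sites look the same on either major version. A self-contained sketch (subclassing nx.Graph stands in for whatever class mixes this method in):

import networkx as nx

class Graph(nx.Graph):
    def add_path(self, nodes, **attr):
        if nx.__version__[0] == "1":
            return super().add_path(nodes, **attr)
        return nx.add_path(self, nodes, **attr)

g = Graph()
g.add_path([1, 2, 3])
print(list(g.edges()))  # [(1, 2), (2, 3)]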
|
def make_final_message(version, error, codewords):
"""\
Constructs the final message (codewords incl. error correction).
ISO/IEC 18004:2015(E) -- 7.6 Constructing the final message codeword sequence (page 45)
:param int version: (Micro) QR Code version constant.
:param int error: Error level constant.
:param codewords: An iterable sequence of codewords (ints)
:return: Byte buffer representing the final message.
"""
ec_infos = consts.ECC[version][error]
last_cw_is_four = version in (consts.VERSION_M1, consts.VERSION_M3)
data_blocks, error_blocks = make_blocks(ec_infos, codewords)
if last_cw_is_four:
        # All codewords are 8 bit by default; M1 and M3 symbols use 4 bits
        # to represent the last codeword
        # data_blocks[0] is safe since Micro QR Codes use just one data block
        # and one error block
data_blocks[0][-1] >>= 4
buff = Buffer()
append_int = partial(buff.append_bits, length=8)
# Write codewords
for i in range(max(info.num_data for info in ec_infos)):
for block in data_blocks:
if i >= len(block):
continue
if last_cw_is_four and i + 1 == len(block):
buff.append_bits(block[i], 4)
else:
append_int(block[i])
# Write error codewords
for i in range(max(info.num_total - info.num_data for info in ec_infos)):
for block in error_blocks:
if i >= len(block):
continue
append_int(block[i])
# ISO/IEC 18004:2015(E) -- 7.6 Constructing the final message codeword sequence
# [...] In certain QR Code versions, however, where the number of modules
# available for data and error correction codewords is not an exact multiple
# of 8, there may be a need for 3, 4 or 7 Remainder Bits to be appended to
# the final message bit stream in order to fill exactly the number of
# modules in the encoding region
remainder = 0 # Calculation: Number of Data modules - number of bits
if version in (2, 3, 4, 5, 6):
remainder = 7
elif version in (14, 15, 16, 17, 18, 19, 20, 28, 29, 30, 31, 32, 33, 34):
remainder = 3
elif version in (21, 22, 23, 24, 25, 26, 27):
remainder = 4
buff.extend(b'\0' * remainder)
return buff
|
def function[make_final_message, parameter[version, error, codewords]]:
constant[ Constructs the final message (codewords incl. error correction).
ISO/IEC 18004:2015(E) -- 7.6 Constructing the final message codeword sequence (page 45)
:param int version: (Micro) QR Code version constant.
:param int error: Error level constant.
:param codewords: An iterable sequence of codewords (ints)
:return: Byte buffer representing the final message.
]
variable[ec_infos] assign[=] call[call[name[consts].ECC][name[version]]][name[error]]
variable[last_cw_is_four] assign[=] compare[name[version] in tuple[[<ast.Attribute object at 0x7da204622c50>, <ast.Attribute object at 0x7da204622b00>]]]
<ast.Tuple object at 0x7da204621d20> assign[=] call[name[make_blocks], parameter[name[ec_infos], name[codewords]]]
if name[last_cw_is_four] begin[:]
<ast.AugAssign object at 0x7da2046206d0>
variable[buff] assign[=] call[name[Buffer], parameter[]]
variable[append_int] assign[=] call[name[partial], parameter[name[buff].append_bits]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[max], parameter[<ast.GeneratorExp object at 0x7da2046230d0>]]]]] begin[:]
for taget[name[block]] in starred[name[data_blocks]] begin[:]
if compare[name[i] greater_or_equal[>=] call[name[len], parameter[name[block]]]] begin[:]
continue
if <ast.BoolOp object at 0x7da2046203a0> begin[:]
call[name[buff].append_bits, parameter[call[name[block]][name[i]], constant[4]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[max], parameter[<ast.GeneratorExp object at 0x7da204621ff0>]]]]] begin[:]
for taget[name[block]] in starred[name[error_blocks]] begin[:]
if compare[name[i] greater_or_equal[>=] call[name[len], parameter[name[block]]]] begin[:]
continue
call[name[append_int], parameter[call[name[block]][name[i]]]]
variable[remainder] assign[=] constant[0]
if compare[name[version] in tuple[[<ast.Constant object at 0x7da204621c30>, <ast.Constant object at 0x7da204623af0>, <ast.Constant object at 0x7da204621990>, <ast.Constant object at 0x7da204620220>, <ast.Constant object at 0x7da204620f10>]]] begin[:]
variable[remainder] assign[=] constant[7]
call[name[buff].extend, parameter[binary_operation[constant[b'\x00'] * name[remainder]]]]
return[name[buff]]
|
keyword[def] identifier[make_final_message] ( identifier[version] , identifier[error] , identifier[codewords] ):
literal[string]
identifier[ec_infos] = identifier[consts] . identifier[ECC] [ identifier[version] ][ identifier[error] ]
identifier[last_cw_is_four] = identifier[version] keyword[in] ( identifier[consts] . identifier[VERSION_M1] , identifier[consts] . identifier[VERSION_M3] )
identifier[data_blocks] , identifier[error_blocks] = identifier[make_blocks] ( identifier[ec_infos] , identifier[codewords] )
keyword[if] identifier[last_cw_is_four] :
identifier[data_blocks] [ literal[int] ][- literal[int] ]>>= literal[int]
identifier[buff] = identifier[Buffer] ()
identifier[append_int] = identifier[partial] ( identifier[buff] . identifier[append_bits] , identifier[length] = literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[max] ( identifier[info] . identifier[num_data] keyword[for] identifier[info] keyword[in] identifier[ec_infos] )):
keyword[for] identifier[block] keyword[in] identifier[data_blocks] :
keyword[if] identifier[i] >= identifier[len] ( identifier[block] ):
keyword[continue]
keyword[if] identifier[last_cw_is_four] keyword[and] identifier[i] + literal[int] == identifier[len] ( identifier[block] ):
identifier[buff] . identifier[append_bits] ( identifier[block] [ identifier[i] ], literal[int] )
keyword[else] :
identifier[append_int] ( identifier[block] [ identifier[i] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[max] ( identifier[info] . identifier[num_total] - identifier[info] . identifier[num_data] keyword[for] identifier[info] keyword[in] identifier[ec_infos] )):
keyword[for] identifier[block] keyword[in] identifier[error_blocks] :
keyword[if] identifier[i] >= identifier[len] ( identifier[block] ):
keyword[continue]
identifier[append_int] ( identifier[block] [ identifier[i] ])
identifier[remainder] = literal[int]
keyword[if] identifier[version] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[remainder] = literal[int]
keyword[elif] identifier[version] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[remainder] = literal[int]
keyword[elif] identifier[version] keyword[in] ( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[remainder] = literal[int]
identifier[buff] . identifier[extend] ( literal[string] * identifier[remainder] )
keyword[return] identifier[buff]
|
def make_final_message(version, error, codewords):
""" Constructs the final message (codewords incl. error correction).
ISO/IEC 18004:2015(E) -- 7.6 Constructing the final message codeword sequence (page 45)
:param int version: (Micro) QR Code version constant.
:param int error: Error level constant.
:param codewords: An iterable sequence of codewords (ints)
:return: Byte buffer representing the final message.
"""
ec_infos = consts.ECC[version][error]
last_cw_is_four = version in (consts.VERSION_M1, consts.VERSION_M3)
(data_blocks, error_blocks) = make_blocks(ec_infos, codewords)
if last_cw_is_four:
        # All codewords are 8 bit by default; M1 and M3 symbols use 4 bits
        # to represent the last codeword
        # data_blocks[0] is safe since Micro QR Codes use just one data block
        # and one error block
data_blocks[0][-1] >>= 4 # depends on [control=['if'], data=[]]
buff = Buffer()
append_int = partial(buff.append_bits, length=8)
# Write codewords
for i in range(max((info.num_data for info in ec_infos))):
for block in data_blocks:
if i >= len(block):
continue # depends on [control=['if'], data=[]]
if last_cw_is_four and i + 1 == len(block):
buff.append_bits(block[i], 4) # depends on [control=['if'], data=[]]
else:
append_int(block[i]) # depends on [control=['for'], data=['block']] # depends on [control=['for'], data=['i']]
# Write error codewords
for i in range(max((info.num_total - info.num_data for info in ec_infos))):
for block in error_blocks:
if i >= len(block):
continue # depends on [control=['if'], data=[]]
append_int(block[i]) # depends on [control=['for'], data=['block']] # depends on [control=['for'], data=['i']]
# ISO/IEC 18004:2015(E) -- 7.6 Constructing the final message codeword sequence
# [...] In certain QR Code versions, however, where the number of modules
# available for data and error correction codewords is not an exact multiple
# of 8, there may be a need for 3, 4 or 7 Remainder Bits to be appended to
# the final message bit stream in order to fill exactly the number of
# modules in the encoding region
remainder = 0 # Calculation: Number of Data modules - number of bits
if version in (2, 3, 4, 5, 6):
remainder = 7 # depends on [control=['if'], data=[]]
elif version in (14, 15, 16, 17, 18, 19, 20, 28, 29, 30, 31, 32, 33, 34):
remainder = 3 # depends on [control=['if'], data=[]]
elif version in (21, 22, 23, 24, 25, 26, 27):
remainder = 4 # depends on [control=['if'], data=[]]
buff.extend(b'\x00' * remainder)
return buff
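
The remainder-bit rule at the end is pure ISO/IEC 18004 bookkeeping and can be factored out for inspection; the values below come straight from the branches above:

def remainder_bits(version):
    if version in (2, 3, 4, 5, 6):
        return 7
    if version in (14, 15, 16, 17, 18, 19, 20, 28, 29, 30, 31, 32, 33, 34):
        return 3
    if version in (21, 22, 23, 24, 25, 26, 27):
        return 4
    return 0

print([remainder_bits(v) for v in (1, 5, 15, 25, 40)])  # [0, 7, 3, 4, 0]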
|
def _split_file(self, data=''):
"""
        Splits SAR output or a SAR output file (in ASCII format) in order to
        extract the info we need from it, in the format we want.
        :param data: Input data instead of a file
        :type data: str.
        :return: ``list`` of SAR file sections, separated by the type of
            info they contain, without yet parsing what exactly each one is
"""
# Filename passed checks through __init__
if ((self.__filename and os.access(self.__filename, os.R_OK))
or data != ''):
fhandle = None
if data == '':
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("Couldn't open file %s" % self.__filename))
fhandle = None
if fhandle or data != '':
datalength = 0
# Dealing with mmap difference on Windows and Linux
if platform.system() == 'Windows':
dataprot = mmap.ACCESS_READ
else:
dataprot = mmap.PROT_READ
if data != '':
fhandle = -1
datalength = len(data)
if platform.system() == 'Windows':
dataprot = mmap.ACCESS_READ | mmap.ACCESS_WRITE
else:
dataprot = mmap.PROT_READ | mmap.PROT_WRITE
try:
if platform.system() == 'Windows':
sarmap = mmap.mmap(
fhandle, length=datalength, access=dataprot
)
else:
sarmap = mmap.mmap(
fhandle, length=datalength, prot=dataprot
)
if data != '':
sarmap.write(data)
sarmap.flush()
sarmap.seek(0, os.SEEK_SET)
except (TypeError, IndexError):
if data == '':
os.close(fhandle)
traceback.print_exc()
# sys.exit(-1)
return False
# Here we'll store chunks of SAR file, unparsed
searchunks = []
oldchunkpos = 0
dlpos = sarmap.find('\n\n', 0)
size = 0
if data == '':
# We can do mmap.size() only on read-only mmaps
size = sarmap.size()
else:
# Otherwise, if data was passed to us,
# we measure its length
                size = len(data)
# oldchunkpos = dlpos
while dlpos > -1: # mmap.find() returns -1 on failure.
tempchunk = sarmap.read(dlpos - oldchunkpos)
searchunks.append(tempchunk.strip())
# We remember position, add 2 for 2 DD's
                    # (newlines in production). We have to remember
# relative value
oldchunkpos += (dlpos - oldchunkpos) + 2
# We position to new place, to be behind \n\n
# we've looked for.
try:
sarmap.seek(2, os.SEEK_CUR)
except ValueError:
print(('Out of bounds (%s)!\n' % (sarmap.tell())))
# Now we repeat find.
dlpos = sarmap.find("\n\n")
# If it wasn't the end of file, we want last piece of it
if oldchunkpos < size:
tempchunk = sarmap[oldchunkpos:]
searchunks.append(tempchunk.strip())
sarmap.close()
if fhandle != -1:
os.close(fhandle)
if searchunks:
return searchunks
else:
return False
return False
|
def function[_split_file, parameter[self, data]]:
constant[
        Splits SAR output or a SAR output file (in ASCII format) in order to
        extract the info we need from it, in the format we want.
        :param data: Input data instead of a file
        :type data: str.
        :return: ``list`` of SAR file sections, separated by the type of
            info they contain, without yet parsing what exactly each one is
]
if <ast.BoolOp object at 0x7da1b0381e10> begin[:]
variable[fhandle] assign[=] constant[None]
if compare[name[data] equal[==] constant[]] begin[:]
<ast.Try object at 0x7da1b0383190>
if <ast.BoolOp object at 0x7da1b03831f0> begin[:]
variable[datalength] assign[=] constant[0]
if compare[call[name[platform].system, parameter[]] equal[==] constant[Windows]] begin[:]
variable[dataprot] assign[=] name[mmap].ACCESS_READ
if compare[name[data] not_equal[!=] constant[]] begin[:]
variable[fhandle] assign[=] <ast.UnaryOp object at 0x7da2041da3b0>
variable[datalength] assign[=] call[name[len], parameter[name[data]]]
if compare[call[name[platform].system, parameter[]] equal[==] constant[Windows]] begin[:]
variable[dataprot] assign[=] binary_operation[name[mmap].ACCESS_READ <ast.BitOr object at 0x7da2590d6aa0> name[mmap].ACCESS_WRITE]
<ast.Try object at 0x7da18ede5000>
variable[searchunks] assign[=] list[[]]
variable[oldchunkpos] assign[=] constant[0]
variable[dlpos] assign[=] call[name[sarmap].find, parameter[constant[
], constant[0]]]
variable[size] assign[=] constant[0]
if compare[name[data] equal[==] constant[]] begin[:]
variable[size] assign[=] call[name[sarmap].size, parameter[]]
while compare[name[dlpos] greater[>] <ast.UnaryOp object at 0x7da1b042ddb0>] begin[:]
variable[tempchunk] assign[=] call[name[sarmap].read, parameter[binary_operation[name[dlpos] - name[oldchunkpos]]]]
call[name[searchunks].append, parameter[call[name[tempchunk].strip, parameter[]]]]
<ast.AugAssign object at 0x7da18bc728c0>
<ast.Try object at 0x7da18bc71ab0>
variable[dlpos] assign[=] call[name[sarmap].find, parameter[constant[
]]]
if compare[name[oldchunkpos] less[<] name[size]] begin[:]
variable[tempchunk] assign[=] call[name[sarmap]][<ast.Slice object at 0x7da18bc72e00>]
call[name[searchunks].append, parameter[call[name[tempchunk].strip, parameter[]]]]
call[name[sarmap].close, parameter[]]
if compare[name[fhandle] not_equal[!=] <ast.UnaryOp object at 0x7da18bc701c0>] begin[:]
call[name[os].close, parameter[name[fhandle]]]
if name[searchunks] begin[:]
return[name[searchunks]]
return[constant[False]]
|
keyword[def] identifier[_split_file] ( identifier[self] , identifier[data] = literal[string] ):
literal[string]
keyword[if] (( identifier[self] . identifier[__filename] keyword[and] identifier[os] . identifier[access] ( identifier[self] . identifier[__filename] , identifier[os] . identifier[R_OK] ))
keyword[or] identifier[data] != literal[string] ):
identifier[fhandle] = keyword[None]
keyword[if] identifier[data] == literal[string] :
keyword[try] :
identifier[fhandle] = identifier[os] . identifier[open] ( identifier[self] . identifier[__filename] , identifier[os] . identifier[O_RDONLY] )
keyword[except] identifier[OSError] :
identifier[print] (( literal[string] % identifier[self] . identifier[__filename] ))
identifier[fhandle] = keyword[None]
keyword[if] identifier[fhandle] keyword[or] identifier[data] != literal[string] :
identifier[datalength] = literal[int]
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
identifier[dataprot] = identifier[mmap] . identifier[ACCESS_READ]
keyword[else] :
identifier[dataprot] = identifier[mmap] . identifier[PROT_READ]
keyword[if] identifier[data] != literal[string] :
identifier[fhandle] =- literal[int]
identifier[datalength] = identifier[len] ( identifier[data] )
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
identifier[dataprot] = identifier[mmap] . identifier[ACCESS_READ] | identifier[mmap] . identifier[ACCESS_WRITE]
keyword[else] :
identifier[dataprot] = identifier[mmap] . identifier[PROT_READ] | identifier[mmap] . identifier[PROT_WRITE]
keyword[try] :
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
identifier[sarmap] = identifier[mmap] . identifier[mmap] (
identifier[fhandle] , identifier[length] = identifier[datalength] , identifier[access] = identifier[dataprot]
)
keyword[else] :
identifier[sarmap] = identifier[mmap] . identifier[mmap] (
identifier[fhandle] , identifier[length] = identifier[datalength] , identifier[prot] = identifier[dataprot]
)
keyword[if] identifier[data] != literal[string] :
identifier[sarmap] . identifier[write] ( identifier[data] )
identifier[sarmap] . identifier[flush] ()
identifier[sarmap] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_SET] )
keyword[except] ( identifier[TypeError] , identifier[IndexError] ):
keyword[if] identifier[data] == literal[string] :
identifier[os] . identifier[close] ( identifier[fhandle] )
identifier[traceback] . identifier[print_exc] ()
keyword[return] keyword[False]
identifier[searchunks] =[]
identifier[oldchunkpos] = literal[int]
identifier[dlpos] = identifier[sarmap] . identifier[find] ( literal[string] , literal[int] )
identifier[size] = literal[int]
keyword[if] identifier[data] == literal[string] :
identifier[size] = identifier[sarmap] . identifier[size] ()
keyword[else] :
identifier[size] = identifier[len] ( identifier[data] )
keyword[while] identifier[dlpos] >- literal[int] :
identifier[tempchunk] = identifier[sarmap] . identifier[read] ( identifier[dlpos] - identifier[oldchunkpos] )
identifier[searchunks] . identifier[append] ( identifier[tempchunk] . identifier[strip] ())
identifier[oldchunkpos] +=( identifier[dlpos] - identifier[oldchunkpos] )+ literal[int]
keyword[try] :
identifier[sarmap] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_CUR] )
keyword[except] identifier[ValueError] :
identifier[print] (( literal[string] %( identifier[sarmap] . identifier[tell] ())))
identifier[dlpos] = identifier[sarmap] . identifier[find] ( literal[string] )
keyword[if] identifier[oldchunkpos] < identifier[size] :
identifier[tempchunk] = identifier[sarmap] [ identifier[oldchunkpos] :]
identifier[searchunks] . identifier[append] ( identifier[tempchunk] . identifier[strip] ())
identifier[sarmap] . identifier[close] ()
keyword[if] identifier[fhandle] !=- literal[int] :
identifier[os] . identifier[close] ( identifier[fhandle] )
keyword[if] identifier[searchunks] :
keyword[return] identifier[searchunks]
keyword[else] :
keyword[return] keyword[False]
keyword[return] keyword[False]
|
def _split_file(self, data=''):
"""
        Splits SAR output or a SAR output file (in ASCII format) in order to
        extract the info we need from it, in the format we want.
        :param data: Input data instead of a file
        :type data: str.
        :return: ``list`` of SAR file sections, separated by the type of
            info they contain, without yet parsing what exactly each one is
"""
# Filename passed checks through __init__
if self.__filename and os.access(self.__filename, os.R_OK) or data != '':
fhandle = None
if data == '':
try:
fhandle = os.open(self.__filename, os.O_RDONLY) # depends on [control=['try'], data=[]]
except OSError:
print("Couldn't open file %s" % self.__filename)
fhandle = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if fhandle or data != '':
datalength = 0
# Dealing with mmap difference on Windows and Linux
if platform.system() == 'Windows':
dataprot = mmap.ACCESS_READ # depends on [control=['if'], data=[]]
else:
dataprot = mmap.PROT_READ
if data != '':
fhandle = -1
datalength = len(data)
if platform.system() == 'Windows':
dataprot = mmap.ACCESS_READ | mmap.ACCESS_WRITE # depends on [control=['if'], data=[]]
else:
dataprot = mmap.PROT_READ | mmap.PROT_WRITE # depends on [control=['if'], data=['data']]
try:
if platform.system() == 'Windows':
sarmap = mmap.mmap(fhandle, length=datalength, access=dataprot) # depends on [control=['if'], data=[]]
else:
sarmap = mmap.mmap(fhandle, length=datalength, prot=dataprot)
if data != '':
sarmap.write(data)
sarmap.flush()
sarmap.seek(0, os.SEEK_SET) # depends on [control=['if'], data=['data']] # depends on [control=['try'], data=[]]
except (TypeError, IndexError):
if data == '':
os.close(fhandle) # depends on [control=['if'], data=[]]
traceback.print_exc()
# sys.exit(-1)
return False # depends on [control=['except'], data=[]]
# Here we'll store chunks of SAR file, unparsed
searchunks = []
oldchunkpos = 0
dlpos = sarmap.find('\n\n', 0)
size = 0
if data == '':
# We can do mmap.size() only on read-only mmaps
size = sarmap.size() # depends on [control=['if'], data=[]]
else:
# Otherwise, if data was passed to us,
# we measure its length
            size = len(data)
# oldchunkpos = dlpos
while dlpos > -1: # mmap.find() returns -1 on failure.
tempchunk = sarmap.read(dlpos - oldchunkpos)
searchunks.append(tempchunk.strip())
# We remember position, add 2 for 2 DD's
            # (newlines in production). We have to remember
# relative value
oldchunkpos += dlpos - oldchunkpos + 2
# We position to new place, to be behind \n\n
# we've looked for.
try:
sarmap.seek(2, os.SEEK_CUR) # depends on [control=['try'], data=[]]
except ValueError:
print('Out of bounds (%s)!\n' % sarmap.tell()) # depends on [control=['except'], data=[]]
# Now we repeat find.
dlpos = sarmap.find('\n\n') # depends on [control=['while'], data=['dlpos']]
# If it wasn't the end of file, we want last piece of it
if oldchunkpos < size:
tempchunk = sarmap[oldchunkpos:]
searchunks.append(tempchunk.strip()) # depends on [control=['if'], data=['oldchunkpos']]
sarmap.close() # depends on [control=['if'], data=[]]
if fhandle != -1:
os.close(fhandle) # depends on [control=['if'], data=['fhandle']]
if searchunks:
return searchunks # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=[]]
return False
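
Stripped of the mmap machinery, the core of the parser is "cut the ASCII dump at blank lines". A plain-string sketch of the same idea (the SAR text is a made-up miniature):

sar_text = ('Linux 3.10.0 (host)\t01/01/2019\n\n'
            '12:00:01 AM  CPU  %user  %system\n12:10:01 AM  all  1.2  0.4\n\n'
            '12:00:01 AM  kbmemfree  kbmemused\n12:10:01 AM  123456  654321')
chunks = [chunk.strip() for chunk in sar_text.split('\n\n')]
print(len(chunks))  # 3 sections: header, CPU, memory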
|
def search(self, index=None, body=None, params=None):
"""
Execute a search query and get back search hits that match the query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`_
:arg index: A list of index names to search, or a string containing a
comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg allow_partial_search_results: Set to false to return an overall
failure if the request would produce partial results. Defaults to
True, which will allow partial results in the case of timeouts or
partial failures
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg batched_reduce_size: The number of shard results that should be
reduced at once on the coordinating node. This value should be used
as a protection mechanism to reduce the memory overhead per search
request if the potential number of shards in the request can be
large., default 512
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg docvalue_fields: A comma-separated list of fields to return as the
docvalue representation of a field for each hit
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg explain: Specify whether to return detailed information about score
computation as part of a hit
:arg from\\_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg max_concurrent_shard_requests: The number of concurrent shard
requests this search executes concurrently. This value should be
used to limit the impact of the search on the cluster in order to
limit the number of concurrent shard requests, default 'The default
grows with the number of nodes in the cluster but is at most 256.'
:arg pre_filter_shard_size: A threshold that enforces a pre-filter
roundtrip to prefilter search shards based on query rewriting if
the number of shards the search request expands to exceeds the
threshold. This filter roundtrip can limit the number of shards
            significantly if for instance a shard cannot match any documents
            based on its rewrite method, i.e. if date filters are mandatory to
match but the shard bounds and the query are disjoint., default 128
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number
            in the response. This param was added in version 6.x to handle mixed cluster queries where nodes
are in multiple versions (7.0 and 6.latest)
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch'
:arg size: Number of hits to return (default: 10)
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg stored_fields: A comma-separated list of stored fields to return as
part of a hit
:arg suggest_field: Specify which field to use for suggestions
:arg suggest_mode: Specify suggest mode, default 'missing', valid
choices are: 'missing', 'popular', 'always'
:arg suggest_size: How many suggestions to return in response
:arg suggest_text: The source text for which the suggestions should be
returned
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Explicit operation timeout
:arg track_scores: Whether to calculate and return scores even if they
are not used for sorting
:arg track_total_hits: Indicate if the number of documents that match
the query should be tracked
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
:arg version: Specify whether to return document version as part of a
hit
"""
# from is a reserved word so it cannot be used, use from_ instead
if "from_" in params:
params["from"] = params.pop("from_")
if not index:
index = "_all"
return self.transport.perform_request(
"GET", _make_path(index, "_search"), params=params, body=body
)
|
def function[search, parameter[self, index, body, params]]:
constant[
Execute a search query and get back search hits that match the query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`_
:arg index: A list of index names to search, or a string containing a
comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg allow_partial_search_results: Set to false to return an overall
failure if the request would produce partial results. Defaults to
True, which will allow partial results in the case of timeouts or
partial failures
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg batched_reduce_size: The number of shard results that should be
reduced at once on the coordinating node. This value should be used
as a protection mechanism to reduce the memory overhead per search
request if the potential number of shards in the request can be
large., default 512
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg docvalue_fields: A comma-separated list of fields to return as the
docvalue representation of a field for each hit
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg explain: Specify whether to return detailed information about score
computation as part of a hit
:arg from\_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
        :arg max_concurrent_shard_requests: The number of shard
            requests this search executes concurrently. This value should be
used to limit the impact of the search on the cluster in order to
limit the number of concurrent shard requests, default 'The default
grows with the number of nodes in the cluster but is at most 256.'
:arg pre_filter_shard_size: A threshold that enforces a pre-filter
roundtrip to prefilter search shards based on query rewriting if
the number of shards the search request expands to exceeds the
threshold. This filter roundtrip can limit the number of shards
            significantly if, for instance, a shard cannot match any documents
            based on its rewrite method, i.e. if date filters are mandatory to
            match but the shard bounds and the query are disjoint., default 128
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
        :arg rest_total_hits_as_int: This parameter restores the total hits
            as a number in the response. It was added in version 6.x to
            handle mixed-cluster queries where nodes run multiple versions
            (7.0 and 6.latest)
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch'
:arg size: Number of hits to return (default: 10)
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg stored_fields: A comma-separated list of stored fields to return as
part of a hit
:arg suggest_field: Specify which field to use for suggestions
:arg suggest_mode: Specify suggest mode, default 'missing', valid
choices are: 'missing', 'popular', 'always'
:arg suggest_size: How many suggestions to return in response
:arg suggest_text: The source text for which the suggestions should be
returned
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Explicit operation timeout
:arg track_scores: Whether to calculate and return scores even if they
are not used for sorting
:arg track_total_hits: Indicate if the number of documents that match
the query should be tracked
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
:arg version: Specify whether to return document version as part of a
hit
]
if compare[constant[from_] in name[params]] begin[:]
call[name[params]][constant[from]] assign[=] call[name[params].pop, parameter[constant[from_]]]
if <ast.UnaryOp object at 0x7da1b219ae60> begin[:]
variable[index] assign[=] constant[_all]
return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[name[index], constant[_search]]]]]]
|
keyword[def] identifier[search] ( identifier[self] , identifier[index] = keyword[None] , identifier[body] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[params] :
identifier[params] [ literal[string] ]= identifier[params] . identifier[pop] ( literal[string] )
keyword[if] keyword[not] identifier[index] :
identifier[index] = literal[string]
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( identifier[index] , literal[string] ), identifier[params] = identifier[params] , identifier[body] = identifier[body]
)
|
def search(self, index=None, body=None, params=None):
"""
Execute a search query and get back search hits that match the query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`_
:arg index: A list of index names to search, or a string containing a
comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg allow_partial_search_results: Set to false to return an overall
failure if the request would produce partial results. Defaults to
True, which will allow partial results in the case of timeouts or
partial failures
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg batched_reduce_size: The number of shard results that should be
reduced at once on the coordinating node. This value should be used
as a protection mechanism to reduce the memory overhead per search
request if the potential number of shards in the request can be
large., default 512
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg docvalue_fields: A comma-separated list of fields to return as the
docvalue representation of a field for each hit
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg explain: Specify whether to return detailed information about score
computation as part of a hit
:arg from\\_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
        :arg max_concurrent_shard_requests: The number of shard
            requests this search executes concurrently. This value should be
used to limit the impact of the search on the cluster in order to
limit the number of concurrent shard requests, default 'The default
grows with the number of nodes in the cluster but is at most 256.'
:arg pre_filter_shard_size: A threshold that enforces a pre-filter
roundtrip to prefilter search shards based on query rewriting if
the number of shards the search request expands to exceeds the
threshold. This filter roundtrip can limit the number of shards
            significantly if, for instance, a shard cannot match any documents
            based on its rewrite method, i.e. if date filters are mandatory to
            match but the shard bounds and the query are disjoint., default 128
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
        :arg rest_total_hits_as_int: This parameter restores the total hits
            as a number in the response. It was added in version 6.x to
            handle mixed-cluster queries where nodes run multiple versions
            (7.0 and 6.latest)
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch'
:arg size: Number of hits to return (default: 10)
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg stored_fields: A comma-separated list of stored fields to return as
part of a hit
:arg suggest_field: Specify which field to use for suggestions
:arg suggest_mode: Specify suggest mode, default 'missing', valid
choices are: 'missing', 'popular', 'always'
:arg suggest_size: How many suggestions to return in response
:arg suggest_text: The source text for which the suggestions should be
returned
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Explicit operation timeout
:arg track_scores: Whether to calculate and return scores even if they
are not used for sorting
:arg track_total_hits: Indicate if the number of documents that match
the query should be tracked
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
:arg version: Specify whether to return document version as part of a
hit
"""
# from is a reserved word so it cannot be used, use from_ instead
if 'from_' in params:
params['from'] = params.pop('from_') # depends on [control=['if'], data=['params']]
if not index:
index = '_all' # depends on [control=['if'], data=[]]
return self.transport.perform_request('GET', _make_path(index, '_search'), params=params, body=body)
|
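A live call to the search() method above needs a client and a reachable cluster, so the one piece shown runnable here is the reserved-word remapping it performs; the sample params, index name, and client variable are illustrative assumptions, not part of the source.

# Standalone check of the from_ -> from remapping performed by search():
params = {"from_": 10, "size": 5}
if "from_" in params:
    params["from"] = params.pop("from_")
print(params)  # {'size': 5, 'from': 10}

# A hypothetical call against a client exposing the method above (requires
# a reachable cluster, so it is left commented out):
# resp = client.search(index="my-index",
#                      body={"query": {"match_all": {}}},
#                      params={"size": 5, "from_": 10})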
def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
"""Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true)
"""
# FIXME: move to module level
log = logging.getLogger(__name__)
log.debug("Starting: %s", cmd)
stdout = ""
stderr = ""
if not shell and isinstance(cmd, string_types):
cmd = shlex.split(cmd)
if catch_out:
process = subprocess.Popen(
cmd,
shell=shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
else:
process = subprocess.Popen(cmd, shell=shell, close_fds=True)
stdout, stderr = process.communicate()
if stderr:
log.error("There were errors:\n%s", stderr)
if stdout:
log.debug("Process output:\n%s", stdout)
returncode = process.returncode
log.debug("Process exit code: %s", returncode)
return returncode, stdout, stderr
|
def function[execute, parameter[cmd, shell, poll_period, catch_out]]:
constant[Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true)
]
variable[log] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[log].debug, parameter[constant[Starting: %s], name[cmd]]]
variable[stdout] assign[=] constant[]
variable[stderr] assign[=] constant[]
if <ast.BoolOp object at 0x7da1b1803dc0> begin[:]
variable[cmd] assign[=] call[name[shlex].split, parameter[name[cmd]]]
if name[catch_out] begin[:]
variable[process] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]]
<ast.Tuple object at 0x7da1b1930490> assign[=] call[name[process].communicate, parameter[]]
if name[stderr] begin[:]
call[name[log].error, parameter[constant[There were errors:
%s], name[stderr]]]
if name[stdout] begin[:]
call[name[log].debug, parameter[constant[Process output:
%s], name[stdout]]]
variable[returncode] assign[=] name[process].returncode
call[name[log].debug, parameter[constant[Process exit code: %s], name[returncode]]]
return[tuple[[<ast.Name object at 0x7da1b1931c30>, <ast.Name object at 0x7da1b1930580>, <ast.Name object at 0x7da1b1933430>]]]
|
keyword[def] identifier[execute] ( identifier[cmd] , identifier[shell] = keyword[False] , identifier[poll_period] = literal[int] , identifier[catch_out] = keyword[False] ):
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[log] . identifier[debug] ( literal[string] , identifier[cmd] )
identifier[stdout] = literal[string]
identifier[stderr] = literal[string]
keyword[if] keyword[not] identifier[shell] keyword[and] identifier[isinstance] ( identifier[cmd] , identifier[string_types] ):
identifier[cmd] = identifier[shlex] . identifier[split] ( identifier[cmd] )
keyword[if] identifier[catch_out] :
identifier[process] = identifier[subprocess] . identifier[Popen] (
identifier[cmd] ,
identifier[shell] = identifier[shell] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[close_fds] = keyword[True] )
keyword[else] :
identifier[process] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[shell] = identifier[shell] , identifier[close_fds] = keyword[True] )
identifier[stdout] , identifier[stderr] = identifier[process] . identifier[communicate] ()
keyword[if] identifier[stderr] :
identifier[log] . identifier[error] ( literal[string] , identifier[stderr] )
keyword[if] identifier[stdout] :
identifier[log] . identifier[debug] ( literal[string] , identifier[stdout] )
identifier[returncode] = identifier[process] . identifier[returncode]
identifier[log] . identifier[debug] ( literal[string] , identifier[returncode] )
keyword[return] identifier[returncode] , identifier[stdout] , identifier[stderr]
|
def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
"""Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true)
"""
# FIXME: move to module level
log = logging.getLogger(__name__)
log.debug('Starting: %s', cmd)
stdout = ''
stderr = ''
if not shell and isinstance(cmd, string_types):
cmd = shlex.split(cmd) # depends on [control=['if'], data=[]]
if catch_out:
process = subprocess.Popen(cmd, shell=shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) # depends on [control=['if'], data=[]]
else:
process = subprocess.Popen(cmd, shell=shell, close_fds=True)
(stdout, stderr) = process.communicate()
if stderr:
log.error('There were errors:\n%s', stderr) # depends on [control=['if'], data=[]]
if stdout:
log.debug('Process output:\n%s', stdout) # depends on [control=['if'], data=[]]
returncode = process.returncode
log.debug('Process exit code: %s', returncode)
return (returncode, stdout, stderr)
|
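A short exercise of execute(), assuming the function above is in scope along with its logging/six imports; "echo hello" is a portable stand-in command. Note that communicate() yields bytes on Python 3.

import logging
logging.basicConfig(level=logging.DEBUG)

rc, out, err = execute("echo hello", catch_out=True)  # assumes execute() is in scope
print("rc=%s out=%r" % (rc, out))                     # rc=0 out=b'hello\n' on POSIX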
def _run(self):
'''The actor's main work loop'''
while self._is_running:
yield from self._task()
# Signal that the loop has finished.
self._run_complete.set_result(True)
|
def function[_run, parameter[self]]:
constant[The actor's main work loop]
while name[self]._is_running begin[:]
<ast.YieldFrom object at 0x7da1b0a9e2f0>
call[name[self]._run_complete.set_result, parameter[constant[True]]]
|
keyword[def] identifier[_run] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[_is_running] :
keyword[yield] keyword[from] identifier[self] . identifier[_task] ()
identifier[self] . identifier[_run_complete] . identifier[set_result] ( keyword[True] )
|
def _run(self):
"""The actor's main work loop"""
while self._is_running:
yield from self._task() # depends on [control=['while'], data=[]]
# Signal that the loop has finished.
self._run_complete.set_result(True)
|
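_run() is the pre-async/await form of an actor loop (yield from inside a generator coroutine). A self-contained modern sketch of the same run-until-stopped-then-signal pattern, with toy names, might look like this:

import asyncio

class ToyActor:
    def __init__(self):
        self._is_running = True
        self._ticks = 0

    async def _task(self):
        self._ticks += 1          # stand-in for one unit of actor work
        if self._ticks >= 3:
            self._is_running = False
        await asyncio.sleep(0)

    async def _run(self):
        self._run_complete = asyncio.get_running_loop().create_future()
        while self._is_running:
            await self._task()
        # Signal that the loop has finished.
        self._run_complete.set_result(True)

asyncio.run(ToyActor()._run())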
def strip_rate(self, idx):
"""strip(1 byte) radiotap.datarate
note that, unit of this field is originally 0.5 Mbps
:idx: int
:return: int
idx
:return: double
rate in terms of Mbps
"""
val, = struct.unpack_from('<B', self._rtap, idx)
rate_unit = float(1) / 2 # Mbps
return idx + 1, rate_unit * val
|
def function[strip_rate, parameter[self, idx]]:
constant[strip(1 byte) radiotap.datarate
note that, unit of this field is originally 0.5 Mbps
:idx: int
:return: int
idx
:return: double
rate in terms of Mbps
]
<ast.Tuple object at 0x7da1aff01c00> assign[=] call[name[struct].unpack_from, parameter[constant[<B], name[self]._rtap, name[idx]]]
variable[rate_unit] assign[=] binary_operation[call[name[float], parameter[constant[1]]] / constant[2]]
return[tuple[[<ast.BinOp object at 0x7da1aff025c0>, <ast.BinOp object at 0x7da1aff011b0>]]]
|
keyword[def] identifier[strip_rate] ( identifier[self] , identifier[idx] ):
literal[string]
identifier[val] ,= identifier[struct] . identifier[unpack_from] ( literal[string] , identifier[self] . identifier[_rtap] , identifier[idx] )
identifier[rate_unit] = identifier[float] ( literal[int] )/ literal[int]
keyword[return] identifier[idx] + literal[int] , identifier[rate_unit] * identifier[val]
|
def strip_rate(self, idx):
"""strip(1 byte) radiotap.datarate
note that, unit of this field is originally 0.5 Mbps
:idx: int
:return: int
idx
:return: double
rate in terms of Mbps
"""
(val,) = struct.unpack_from('<B', self._rtap, idx)
rate_unit = float(1) / 2 # Mbps
return (idx + 1, rate_unit * val)
|
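The unpacking arithmetic in strip_rate() can be verified against a hand-built one-byte buffer instead of a real radiotap header:

import struct

rtap = bytes([0x04])                       # datarate field: 4 * 0.5 Mbps
val, = struct.unpack_from('<B', rtap, 0)
print(0 + 1, float(1) / 2 * val)           # -> 1 2.0 (next offset, rate in Mbps)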
def _get(self, ip):
"""
Get information about an IP.
Args:
ip (str): an IP (xxx.xxx.xxx.xxx).
Returns:
dict: see http://ipinfo.io/developers/getting-started
"""
# Geoloc updated up to once a week:
# http://ipinfo.io/developers/data#geolocation-data
retries = 10
for retry in range(retries):
try:
response = requests.get('http://ipinfo.io/%s/json' % ip,
verify=False, timeout=1) # nosec
if response.status_code == 429:
raise RateExceededError
return response.json()
except (requests.ReadTimeout, requests.ConnectTimeout):
pass
return {}
|
def function[_get, parameter[self, ip]]:
constant[
Get information about an IP.
Args:
ip (str): an IP (xxx.xxx.xxx.xxx).
Returns:
dict: see http://ipinfo.io/developers/getting-started
]
variable[retries] assign[=] constant[10]
for taget[name[retry]] in starred[call[name[range], parameter[name[retries]]]] begin[:]
<ast.Try object at 0x7da1b2405fc0>
return[dictionary[[], []]]
|
keyword[def] identifier[_get] ( identifier[self] , identifier[ip] ):
literal[string]
identifier[retries] = literal[int]
keyword[for] identifier[retry] keyword[in] identifier[range] ( identifier[retries] ):
keyword[try] :
identifier[response] = identifier[requests] . identifier[get] ( literal[string] % identifier[ip] ,
identifier[verify] = keyword[False] , identifier[timeout] = literal[int] )
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[raise] identifier[RateExceededError]
keyword[return] identifier[response] . identifier[json] ()
keyword[except] ( identifier[requests] . identifier[ReadTimeout] , identifier[requests] . identifier[ConnectTimeout] ):
keyword[pass]
keyword[return] {}
|
def _get(self, ip):
"""
Get information about an IP.
Args:
ip (str): an IP (xxx.xxx.xxx.xxx).
Returns:
dict: see http://ipinfo.io/developers/getting-started
"""
# Geoloc updated up to once a week:
# http://ipinfo.io/developers/data#geolocation-data
retries = 10
for retry in range(retries):
try:
response = requests.get('http://ipinfo.io/%s/json' % ip, verify=False, timeout=1) # nosec
if response.status_code == 429:
raise RateExceededError # depends on [control=['if'], data=[]]
return response.json() # depends on [control=['try'], data=[]]
except (requests.ReadTimeout, requests.ConnectTimeout):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return {}
|
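Only timeouts are retried in _get(); the explicit 429 raise escapes immediately. That control flow can be sketched without the network using a stubbed fetch (all names below are illustrative):

class RateExceededError(Exception):
    pass

_attempts = {"n": 0}

def stub_fetch():
    _attempts["n"] += 1
    if _attempts["n"] < 3:
        raise TimeoutError            # stands in for requests.ReadTimeout
    return {"ip": "8.8.8.8", "country": "US"}

def get_with_retries(retries=10):
    for _ in range(retries):
        try:
            return stub_fetch()
        except TimeoutError:
            pass
    return {}

print(get_with_retries())             # succeeds on the third attempt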
def append_note(self, player, text):
"""Append text to an already existing note."""
note = self._find_note(player)
note.text += text
|
def function[append_note, parameter[self, player, text]]:
constant[Append text to an already existing note.]
variable[note] assign[=] call[name[self]._find_note, parameter[name[player]]]
<ast.AugAssign object at 0x7da1b15f01f0>
|
keyword[def] identifier[append_note] ( identifier[self] , identifier[player] , identifier[text] ):
literal[string]
identifier[note] = identifier[self] . identifier[_find_note] ( identifier[player] )
identifier[note] . identifier[text] += identifier[text]
|
def append_note(self, player, text):
"""Append text to an already existing note."""
note = self._find_note(player)
note.text += text
|
def switch(stage):
"""
Switch to given stage (dev/qa/production) + pull
"""
stage = stage.lower()
local("git pull")
if stage in ['dev', 'devel', 'develop']:
branch_name = 'develop'
elif stage in ['qa', 'release']:
branches = local('git branch -r', capture=True)
possible_branches = []
for b in branches.split("\n"):
b_parts = b.split('/')
if b_parts[1] == 'release':
possible_branches.append(b_parts[2])
if len(possible_branches) == 0:
raise Exception('No release branches found. Please create a new release first.')
possible_branches = sorted(possible_branches, reverse=True)
branch_name = 'release/%s' % possible_branches[0]
elif stage in ['production', 'master']:
branch_name = 'master'
else:
        raise NotImplementedError
local("git checkout %s" % branch_name)
local("git pull")
|
def function[switch, parameter[stage]]:
constant[
Switch to given stage (dev/qa/production) + pull
]
variable[stage] assign[=] call[name[stage].lower, parameter[]]
call[name[local], parameter[constant[git pull]]]
if compare[name[stage] in list[[<ast.Constant object at 0x7da204565930>, <ast.Constant object at 0x7da204567760>, <ast.Constant object at 0x7da2045675b0>]]] begin[:]
variable[branch_name] assign[=] constant[develop]
call[name[local], parameter[binary_operation[constant[git checkout %s] <ast.Mod object at 0x7da2590d6920> name[branch_name]]]]
call[name[local], parameter[constant[git pull]]]
|
keyword[def] identifier[switch] ( identifier[stage] ):
literal[string]
identifier[stage] = identifier[stage] . identifier[lower] ()
identifier[local] ( literal[string] )
keyword[if] identifier[stage] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[branch_name] = literal[string]
keyword[elif] identifier[stage] keyword[in] [ literal[string] , literal[string] ]:
identifier[branches] = identifier[local] ( literal[string] , identifier[capture] = keyword[True] )
identifier[possible_branches] =[]
keyword[for] identifier[b] keyword[in] identifier[branches] . identifier[split] ( literal[string] ):
identifier[b_parts] = identifier[b] . identifier[split] ( literal[string] )
keyword[if] identifier[b_parts] [ literal[int] ]== literal[string] :
identifier[possible_branches] . identifier[append] ( identifier[b_parts] [ literal[int] ])
keyword[if] identifier[len] ( identifier[possible_branches] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[possible_branches] = identifier[sorted] ( identifier[possible_branches] , identifier[reverse] = keyword[True] )
identifier[branch_name] = literal[string] % identifier[possible_branches] [ literal[int] ]
keyword[elif] identifier[stage] keyword[in] [ literal[string] , literal[string] ]:
identifier[branch_name] = literal[string]
keyword[else] :
        keyword[raise] identifier[NotImplementedError]
identifier[local] ( literal[string] % identifier[branch_name] )
identifier[local] ( literal[string] )
|
def switch(stage):
"""
Switch to given stage (dev/qa/production) + pull
"""
stage = stage.lower()
local('git pull')
if stage in ['dev', 'devel', 'develop']:
branch_name = 'develop' # depends on [control=['if'], data=[]]
elif stage in ['qa', 'release']:
branches = local('git branch -r', capture=True)
possible_branches = []
for b in branches.split('\n'):
b_parts = b.split('/')
if b_parts[1] == 'release':
possible_branches.append(b_parts[2]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['b']]
if len(possible_branches) == 0:
raise Exception('No release branches found. Please create a new release first.') # depends on [control=['if'], data=[]]
possible_branches = sorted(possible_branches, reverse=True)
branch_name = 'release/%s' % possible_branches[0] # depends on [control=['if'], data=[]]
elif stage in ['production', 'master']:
branch_name = 'master' # depends on [control=['if'], data=[]]
else:
        raise NotImplementedError
local('git checkout %s' % branch_name)
local('git pull')
|
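The release-branch selection in switch() can be exercised without git. Note that sorted() on version strings is lexicographic, so '1.9.0' outranks '1.10.0' here; that is a caveat of the original string sort, reproduced faithfully:

branches = "  origin/develop\n  origin/release/1.9.0\n  origin/release/1.10.0"
possible_branches = []
for b in branches.split("\n"):
    b_parts = b.split('/')
    if b_parts[1] == 'release':
        possible_branches.append(b_parts[2])
possible_branches = sorted(possible_branches, reverse=True)
print('release/%s' % possible_branches[0])   # release/1.9.0 (lexicographic!)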
def kde_statsmodels_u(data, grid, **kwargs):
"""
Univariate Kernel Density Estimation with Statsmodels
Parameters
----------
data : numpy.array
Data points used to compute a density estimator. It
        has `n x 1` dimensions, representing n points of a
        single variable.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x 1` dimensions, representing m points of a
        single variable.
Returns
-------
out : numpy.array
Density estimate. Has `m x 1` dimensions
"""
kde = KDEUnivariate(data)
kde.fit(**kwargs)
return kde.evaluate(grid)
|
def function[kde_statsmodels_u, parameter[data, grid]]:
constant[
Univariate Kernel Density Estimation with Statsmodels
Parameters
----------
data : numpy.array
Data points used to compute a density estimator. It
        has `n x 1` dimensions, representing n points of a
        single variable.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x 1` dimensions, representing m points of a
        single variable.
Returns
-------
out : numpy.array
Density estimate. Has `m x 1` dimensions
]
variable[kde] assign[=] call[name[KDEUnivariate], parameter[name[data]]]
call[name[kde].fit, parameter[]]
return[call[name[kde].evaluate, parameter[name[grid]]]]
|
keyword[def] identifier[kde_statsmodels_u] ( identifier[data] , identifier[grid] ,** identifier[kwargs] ):
literal[string]
identifier[kde] = identifier[KDEUnivariate] ( identifier[data] )
identifier[kde] . identifier[fit] (** identifier[kwargs] )
keyword[return] identifier[kde] . identifier[evaluate] ( identifier[grid] )
|
def kde_statsmodels_u(data, grid, **kwargs):
"""
Univariate Kernel Density Estimation with Statsmodels
Parameters
----------
data : numpy.array
Data points used to compute a density estimator. It
        has `n x 1` dimensions, representing n points of a
        single variable.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x 1` dimensions, representing m points of a
        single variable.
Returns
-------
out : numpy.array
Density estimate. Has `m x 1` dimensions
"""
kde = KDEUnivariate(data)
kde.fit(**kwargs)
return kde.evaluate(grid)
|
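End to end, assuming numpy and statsmodels are installed and the wrapper above is in scope (with its KDEUnivariate import); bw=0.3 is an arbitrary illustrative bandwidth forwarded through **kwargs to fit():

import numpy as np

data = np.random.normal(size=200)
grid = np.linspace(-4.0, 4.0, 50)
density = kde_statsmodels_u(data, grid, bw=0.3)
print(len(density))                           # 50, one estimate per grid point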
def add_time(data):
"""And a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
"""
payload = data['data']
updated = data['updated'].date()
if updated == date.today():
payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')
elif updated >= (date.today() - timedelta(days=1)):
payload['last_updated'] = 'yesterday'
elif updated >= (date.today() - timedelta(days=7)):
payload['last_updated'] = updated.strftime('on %A')
else:
payload['last_updated'] = updated.strftime('%Y-%m-%d')
return payload
|
def function[add_time, parameter[data]]:
    constant[Add a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
]
variable[payload] assign[=] call[name[data]][constant[data]]
variable[updated] assign[=] call[call[name[data]][constant[updated]].date, parameter[]]
if compare[name[updated] equal[==] call[name[date].today, parameter[]]] begin[:]
call[name[payload]][constant[last_updated]] assign[=] call[call[name[data]][constant[updated]].strftime, parameter[constant[today at %H:%M:%S]]]
return[name[payload]]
|
keyword[def] identifier[add_time] ( identifier[data] ):
literal[string]
identifier[payload] = identifier[data] [ literal[string] ]
identifier[updated] = identifier[data] [ literal[string] ]. identifier[date] ()
keyword[if] identifier[updated] == identifier[date] . identifier[today] ():
identifier[payload] [ literal[string] ]= identifier[data] [ literal[string] ]. identifier[strftime] ( literal[string] )
keyword[elif] identifier[updated] >=( identifier[date] . identifier[today] ()- identifier[timedelta] ( identifier[days] = literal[int] )):
identifier[payload] [ literal[string] ]= literal[string]
keyword[elif] identifier[updated] >=( identifier[date] . identifier[today] ()- identifier[timedelta] ( identifier[days] = literal[int] )):
identifier[payload] [ literal[string] ]= identifier[updated] . identifier[strftime] ( literal[string] )
keyword[else] :
identifier[payload] [ literal[string] ]= identifier[updated] . identifier[strftime] ( literal[string] )
keyword[return] identifier[payload]
|
def add_time(data):
"""And a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
"""
payload = data['data']
updated = data['updated'].date()
if updated == date.today():
payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S') # depends on [control=['if'], data=[]]
elif updated >= date.today() - timedelta(days=1):
payload['last_updated'] = 'yesterday' # depends on [control=['if'], data=[]]
elif updated >= date.today() - timedelta(days=7):
payload['last_updated'] = updated.strftime('on %A') # depends on [control=['if'], data=['updated']]
else:
payload['last_updated'] = updated.strftime('%Y-%m-%d')
return payload
|
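A quick probe of the date bucketing, assuming add_time() above is in scope; a three-day-old timestamp lands in the weekday-name branch:

from datetime import datetime, timedelta

payload = {"data": {}, "updated": datetime.now() - timedelta(days=3)}
print(add_time(payload)["last_updated"])      # e.g. 'on Monday'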
def process_master(m):
'''process packets from the MAVLink master'''
try:
s = m.recv(16*1024)
except Exception:
time.sleep(0.1)
return
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return
if (mpstate.settings.compdebug & 1) != 0:
return
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(str(s))
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace("\033[K","")
sys.stdout.write(str(s))
sys.stdout.flush()
return
if m.first_byte and opts.auto_protocol:
m.auto_mavlink_version(s)
msgs = m.mav.parse_buffer(s)
if msgs:
for msg in msgs:
sysid = msg.get_srcSystem()
if sysid in mpstate.sysid_outputs:
# the message has been handled by a specialised handler for this system
continue
if getattr(m, '_timestamp', None) is None:
m.post_message(msg)
if msg.get_type() == "BAD_DATA":
if opts.show_errors:
mpstate.console.writeln("MAV error: %s" % msg)
mpstate.status.mav_error += 1
|
def function[process_master, parameter[m]]:
constant[process packets from the MAVLink master]
<ast.Try object at 0x7da2041da8c0>
if compare[call[name[len], parameter[name[s]]] equal[==] constant[0]] begin[:]
call[name[time].sleep, parameter[constant[0.1]]]
return[None]
if compare[binary_operation[name[mpstate].settings.compdebug <ast.BitAnd object at 0x7da2590d6b60> constant[1]] not_equal[!=] constant[0]] begin[:]
return[None]
if name[mpstate].logqueue_raw begin[:]
call[name[mpstate].logqueue_raw.put, parameter[call[name[str], parameter[name[s]]]]]
if name[mpstate].status.setup_mode begin[:]
if compare[name[mpstate].system equal[==] constant[Windows]] begin[:]
variable[s] assign[=] call[name[s].replace, parameter[constant[[K], constant[]]]
call[name[sys].stdout.write, parameter[call[name[str], parameter[name[s]]]]]
call[name[sys].stdout.flush, parameter[]]
return[None]
if <ast.BoolOp object at 0x7da2041d90c0> begin[:]
call[name[m].auto_mavlink_version, parameter[name[s]]]
variable[msgs] assign[=] call[name[m].mav.parse_buffer, parameter[name[s]]]
if name[msgs] begin[:]
for taget[name[msg]] in starred[name[msgs]] begin[:]
variable[sysid] assign[=] call[name[msg].get_srcSystem, parameter[]]
if compare[name[sysid] in name[mpstate].sysid_outputs] begin[:]
continue
if compare[call[name[getattr], parameter[name[m], constant[_timestamp], constant[None]]] is constant[None]] begin[:]
call[name[m].post_message, parameter[name[msg]]]
if compare[call[name[msg].get_type, parameter[]] equal[==] constant[BAD_DATA]] begin[:]
if name[opts].show_errors begin[:]
call[name[mpstate].console.writeln, parameter[binary_operation[constant[MAV error: %s] <ast.Mod object at 0x7da2590d6920> name[msg]]]]
<ast.AugAssign object at 0x7da2041d9360>
|
keyword[def] identifier[process_master] ( identifier[m] ):
literal[string]
keyword[try] :
identifier[s] = identifier[m] . identifier[recv] ( literal[int] * literal[int] )
keyword[except] identifier[Exception] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return]
keyword[if] identifier[len] ( identifier[s] )== literal[int] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return]
keyword[if] ( identifier[mpstate] . identifier[settings] . identifier[compdebug] & literal[int] )!= literal[int] :
keyword[return]
keyword[if] identifier[mpstate] . identifier[logqueue_raw] :
identifier[mpstate] . identifier[logqueue_raw] . identifier[put] ( identifier[str] ( identifier[s] ))
keyword[if] identifier[mpstate] . identifier[status] . identifier[setup_mode] :
keyword[if] identifier[mpstate] . identifier[system] == literal[string] :
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
identifier[sys] . identifier[stdout] . identifier[write] ( identifier[str] ( identifier[s] ))
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[return]
keyword[if] identifier[m] . identifier[first_byte] keyword[and] identifier[opts] . identifier[auto_protocol] :
identifier[m] . identifier[auto_mavlink_version] ( identifier[s] )
identifier[msgs] = identifier[m] . identifier[mav] . identifier[parse_buffer] ( identifier[s] )
keyword[if] identifier[msgs] :
keyword[for] identifier[msg] keyword[in] identifier[msgs] :
identifier[sysid] = identifier[msg] . identifier[get_srcSystem] ()
keyword[if] identifier[sysid] keyword[in] identifier[mpstate] . identifier[sysid_outputs] :
keyword[continue]
keyword[if] identifier[getattr] ( identifier[m] , literal[string] , keyword[None] ) keyword[is] keyword[None] :
identifier[m] . identifier[post_message] ( identifier[msg] )
keyword[if] identifier[msg] . identifier[get_type] ()== literal[string] :
keyword[if] identifier[opts] . identifier[show_errors] :
identifier[mpstate] . identifier[console] . identifier[writeln] ( literal[string] % identifier[msg] )
identifier[mpstate] . identifier[status] . identifier[mav_error] += literal[int]
|
def process_master(m):
"""process packets from the MAVLink master"""
try:
s = m.recv(16 * 1024) # depends on [control=['try'], data=[]]
except Exception:
time.sleep(0.1)
return # depends on [control=['except'], data=[]]
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return # depends on [control=['if'], data=[]]
if mpstate.settings.compdebug & 1 != 0:
return # depends on [control=['if'], data=[]]
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(str(s)) # depends on [control=['if'], data=[]]
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace('\x1b[K', '') # depends on [control=['if'], data=[]]
sys.stdout.write(str(s))
sys.stdout.flush()
return # depends on [control=['if'], data=[]]
if m.first_byte and opts.auto_protocol:
m.auto_mavlink_version(s) # depends on [control=['if'], data=[]]
msgs = m.mav.parse_buffer(s)
if msgs:
for msg in msgs:
sysid = msg.get_srcSystem()
if sysid in mpstate.sysid_outputs:
# the message has been handled by a specialised handler for this system
continue # depends on [control=['if'], data=[]]
if getattr(m, '_timestamp', None) is None:
m.post_message(msg) # depends on [control=['if'], data=[]]
if msg.get_type() == 'BAD_DATA':
if opts.show_errors:
mpstate.console.writeln('MAV error: %s' % msg) # depends on [control=['if'], data=[]]
mpstate.status.mav_error += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['msg']] # depends on [control=['if'], data=[]]
|
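process_master() is welded to MAVProxy/MAVLink globals, but its dispatch skeleton (skip systems with a dedicated handler, count BAD_DATA) can be stubbed out in isolation; every name below is a toy:

class StubMsg:
    def __init__(self, mtype, src):
        self._type, self._src = mtype, src
    def get_type(self):
        return self._type
    def get_srcSystem(self):
        return self._src

sysid_outputs = {42}                          # systems handled elsewhere
mav_error = 0
for msg in [StubMsg("HEARTBEAT", 1), StubMsg("BAD_DATA", 1), StubMsg("ATTITUDE", 42)]:
    if msg.get_srcSystem() in sysid_outputs:
        continue                              # specialised handler consumed it
    if msg.get_type() == "BAD_DATA":
        mav_error += 1
print(mav_error)                              # -> 1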
def cli(ctx, feature_id, organism="", sequence=""):
"""Set the feature to read through the first encountered stop codon
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.set_readthrough_stop_codon(feature_id, organism=organism, sequence=sequence)
|
def function[cli, parameter[ctx, feature_id, organism, sequence]]:
constant[Set the feature to read through the first encountered stop codon
Output:
A standard apollo feature dictionary ({"features": [{...}]})
]
return[call[name[ctx].gi.annotations.set_readthrough_stop_codon, parameter[name[feature_id]]]]
|
keyword[def] identifier[cli] ( identifier[ctx] , identifier[feature_id] , identifier[organism] = literal[string] , identifier[sequence] = literal[string] ):
literal[string]
keyword[return] identifier[ctx] . identifier[gi] . identifier[annotations] . identifier[set_readthrough_stop_codon] ( identifier[feature_id] , identifier[organism] = identifier[organism] , identifier[sequence] = identifier[sequence] )
|
def cli(ctx, feature_id, organism='', sequence=''):
"""Set the feature to read through the first encountered stop codon
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.set_readthrough_stop_codon(feature_id, organism=organism, sequence=sequence)
|
def prepare(args):
"""
%prog prepare countfolder families
Parse list of count files and group per family into families folder.
"""
p = OptionParser(prepare.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
counts, families = args
countfiles = glob(op.join(counts, "*.count"))
countsdb = defaultdict(list)
for c in countfiles:
rs = RiceSample(c)
countsdb[(rs.tissue, rs.ind)].append(rs)
# Merge duplicates - data sequenced in different batches
key = lambda x: (x.label, x.rep)
for (tissue, ind), rs in sorted(countsdb.items()):
rs.sort(key=key)
nrs = len(rs)
for i in xrange(nrs):
ri = rs[i]
if not ri.working:
continue
for j in xrange(i + 1, nrs):
rj = rs[j]
if key(ri) != key(rj):
continue
ri.merge(rj)
rj.working = False
countsdb[(tissue, ind)] = [x for x in rs if x.working]
# Group into families
mkdir("families")
for (tissue, ind), r in sorted(countsdb.items()):
r = list(r)
if r[0].label != "F1":
continue
P1, P2 = r[0].P1, r[0].P2
P1, P2 = countsdb[(tissue, P1)], countsdb[(tissue, P2)]
rs = P1 + P2 + r
groups = [1] * len(P1) + [2] * len(P2) + [3] * len(r)
assert len(rs) == len(groups)
outfile = "-".join((tissue, ind))
merge_counts(rs, op.join(families, outfile))
groupsfile = outfile + ".groups"
fw = open(op.join(families, groupsfile), "w")
print(",".join(str(x) for x in groups), file=fw)
fw.close()
|
def function[prepare, parameter[args]]:
constant[
%prog prepare countfolder families
Parse list of count files and group per family into families folder.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[prepare].__doc__]]
<ast.Tuple object at 0x7da2054a4670> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da2054a5720>]]
<ast.Tuple object at 0x7da2054a5b70> assign[=] name[args]
variable[countfiles] assign[=] call[name[glob], parameter[call[name[op].join, parameter[name[counts], constant[*.count]]]]]
variable[countsdb] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[c]] in starred[name[countfiles]] begin[:]
variable[rs] assign[=] call[name[RiceSample], parameter[name[c]]]
call[call[name[countsdb]][tuple[[<ast.Attribute object at 0x7da2054a60b0>, <ast.Attribute object at 0x7da2054a7e80>]]].append, parameter[name[rs]]]
variable[key] assign[=] <ast.Lambda object at 0x7da2054a4eb0>
for taget[tuple[[<ast.Tuple object at 0x7da2054a6680>, <ast.Name object at 0x7da2054a6230>]]] in starred[call[name[sorted], parameter[call[name[countsdb].items, parameter[]]]]] begin[:]
call[name[rs].sort, parameter[]]
variable[nrs] assign[=] call[name[len], parameter[name[rs]]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[nrs]]]] begin[:]
variable[ri] assign[=] call[name[rs]][name[i]]
if <ast.UnaryOp object at 0x7da1b2346440> begin[:]
continue
for taget[name[j]] in starred[call[name[xrange], parameter[binary_operation[name[i] + constant[1]], name[nrs]]]] begin[:]
variable[rj] assign[=] call[name[rs]][name[j]]
if compare[call[name[key], parameter[name[ri]]] not_equal[!=] call[name[key], parameter[name[rj]]]] begin[:]
continue
call[name[ri].merge, parameter[name[rj]]]
name[rj].working assign[=] constant[False]
call[name[countsdb]][tuple[[<ast.Name object at 0x7da20c76f850>, <ast.Name object at 0x7da20c76d1b0>]]] assign[=] <ast.ListComp object at 0x7da20c76cac0>
    call[name[mkdir], parameter[name[families]]]
for taget[tuple[[<ast.Tuple object at 0x7da20c76fd30>, <ast.Name object at 0x7da20c76ff10>]]] in starred[call[name[sorted], parameter[call[name[countsdb].items, parameter[]]]]] begin[:]
variable[r] assign[=] call[name[list], parameter[name[r]]]
if compare[call[name[r]][constant[0]].label not_equal[!=] constant[F1]] begin[:]
continue
<ast.Tuple object at 0x7da20c76e6b0> assign[=] tuple[[<ast.Attribute object at 0x7da20c76d300>, <ast.Attribute object at 0x7da20c76ebc0>]]
<ast.Tuple object at 0x7da20c76c280> assign[=] tuple[[<ast.Subscript object at 0x7da20c76d810>, <ast.Subscript object at 0x7da20c76cee0>]]
variable[rs] assign[=] binary_operation[binary_operation[name[P1] + name[P2]] + name[r]]
variable[groups] assign[=] binary_operation[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da20c76ffa0>]] * call[name[len], parameter[name[P1]]]] + binary_operation[list[[<ast.Constant object at 0x7da20c76f280>]] * call[name[len], parameter[name[P2]]]]] + binary_operation[list[[<ast.Constant object at 0x7da20c76cbe0>]] * call[name[len], parameter[name[r]]]]]
assert[compare[call[name[len], parameter[name[rs]]] equal[==] call[name[len], parameter[name[groups]]]]]
variable[outfile] assign[=] call[constant[-].join, parameter[tuple[[<ast.Name object at 0x7da1b08ac520>, <ast.Name object at 0x7da1b08afa30>]]]]
call[name[merge_counts], parameter[name[rs], call[name[op].join, parameter[name[families], name[outfile]]]]]
variable[groupsfile] assign[=] binary_operation[name[outfile] + constant[.groups]]
variable[fw] assign[=] call[name[open], parameter[call[name[op].join, parameter[name[families], name[groupsfile]]], constant[w]]]
call[name[print], parameter[call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da1b08ad2a0>]]]]
call[name[fw].close, parameter[]]
|
keyword[def] identifier[prepare] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[prepare] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[counts] , identifier[families] = identifier[args]
identifier[countfiles] = identifier[glob] ( identifier[op] . identifier[join] ( identifier[counts] , literal[string] ))
identifier[countsdb] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[c] keyword[in] identifier[countfiles] :
identifier[rs] = identifier[RiceSample] ( identifier[c] )
identifier[countsdb] [( identifier[rs] . identifier[tissue] , identifier[rs] . identifier[ind] )]. identifier[append] ( identifier[rs] )
identifier[key] = keyword[lambda] identifier[x] :( identifier[x] . identifier[label] , identifier[x] . identifier[rep] )
keyword[for] ( identifier[tissue] , identifier[ind] ), identifier[rs] keyword[in] identifier[sorted] ( identifier[countsdb] . identifier[items] ()):
identifier[rs] . identifier[sort] ( identifier[key] = identifier[key] )
identifier[nrs] = identifier[len] ( identifier[rs] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[nrs] ):
identifier[ri] = identifier[rs] [ identifier[i] ]
keyword[if] keyword[not] identifier[ri] . identifier[working] :
keyword[continue]
keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[i] + literal[int] , identifier[nrs] ):
identifier[rj] = identifier[rs] [ identifier[j] ]
keyword[if] identifier[key] ( identifier[ri] )!= identifier[key] ( identifier[rj] ):
keyword[continue]
identifier[ri] . identifier[merge] ( identifier[rj] )
identifier[rj] . identifier[working] = keyword[False]
identifier[countsdb] [( identifier[tissue] , identifier[ind] )]=[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[rs] keyword[if] identifier[x] . identifier[working] ]
    identifier[mkdir] ( identifier[families] )
keyword[for] ( identifier[tissue] , identifier[ind] ), identifier[r] keyword[in] identifier[sorted] ( identifier[countsdb] . identifier[items] ()):
identifier[r] = identifier[list] ( identifier[r] )
keyword[if] identifier[r] [ literal[int] ]. identifier[label] != literal[string] :
keyword[continue]
identifier[P1] , identifier[P2] = identifier[r] [ literal[int] ]. identifier[P1] , identifier[r] [ literal[int] ]. identifier[P2]
identifier[P1] , identifier[P2] = identifier[countsdb] [( identifier[tissue] , identifier[P1] )], identifier[countsdb] [( identifier[tissue] , identifier[P2] )]
identifier[rs] = identifier[P1] + identifier[P2] + identifier[r]
identifier[groups] =[ literal[int] ]* identifier[len] ( identifier[P1] )+[ literal[int] ]* identifier[len] ( identifier[P2] )+[ literal[int] ]* identifier[len] ( identifier[r] )
keyword[assert] identifier[len] ( identifier[rs] )== identifier[len] ( identifier[groups] )
identifier[outfile] = literal[string] . identifier[join] (( identifier[tissue] , identifier[ind] ))
identifier[merge_counts] ( identifier[rs] , identifier[op] . identifier[join] ( identifier[families] , identifier[outfile] ))
identifier[groupsfile] = identifier[outfile] + literal[string]
identifier[fw] = identifier[open] ( identifier[op] . identifier[join] ( identifier[families] , identifier[groupsfile] ), literal[string] )
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[groups] ), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] ()
|
def prepare(args):
"""
%prog prepare countfolder families
Parse list of count files and group per family into families folder.
"""
p = OptionParser(prepare.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(counts, families) = args
countfiles = glob(op.join(counts, '*.count'))
countsdb = defaultdict(list)
for c in countfiles:
rs = RiceSample(c)
countsdb[rs.tissue, rs.ind].append(rs) # depends on [control=['for'], data=['c']]
# Merge duplicates - data sequenced in different batches
key = lambda x: (x.label, x.rep)
for ((tissue, ind), rs) in sorted(countsdb.items()):
rs.sort(key=key)
nrs = len(rs)
for i in xrange(nrs):
ri = rs[i]
if not ri.working:
continue # depends on [control=['if'], data=[]]
for j in xrange(i + 1, nrs):
rj = rs[j]
if key(ri) != key(rj):
continue # depends on [control=['if'], data=[]]
ri.merge(rj)
rj.working = False # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
countsdb[tissue, ind] = [x for x in rs if x.working] # depends on [control=['for'], data=[]]
# Group into families
    mkdir(families)
for ((tissue, ind), r) in sorted(countsdb.items()):
r = list(r)
if r[0].label != 'F1':
continue # depends on [control=['if'], data=[]]
(P1, P2) = (r[0].P1, r[0].P2)
(P1, P2) = (countsdb[tissue, P1], countsdb[tissue, P2])
rs = P1 + P2 + r
groups = [1] * len(P1) + [2] * len(P2) + [3] * len(r)
assert len(rs) == len(groups)
outfile = '-'.join((tissue, ind))
merge_counts(rs, op.join(families, outfile))
groupsfile = outfile + '.groups'
fw = open(op.join(families, groupsfile), 'w')
print(','.join((str(x) for x in groups)), file=fw)
fw.close() # depends on [control=['for'], data=[]]
|
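The duplicate-merge pass inside prepare() (fold samples that share a (label, rep) key into the first occurrence) in isolation, with a toy sample type standing in for RiceSample:

class ToySample:
    def __init__(self, label, rep, reads):
        self.label, self.rep, self.reads = label, rep, reads
        self.working = True
    def merge(self, other):
        self.reads += other.reads

key = lambda x: (x.label, x.rep)
rs = sorted([ToySample("F1", 1, 10), ToySample("F1", 1, 5), ToySample("F1", 2, 7)],
            key=key)
for i in range(len(rs)):
    if not rs[i].working:
        continue
    for j in range(i + 1, len(rs)):
        if key(rs[i]) != key(rs[j]):
            continue
        rs[i].merge(rs[j])
        rs[j].working = False
rs = [x for x in rs if x.working]
print([(s.label, s.rep, s.reads) for s in rs])   # [('F1', 1, 15), ('F1', 2, 7)]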
def _validate_partition_boundary(boundary):
'''
Ensure valid partition boundaries are supplied.
'''
boundary = six.text_type(boundary)
match = re.search(r'^([\d.]+)(\D*)$', boundary)
if match:
unit = match.group(2)
if not unit or unit in VALID_UNITS:
return
raise CommandExecutionError(
'Invalid partition boundary passed: "{0}"'.format(boundary)
)
|
def function[_validate_partition_boundary, parameter[boundary]]:
constant[
Ensure valid partition boundaries are supplied.
]
variable[boundary] assign[=] call[name[six].text_type, parameter[name[boundary]]]
variable[match] assign[=] call[name[re].search, parameter[constant[^([\d.]+)(\D*)$], name[boundary]]]
if name[match] begin[:]
variable[unit] assign[=] call[name[match].group, parameter[constant[2]]]
if <ast.BoolOp object at 0x7da18dc05ae0> begin[:]
return[None]
<ast.Raise object at 0x7da18dc05a80>
|
keyword[def] identifier[_validate_partition_boundary] ( identifier[boundary] ):
literal[string]
identifier[boundary] = identifier[six] . identifier[text_type] ( identifier[boundary] )
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[boundary] )
keyword[if] identifier[match] :
identifier[unit] = identifier[match] . identifier[group] ( literal[int] )
keyword[if] keyword[not] identifier[unit] keyword[or] identifier[unit] keyword[in] identifier[VALID_UNITS] :
keyword[return]
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[boundary] )
)
|
def _validate_partition_boundary(boundary):
"""
Ensure valid partition boundaries are supplied.
"""
boundary = six.text_type(boundary)
match = re.search('^([\\d.]+)(\\D*)$', boundary)
if match:
unit = match.group(2)
if not unit or unit in VALID_UNITS:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
raise CommandExecutionError('Invalid partition boundary passed: "{0}"'.format(boundary))
|
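The check reduces to "digits/dots followed by an optional known unit". A self-contained version follows; the unit set is an illustrative subset, not the module's actual VALID_UNITS:

import re

VALID_UNITS = {'s', 'B', 'kB', 'MB', 'GB', 'TB', '%'}     # illustrative subset

def is_valid_boundary(boundary):
    match = re.search(r'^([\d.]+)(\D*)$', str(boundary))
    if match:
        unit = match.group(2)
        return not unit or unit in VALID_UNITS
    return False

print(is_valid_boundary('512MB'), is_valid_boundary('10%'), is_valid_boundary('abc'))
# -> True True False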
def from_dict(d):
"""
Re-create the Specs from a dictionary representation.
:param Dict[str, Any] d: The dictionary representation.
:return: The restored Specs.
:rtype: Specs
"""
return Specs(
qubits_specs=sorted([QubitSpecs(id=int(q),
fRO=qspecs.get('fRO'),
f1QRB=qspecs.get('f1QRB'),
T1=qspecs.get('T1'),
T2=qspecs.get('T2'),
fActiveReset=qspecs.get('fActiveReset'))
for q, qspecs in d["1Q"].items()],
key=lambda qubit_specs: qubit_specs.id),
edges_specs=sorted([EdgeSpecs(targets=[int(q) for q in e.split('-')],
fBellState=especs.get('fBellState'),
fCZ=especs.get('fCZ'),
fCZ_std_err=especs.get('fCZ_std_err'),
fCPHASE=especs.get('fCPHASE'))
for e, especs in d["2Q"].items()],
key=lambda edge_specs: edge_specs.targets)
)
|
def function[from_dict, parameter[d]]:
constant[
Re-create the Specs from a dictionary representation.
:param Dict[str, Any] d: The dictionary representation.
:return: The restored Specs.
:rtype: Specs
]
return[call[name[Specs], parameter[]]]
|
keyword[def] identifier[from_dict] ( identifier[d] ):
literal[string]
keyword[return] identifier[Specs] (
identifier[qubits_specs] = identifier[sorted] ([ identifier[QubitSpecs] ( identifier[id] = identifier[int] ( identifier[q] ),
identifier[fRO] = identifier[qspecs] . identifier[get] ( literal[string] ),
identifier[f1QRB] = identifier[qspecs] . identifier[get] ( literal[string] ),
identifier[T1] = identifier[qspecs] . identifier[get] ( literal[string] ),
identifier[T2] = identifier[qspecs] . identifier[get] ( literal[string] ),
identifier[fActiveReset] = identifier[qspecs] . identifier[get] ( literal[string] ))
keyword[for] identifier[q] , identifier[qspecs] keyword[in] identifier[d] [ literal[string] ]. identifier[items] ()],
identifier[key] = keyword[lambda] identifier[qubit_specs] : identifier[qubit_specs] . identifier[id] ),
identifier[edges_specs] = identifier[sorted] ([ identifier[EdgeSpecs] ( identifier[targets] =[ identifier[int] ( identifier[q] ) keyword[for] identifier[q] keyword[in] identifier[e] . identifier[split] ( literal[string] )],
identifier[fBellState] = identifier[especs] . identifier[get] ( literal[string] ),
identifier[fCZ] = identifier[especs] . identifier[get] ( literal[string] ),
identifier[fCZ_std_err] = identifier[especs] . identifier[get] ( literal[string] ),
identifier[fCPHASE] = identifier[especs] . identifier[get] ( literal[string] ))
keyword[for] identifier[e] , identifier[especs] keyword[in] identifier[d] [ literal[string] ]. identifier[items] ()],
identifier[key] = keyword[lambda] identifier[edge_specs] : identifier[edge_specs] . identifier[targets] )
)
|
def from_dict(d):
"""
Re-create the Specs from a dictionary representation.
:param Dict[str, Any] d: The dictionary representation.
:return: The restored Specs.
:rtype: Specs
"""
return Specs(qubits_specs=sorted([QubitSpecs(id=int(q), fRO=qspecs.get('fRO'), f1QRB=qspecs.get('f1QRB'), T1=qspecs.get('T1'), T2=qspecs.get('T2'), fActiveReset=qspecs.get('fActiveReset')) for (q, qspecs) in d['1Q'].items()], key=lambda qubit_specs: qubit_specs.id), edges_specs=sorted([EdgeSpecs(targets=[int(q) for q in e.split('-')], fBellState=especs.get('fBellState'), fCZ=especs.get('fCZ'), fCZ_std_err=especs.get('fCZ_std_err'), fCPHASE=especs.get('fCPHASE')) for (e, especs) in d['2Q'].items()], key=lambda edge_specs: edge_specs.targets))
|
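A round-trip sketch for the function above. It assumes the pyquil-style Specs, QubitSpecs and EdgeSpecs classes are in scope and that Specs exposes its constructor arguments as attributes; the dictionary values are illustrative, not taken from the source.

d = {
    "1Q": {"0": {"fRO": 0.95, "f1QRB": 0.99, "T1": 3.1e-05, "T2": 1.8e-05},
           "1": {"fRO": 0.93, "f1QRB": 0.98, "T1": 2.7e-05, "T2": 1.5e-05}},
    "2Q": {"0-1": {"fBellState": 0.90, "fCZ": 0.89}},
}
specs = from_dict(d)
# Qubit specs come back sorted by qubit id, edge specs by target pair.
assert specs.qubits_specs[0].id == 0
assert specs.edges_specs[0].targets == [0, 1]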
def steady_connection(self):
"""Get a steady, unpooled PostgreSQL connection."""
return SteadyPgConnection(self._maxusage, self._setsession, True,
*self._args, **self._kwargs)
|
def function[steady_connection, parameter[self]]:
constant[Get a steady, unpooled PostgreSQL connection.]
return[call[name[SteadyPgConnection], parameter[name[self]._maxusage, name[self]._setsession, constant[True], <ast.Starred object at 0x7da207f034f0>]]]
|
keyword[def] identifier[steady_connection] ( identifier[self] ):
literal[string]
keyword[return] identifier[SteadyPgConnection] ( identifier[self] . identifier[_maxusage] , identifier[self] . identifier[_setsession] , keyword[True] ,
* identifier[self] . identifier[_args] ,** identifier[self] . identifier[_kwargs] )
|
def steady_connection(self):
"""Get a steady, unpooled PostgreSQL connection."""
return SteadyPgConnection(self._maxusage, self._setsession, True, *self._args, **self._kwargs)
|
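A minimal sketch of how this helper is typically reached, assuming DBUtils' PooledPg (which provides steady_connection) and a locally reachable PostgreSQL database named 'test':

from dbutils.pooled_pg import PooledPg  # 'DBUtils.PooledPg' in DBUtils < 2.0

pool = PooledPg(maxconnections=5, dbname='test')
conn = pool.steady_connection()  # auto-reconnecting connection outside the pool
try:
    print(conn.query('SELECT 1'))
finally:
    conn.close()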
def address(self, s):
"""
Parse an address, any of p2pkh, p2sh, p2pkh_segwit, or p2sh_segwit.
Return a :class:`Contract <Contract>`, or None.
"""
s = parseable_str(s)
return self.p2pkh(s) or self.p2sh(s) or self.p2pkh_segwit(s) or self.p2sh_segwit(s)
|
def function[address, parameter[self, s]]:
constant[
Parse an address, any of p2pkh, p2sh, p2pkh_segwit, or p2sh_segwit.
Return a :class:`Contract <Contract>`, or None.
]
variable[s] assign[=] call[name[parseable_str], parameter[name[s]]]
return[<ast.BoolOp object at 0x7da1b1ddf160>]
|
keyword[def] identifier[address] ( identifier[self] , identifier[s] ):
literal[string]
identifier[s] = identifier[parseable_str] ( identifier[s] )
keyword[return] identifier[self] . identifier[p2pkh] ( identifier[s] ) keyword[or] identifier[self] . identifier[p2sh] ( identifier[s] ) keyword[or] identifier[self] . identifier[p2pkh_segwit] ( identifier[s] ) keyword[or] identifier[self] . identifier[p2sh_segwit] ( identifier[s] )
|
def address(self, s):
"""
Parse an address, any of p2pkh, p2sh, p2pkh_segwit, or p2sh_segwit.
Return a :class:`Contract <Contract>`, or None.
"""
s = parseable_str(s)
return self.p2pkh(s) or self.p2sh(s) or self.p2pkh_segwit(s) or self.p2sh_segwit(s)
|
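A usage sketch, assuming `parser` is an object exposing the method above (a pycoin-style network parser); the address string is illustrative:

contract = parser.address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH")
if contract is None:
    print("not a recognized address form")
else:
    print(contract)  # Contract for whichever p2pkh/p2sh/segwit form matched first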
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
    @param name: If name is not None, emit RRs with I{name} as
    the owner name.
@type name: dns.name.Name object
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
    @param relativize: True if names should be relativized
@type relativize: bool"""
if not name is None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO.StringIO()
if not override_rdclass is None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
print >> s, '%s%s%s %s' % (ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype))
else:
for rd in self:
print >> s, '%s%s%d %s %s %s' % \
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize, **kw))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
|
def function[to_text, parameter[self, name, origin, relativize, override_rdclass]]:
constant[Convert the rdataset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
    @param name: If name is not None, emit RRs with I{name} as
    the owner name.
@type name: dns.name.Name object
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
    @param relativize: True if names should be relativized
@type relativize: bool]
if <ast.UnaryOp object at 0x7da1b2345300> begin[:]
variable[name] assign[=] call[name[name].choose_relativity, parameter[name[origin], name[relativize]]]
variable[ntext] assign[=] call[name[str], parameter[name[name]]]
variable[pad] assign[=] constant[ ]
variable[s] assign[=] call[name[StringIO].StringIO, parameter[]]
if <ast.UnaryOp object at 0x7da1b2347610> begin[:]
variable[rdclass] assign[=] name[override_rdclass]
if compare[call[name[len], parameter[name[self]]] equal[==] constant[0]] begin[:]
tuple[[<ast.BinOp object at 0x7da1b2345600>, <ast.BinOp object at 0x7da1b23466e0>]]
return[call[call[name[s].getvalue, parameter[]]][<ast.Slice object at 0x7da1b23454b0>]]
|
keyword[def] identifier[to_text] ( identifier[self] , identifier[name] = keyword[None] , identifier[origin] = keyword[None] , identifier[relativize] = keyword[True] ,
identifier[override_rdclass] = keyword[None] ,** identifier[kw] ):
literal[string]
keyword[if] keyword[not] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[name] . identifier[choose_relativity] ( identifier[origin] , identifier[relativize] )
identifier[ntext] = identifier[str] ( identifier[name] )
identifier[pad] = literal[string]
keyword[else] :
identifier[ntext] = literal[string]
identifier[pad] = literal[string]
identifier[s] = identifier[StringIO] . identifier[StringIO] ()
keyword[if] keyword[not] identifier[override_rdclass] keyword[is] keyword[None] :
identifier[rdclass] = identifier[override_rdclass]
keyword[else] :
identifier[rdclass] = identifier[self] . identifier[rdclass]
keyword[if] identifier[len] ( identifier[self] )== literal[int] :
identifier[print] >> identifier[s] , literal[string] %( identifier[ntext] , identifier[pad] ,
identifier[dns] . identifier[rdataclass] . identifier[to_text] ( identifier[rdclass] ),
identifier[dns] . identifier[rdatatype] . identifier[to_text] ( identifier[self] . identifier[rdtype] ))
keyword[else] :
keyword[for] identifier[rd] keyword[in] identifier[self] :
identifier[print] >> identifier[s] , literal[string] %( identifier[ntext] , identifier[pad] , identifier[self] . identifier[ttl] , identifier[dns] . identifier[rdataclass] . identifier[to_text] ( identifier[rdclass] ),
identifier[dns] . identifier[rdatatype] . identifier[to_text] ( identifier[self] . identifier[rdtype] ),
identifier[rd] . identifier[to_text] ( identifier[origin] = identifier[origin] , identifier[relativize] = identifier[relativize] ,** identifier[kw] ))
keyword[return] identifier[s] . identifier[getvalue] ()[:- literal[int] ]
|
def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
    @param name: If name is not None, emit RRs with I{name} as
    the owner name.
@type name: dns.name.Name object
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
    @param relativize: True if names should be relativized
@type relativize: bool"""
if not name is None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' ' # depends on [control=['if'], data=[]]
else:
ntext = ''
pad = ''
s = StringIO.StringIO()
if not override_rdclass is None:
rdclass = override_rdclass # depends on [control=['if'], data=[]]
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
(print >> s, '%s%s%s %s' % (ntext, pad, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype))) # depends on [control=['if'], data=[]]
else:
for rd in self:
(print >> s, '%s%s%d %s %s %s' % (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype), rd.to_text(origin=origin, relativize=relativize, **kw))) # depends on [control=['for'], data=['rd']]
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
|
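A quick way to exercise to_text() from dnspython, shown with the Python 2 print statement to match the code above; dns.rrset.from_text builds an RRset, which subclasses the rdataset and inherits this method:

import dns.rrset

rrs = dns.rrset.from_text('example.', 300, 'IN', 'A', '10.0.0.1', '10.0.0.2')
print rrs.to_text()
# example. 300 IN A 10.0.0.1
# example. 300 IN A 10.0.0.2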
def write_relative_abundance(rel_abd, biomf, out_fn, sort_by=None):
"""
    Given a BIOM table and pre-computed per-sample relative abundance
    values for each OTU, write out a tab-separated file listing OTUs as
    rows and Samples as columns.
    :type rel_abd: dict
    :param rel_abd: Per-sample relative abundance values, keyed by
                    Sample ID and then by OTU ID.
    :type biomf: biom object
    :param biomf: BIOM-formatted OTU/Sample abundance data
:type out_fn: str
:param out_fn: The full path to the desired output file.
:type sort_by: function
:param sort_by: A function acting as a sorting key that will determine
the order in which the Sample IDs appear as columns in
the output file.
"""
with open(out_fn, 'w') as out_f:
sids = sorted(set(biomf.ids()), key=sort_by)
out_f.write('#OTU ID\t{}\n'.format('\t'.join(sids)))
for otuid in biomf.ids(axis="observation"):
otuName = oc.otu_name(biomf.metadata(otuid, "observation")
["taxonomy"])
sabd = [str(rel_abd[sid][otuid])
if sid in rel_abd and otuid in rel_abd[sid] else '0'
for sid in sids]
out_f.write('{}\t{}\n'.format(otuName, '\t'.join(sabd)))
|
def function[write_relative_abundance, parameter[rel_abd, biomf, out_fn, sort_by]]:
constant[
    Given a BIOM table and pre-computed per-sample relative abundance
    values for each OTU, write out a tab-separated file listing OTUs as
    rows and Samples as columns.
    :type rel_abd: dict
    :param rel_abd: Per-sample relative abundance values, keyed by
                    Sample ID and then by OTU ID.
    :type biomf: biom object
    :param biomf: BIOM-formatted OTU/Sample abundance data
:type out_fn: str
:param out_fn: The full path to the desired output file.
:type sort_by: function
:param sort_by: A function acting as a sorting key that will determine
the order in which the Sample IDs appear as columns in
the output file.
]
with call[name[open], parameter[name[out_fn], constant[w]]] begin[:]
variable[sids] assign[=] call[name[sorted], parameter[call[name[set], parameter[call[name[biomf].ids, parameter[]]]]]]
call[name[out_f].write, parameter[call[constant[#OTU ID {}
].format, parameter[call[constant[ ].join, parameter[name[sids]]]]]]]
for taget[name[otuid]] in starred[call[name[biomf].ids, parameter[]]] begin[:]
variable[otuName] assign[=] call[name[oc].otu_name, parameter[call[call[name[biomf].metadata, parameter[name[otuid], constant[observation]]]][constant[taxonomy]]]]
variable[sabd] assign[=] <ast.ListComp object at 0x7da1b2412950>
call[name[out_f].write, parameter[call[constant[{} {}
].format, parameter[name[otuName], call[constant[ ].join, parameter[name[sabd]]]]]]]
|
keyword[def] identifier[write_relative_abundance] ( identifier[rel_abd] , identifier[biomf] , identifier[out_fn] , identifier[sort_by] = keyword[None] ):
literal[string]
keyword[with] identifier[open] ( identifier[out_fn] , literal[string] ) keyword[as] identifier[out_f] :
identifier[sids] = identifier[sorted] ( identifier[set] ( identifier[biomf] . identifier[ids] ()), identifier[key] = identifier[sort_by] )
identifier[out_f] . identifier[write] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[sids] )))
keyword[for] identifier[otuid] keyword[in] identifier[biomf] . identifier[ids] ( identifier[axis] = literal[string] ):
identifier[otuName] = identifier[oc] . identifier[otu_name] ( identifier[biomf] . identifier[metadata] ( identifier[otuid] , literal[string] )
[ literal[string] ])
identifier[sabd] =[ identifier[str] ( identifier[rel_abd] [ identifier[sid] ][ identifier[otuid] ])
keyword[if] identifier[sid] keyword[in] identifier[rel_abd] keyword[and] identifier[otuid] keyword[in] identifier[rel_abd] [ identifier[sid] ] keyword[else] literal[string]
keyword[for] identifier[sid] keyword[in] identifier[sids] ]
identifier[out_f] . identifier[write] ( literal[string] . identifier[format] ( identifier[otuName] , literal[string] . identifier[join] ( identifier[sabd] )))
|
def write_relative_abundance(rel_abd, biomf, out_fn, sort_by=None):
"""
    Given a BIOM table and pre-computed per-sample relative abundance
    values for each OTU, write out a tab-separated file listing OTUs as
    rows and Samples as columns.
    :type rel_abd: dict
    :param rel_abd: Per-sample relative abundance values, keyed by
                    Sample ID and then by OTU ID.
    :type biomf: biom object
    :param biomf: BIOM-formatted OTU/Sample abundance data
:type out_fn: str
:param out_fn: The full path to the desired output file.
:type sort_by: function
:param sort_by: A function acting as a sorting key that will determine
the order in which the Sample IDs appear as columns in
the output file.
"""
with open(out_fn, 'w') as out_f:
sids = sorted(set(biomf.ids()), key=sort_by)
out_f.write('#OTU ID\t{}\n'.format('\t'.join(sids)))
for otuid in biomf.ids(axis='observation'):
otuName = oc.otu_name(biomf.metadata(otuid, 'observation')['taxonomy'])
sabd = [str(rel_abd[sid][otuid]) if sid in rel_abd and otuid in rel_abd[sid] else '0' for sid in sids]
out_f.write('{}\t{}\n'.format(otuName, '\t'.join(sabd))) # depends on [control=['for'], data=['otuid']] # depends on [control=['with'], data=['out_f']]
|
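A hedged input sketch: biom.table.Table and the ids below are illustrative, oc.otu_name must be importable, and rel_abd holds pre-computed per-sample fractions keyed by sample id, then OTU id:

import numpy as np
from biom.table import Table

# 2 OTUs x 2 samples, with the taxonomy metadata oc.otu_name expects.
biomf = Table(np.array([[3.0, 1.0], [2.0, 4.0]]),
              observation_ids=['OTU1', 'OTU2'], sample_ids=['S1', 'S2'],
              observation_metadata=[{'taxonomy': ['k__Bacteria']},
                                    {'taxonomy': ['k__Bacteria']}])
rel_abd = {'S1': {'OTU1': 0.6, 'OTU2': 0.4}, 'S2': {'OTU1': 0.2, 'OTU2': 0.8}}
write_relative_abundance(rel_abd, biomf, 'rel_abd.tsv')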
def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):
""" Decreases the member's post count after a post unaproval.
This receiver handles the unaproval of a forum post: the posts count associated with the post's
author is decreased.
"""
if not instance.pk:
# Do not consider posts being created.
return
profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
try:
old_instance = instance.__class__._default_manager.get(pk=instance.pk)
except ObjectDoesNotExist: # pragma: no cover
# This should never happen (except with django loaddata command)
return
if old_instance and old_instance.approved is True and instance.approved is False:
profile.posts_count = F('posts_count') - 1
profile.save()
|
def function[decrease_posts_count_after_post_unaproval, parameter[sender, instance]]:
    constant[ Decreases the member's post count after a post unapproval.
    This receiver handles the unapproval of a forum post: the posts count associated with the post's
author is decreased.
]
if <ast.UnaryOp object at 0x7da1b117a200> begin[:]
return[None]
<ast.Tuple object at 0x7da1b117a680> assign[=] call[name[ForumProfile].objects.get_or_create, parameter[]]
<ast.Try object at 0x7da1b11785e0>
if <ast.BoolOp object at 0x7da1b1178700> begin[:]
name[profile].posts_count assign[=] binary_operation[call[name[F], parameter[constant[posts_count]]] - constant[1]]
call[name[profile].save, parameter[]]
|
keyword[def] identifier[decrease_posts_count_after_post_unaproval] ( identifier[sender] , identifier[instance] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[instance] . identifier[pk] :
keyword[return]
identifier[profile] , identifier[dummy] = identifier[ForumProfile] . identifier[objects] . identifier[get_or_create] ( identifier[user] = identifier[instance] . identifier[poster] )
keyword[try] :
identifier[old_instance] = identifier[instance] . identifier[__class__] . identifier[_default_manager] . identifier[get] ( identifier[pk] = identifier[instance] . identifier[pk] )
keyword[except] identifier[ObjectDoesNotExist] :
keyword[return]
keyword[if] identifier[old_instance] keyword[and] identifier[old_instance] . identifier[approved] keyword[is] keyword[True] keyword[and] identifier[instance] . identifier[approved] keyword[is] keyword[False] :
identifier[profile] . identifier[posts_count] = identifier[F] ( literal[string] )- literal[int]
identifier[profile] . identifier[save] ()
|
def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):
""" Decreases the member's post count after a post unaproval.
This receiver handles the unaproval of a forum post: the posts count associated with the post's
author is decreased.
"""
if not instance.pk:
# Do not consider posts being created.
return # depends on [control=['if'], data=[]]
(profile, dummy) = ForumProfile.objects.get_or_create(user=instance.poster)
try:
old_instance = instance.__class__._default_manager.get(pk=instance.pk) # depends on [control=['try'], data=[]]
except ObjectDoesNotExist: # pragma: no cover
# This should never happen (except with django loaddata command)
return # depends on [control=['except'], data=[]]
if old_instance and old_instance.approved is True and (instance.approved is False):
profile.posts_count = F('posts_count') - 1
profile.save() # depends on [control=['if'], data=[]]
|
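Wiring sketch: the receiver compares the stored row against the incoming one, so it must run on pre_save. `Post` stands in for the actual forum post model, which is an assumption, not taken from the source:

from django.db.models.signals import pre_save

pre_save.connect(decrease_posts_count_after_post_unaproval, sender=Post)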
def from_edgerc(rcinput, section='default'):
"""Returns an EdgeGridAuth object from the configuration from the given section of the
given edgerc file.
:param filename: path to the edgerc file
:param section: the section to use (this is the [bracketed] part of the edgerc,
default is 'default')
"""
from .edgerc import EdgeRc
if isinstance(rcinput, EdgeRc):
rc = rcinput
else:
rc = EdgeRc(rcinput)
return EdgeGridAuth(
client_token=rc.get(section, 'client_token'),
client_secret=rc.get(section, 'client_secret'),
access_token=rc.get(section, 'access_token'),
headers_to_sign=rc.getlist(section, 'headers_to_sign'),
max_body=rc.getint(section, 'max_body')
)
|
def function[from_edgerc, parameter[rcinput, section]]:
    constant[Returns an EdgeGridAuth object from the configuration in the given section of the
    given edgerc file.
    :param rcinput: path to the edgerc file, or an EdgeRc instance
:param section: the section to use (this is the [bracketed] part of the edgerc,
default is 'default')
]
from relative_module[edgerc] import module[EdgeRc]
if call[name[isinstance], parameter[name[rcinput], name[EdgeRc]]] begin[:]
variable[rc] assign[=] name[rcinput]
return[call[name[EdgeGridAuth], parameter[]]]
|
keyword[def] identifier[from_edgerc] ( identifier[rcinput] , identifier[section] = literal[string] ):
literal[string]
keyword[from] . identifier[edgerc] keyword[import] identifier[EdgeRc]
keyword[if] identifier[isinstance] ( identifier[rcinput] , identifier[EdgeRc] ):
identifier[rc] = identifier[rcinput]
keyword[else] :
identifier[rc] = identifier[EdgeRc] ( identifier[rcinput] )
keyword[return] identifier[EdgeGridAuth] (
identifier[client_token] = identifier[rc] . identifier[get] ( identifier[section] , literal[string] ),
identifier[client_secret] = identifier[rc] . identifier[get] ( identifier[section] , literal[string] ),
identifier[access_token] = identifier[rc] . identifier[get] ( identifier[section] , literal[string] ),
identifier[headers_to_sign] = identifier[rc] . identifier[getlist] ( identifier[section] , literal[string] ),
identifier[max_body] = identifier[rc] . identifier[getint] ( identifier[section] , literal[string] )
)
|
def from_edgerc(rcinput, section='default'):
"""Returns an EdgeGridAuth object from the configuration from the given section of the
given edgerc file.
:param filename: path to the edgerc file
:param section: the section to use (this is the [bracketed] part of the edgerc,
default is 'default')
"""
from .edgerc import EdgeRc
if isinstance(rcinput, EdgeRc):
rc = rcinput # depends on [control=['if'], data=[]]
else:
rc = EdgeRc(rcinput)
return EdgeGridAuth(client_token=rc.get(section, 'client_token'), client_secret=rc.get(section, 'client_secret'), access_token=rc.get(section, 'access_token'), headers_to_sign=rc.getlist(section, 'headers_to_sign'), max_body=rc.getint(section, 'max_body'))
|
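Typical use with requests, assuming this function is exposed as EdgeGridAuth.from_edgerc (as in the Akamai {OPEN} EdgeGrid client) and that a 'default' section exists in ~/.edgerc:

import os
import requests
from akamai.edgegrid import EdgeGridAuth

session = requests.Session()
session.auth = EdgeGridAuth.from_edgerc(os.path.expanduser('~/.edgerc'),
                                        section='default')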
def _parse_req(requnit, reqval):
''' Parse a non-day fixed value '''
assert reqval[0] != '='
try:
retn = []
for val in reqval.split(','):
if requnit == 'month':
                if val[0].isdigit():
                    retn.append(int(val))  # must be a month (1-12)
else:
try:
retn.append(list(calendar.month_abbr).index(val.title()))
except ValueError:
retn.append(list(calendar.month_name).index(val.title()))
else:
retn.append(int(val))
except ValueError:
return None
if not retn:
return None
return retn[0] if len(retn) == 1 else retn
|
def function[_parse_req, parameter[requnit, reqval]]:
constant[ Parse a non-day fixed value ]
assert[compare[call[name[reqval]][constant[0]] not_equal[!=] constant[=]]]
<ast.Try object at 0x7da1b23ee6e0>
if <ast.UnaryOp object at 0x7da18eb57e20> begin[:]
return[constant[None]]
return[<ast.IfExp object at 0x7da18eb573d0>]
|
keyword[def] identifier[_parse_req] ( identifier[requnit] , identifier[reqval] ):
literal[string]
keyword[assert] identifier[reqval] [ literal[int] ]!= literal[string]
keyword[try] :
identifier[retn] =[]
keyword[for] identifier[val] keyword[in] identifier[reqval] . identifier[split] ( literal[string] ):
keyword[if] identifier[requnit] == literal[string] :
            keyword[if] identifier[val] [ literal[int] ]. identifier[isdigit] ():
                identifier[retn] . identifier[append] ( identifier[int] ( identifier[val] ))
keyword[else] :
keyword[try] :
identifier[retn] . identifier[append] ( identifier[list] ( identifier[calendar] . identifier[month_abbr] ). identifier[index] ( identifier[val] . identifier[title] ()))
keyword[except] identifier[ValueError] :
identifier[retn] . identifier[append] ( identifier[list] ( identifier[calendar] . identifier[month_name] ). identifier[index] ( identifier[val] . identifier[title] ()))
keyword[else] :
identifier[retn] . identifier[append] ( identifier[int] ( identifier[val] ))
keyword[except] identifier[ValueError] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[retn] :
keyword[return] keyword[None]
keyword[return] identifier[retn] [ literal[int] ] keyword[if] identifier[len] ( identifier[retn] )== literal[int] keyword[else] identifier[retn]
|
def _parse_req(requnit, reqval):
""" Parse a non-day fixed value """
assert reqval[0] != '='
try:
retn = []
for val in reqval.split(','):
if requnit == 'month':
            if val[0].isdigit():
                retn.append(int(val)) # must be a month (1-12) # depends on [control=['if'], data=[]]
else:
try:
retn.append(list(calendar.month_abbr).index(val.title())) # depends on [control=['try'], data=[]]
except ValueError:
retn.append(list(calendar.month_name).index(val.title())) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
retn.append(int(val)) # depends on [control=['for'], data=['val']] # depends on [control=['try'], data=[]]
except ValueError:
return None # depends on [control=['except'], data=[]]
if not retn:
return None # depends on [control=['if'], data=[]]
return retn[0] if len(retn) == 1 else retn
|
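Illustrative calls (month names resolve through the calendar module; any other requnit just parses integers):

_parse_req('month', 'jan')      # -> 1
_parse_req('month', 'Jan,Mar')  # -> [1, 3]
_parse_req('minute', '0,30')    # -> [0, 30]
_parse_req('month', 'smarch')   # -> None (unparseable)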
def comment(request, template="generic/comments.html", extra_context=None):
"""
Handle a ``ThreadedCommentForm`` submission and redirect back to its
related object.
"""
response = initial_validation(request, "comment")
if isinstance(response, HttpResponse):
return response
obj, post_data = response
form_class = import_dotted_path(settings.COMMENT_FORM_CLASS)
form = form_class(request, obj, post_data)
if form.is_valid():
url = obj.get_absolute_url()
if is_spam(request, form, url):
return redirect(url)
comment = form.save(request)
response = redirect(add_cache_bypass(comment.get_absolute_url()))
# Store commenter's details in a cookie for 90 days.
for field in ThreadedCommentForm.cookie_fields:
cookie_name = ThreadedCommentForm.cookie_prefix + field
cookie_value = post_data.get(field, "")
set_cookie(response, cookie_name, cookie_value)
return response
elif request.is_ajax() and form.errors:
return HttpResponse(dumps({"errors": form.errors}))
# Show errors with stand-alone comment form.
context = {"obj": obj, "posted_comment_form": form}
context.update(extra_context or {})
return TemplateResponse(request, template, context)
|
def function[comment, parameter[request, template, extra_context]]:
constant[
Handle a ``ThreadedCommentForm`` submission and redirect back to its
related object.
]
variable[response] assign[=] call[name[initial_validation], parameter[name[request], constant[comment]]]
if call[name[isinstance], parameter[name[response], name[HttpResponse]]] begin[:]
return[name[response]]
<ast.Tuple object at 0x7da1b1367010> assign[=] name[response]
variable[form_class] assign[=] call[name[import_dotted_path], parameter[name[settings].COMMENT_FORM_CLASS]]
variable[form] assign[=] call[name[form_class], parameter[name[request], name[obj], name[post_data]]]
if call[name[form].is_valid, parameter[]] begin[:]
variable[url] assign[=] call[name[obj].get_absolute_url, parameter[]]
if call[name[is_spam], parameter[name[request], name[form], name[url]]] begin[:]
return[call[name[redirect], parameter[name[url]]]]
variable[comment] assign[=] call[name[form].save, parameter[name[request]]]
variable[response] assign[=] call[name[redirect], parameter[call[name[add_cache_bypass], parameter[call[name[comment].get_absolute_url, parameter[]]]]]]
for taget[name[field]] in starred[name[ThreadedCommentForm].cookie_fields] begin[:]
variable[cookie_name] assign[=] binary_operation[name[ThreadedCommentForm].cookie_prefix + name[field]]
variable[cookie_value] assign[=] call[name[post_data].get, parameter[name[field], constant[]]]
call[name[set_cookie], parameter[name[response], name[cookie_name], name[cookie_value]]]
return[name[response]]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da1b14610c0>, <ast.Constant object at 0x7da1b1460e20>], [<ast.Name object at 0x7da1b1461450>, <ast.Name object at 0x7da1b1463550>]]
call[name[context].update, parameter[<ast.BoolOp object at 0x7da1b1461900>]]
return[call[name[TemplateResponse], parameter[name[request], name[template], name[context]]]]
|
keyword[def] identifier[comment] ( identifier[request] , identifier[template] = literal[string] , identifier[extra_context] = keyword[None] ):
literal[string]
identifier[response] = identifier[initial_validation] ( identifier[request] , literal[string] )
keyword[if] identifier[isinstance] ( identifier[response] , identifier[HttpResponse] ):
keyword[return] identifier[response]
identifier[obj] , identifier[post_data] = identifier[response]
identifier[form_class] = identifier[import_dotted_path] ( identifier[settings] . identifier[COMMENT_FORM_CLASS] )
identifier[form] = identifier[form_class] ( identifier[request] , identifier[obj] , identifier[post_data] )
keyword[if] identifier[form] . identifier[is_valid] ():
identifier[url] = identifier[obj] . identifier[get_absolute_url] ()
keyword[if] identifier[is_spam] ( identifier[request] , identifier[form] , identifier[url] ):
keyword[return] identifier[redirect] ( identifier[url] )
identifier[comment] = identifier[form] . identifier[save] ( identifier[request] )
identifier[response] = identifier[redirect] ( identifier[add_cache_bypass] ( identifier[comment] . identifier[get_absolute_url] ()))
keyword[for] identifier[field] keyword[in] identifier[ThreadedCommentForm] . identifier[cookie_fields] :
identifier[cookie_name] = identifier[ThreadedCommentForm] . identifier[cookie_prefix] + identifier[field]
identifier[cookie_value] = identifier[post_data] . identifier[get] ( identifier[field] , literal[string] )
identifier[set_cookie] ( identifier[response] , identifier[cookie_name] , identifier[cookie_value] )
keyword[return] identifier[response]
keyword[elif] identifier[request] . identifier[is_ajax] () keyword[and] identifier[form] . identifier[errors] :
keyword[return] identifier[HttpResponse] ( identifier[dumps] ({ literal[string] : identifier[form] . identifier[errors] }))
identifier[context] ={ literal[string] : identifier[obj] , literal[string] : identifier[form] }
identifier[context] . identifier[update] ( identifier[extra_context] keyword[or] {})
keyword[return] identifier[TemplateResponse] ( identifier[request] , identifier[template] , identifier[context] )
|
def comment(request, template='generic/comments.html', extra_context=None):
"""
Handle a ``ThreadedCommentForm`` submission and redirect back to its
related object.
"""
response = initial_validation(request, 'comment')
if isinstance(response, HttpResponse):
return response # depends on [control=['if'], data=[]]
(obj, post_data) = response
form_class = import_dotted_path(settings.COMMENT_FORM_CLASS)
form = form_class(request, obj, post_data)
if form.is_valid():
url = obj.get_absolute_url()
if is_spam(request, form, url):
return redirect(url) # depends on [control=['if'], data=[]]
comment = form.save(request)
response = redirect(add_cache_bypass(comment.get_absolute_url()))
# Store commenter's details in a cookie for 90 days.
for field in ThreadedCommentForm.cookie_fields:
cookie_name = ThreadedCommentForm.cookie_prefix + field
cookie_value = post_data.get(field, '')
set_cookie(response, cookie_name, cookie_value) # depends on [control=['for'], data=['field']]
return response # depends on [control=['if'], data=[]]
elif request.is_ajax() and form.errors:
return HttpResponse(dumps({'errors': form.errors})) # depends on [control=['if'], data=[]]
# Show errors with stand-alone comment form.
context = {'obj': obj, 'posted_comment_form': form}
context.update(extra_context or {})
return TemplateResponse(request, template, context)
|
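Hook-up sketch for the view, Mezzanine-style; the URL pattern and name are assumptions about the surrounding project, not taken from the source:

from django.conf.urls import url
from mezzanine.generic.views import comment

urlpatterns = [
    url(r'^comment/$', comment, name='comment'),
]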
def path_requests(self, path):
"""
        Returns a Resource instance that will have attributes, one for each of the http-methods
supported on that path. For example:
>>> hcl_api = client.path_requests('/api/hcl/{id}')
>>> dir(hcl_api)
[u'delete', u'get', u'put']
>>> resp, ok = hcl_api.get(id='Arista_vEOS')
Parameters
----------
path : str
The API path
Returns
-------
Resource
instance that has attributes for methods available.
"""
path_spec = self.client.origin_spec['paths'].get(path)
if not path_spec:
raise RuntimeError("no path found for: %s" % path)
get_for_meth = self.client.swagger_spec.get_op_for_request
rsrc = BravadoResource(name=path, ops={
method: get_for_meth(method, path)
for method in path_spec.keys()})
return RequestFactory.Resource(self.client, ResourceDecorator(rsrc))
|
def function[path_requests, parameter[self, path]]:
constant[
    Returns a Resource instance that will have attributes, one for each of the http-methods
supported on that path. For example:
>>> hcl_api = client.path_requests('/api/hcl/{id}')
>>> dir(hcl_api)
[u'delete', u'get', u'put']
>>> resp, ok = hcl_api.get(id='Arista_vEOS')
Parameters
----------
path : str
The API path
Returns
-------
Resource
instance that has attributes for methods available.
]
variable[path_spec] assign[=] call[call[name[self].client.origin_spec][constant[paths]].get, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da20c992980> begin[:]
<ast.Raise object at 0x7da20c992f50>
variable[get_for_meth] assign[=] name[self].client.swagger_spec.get_op_for_request
variable[rsrc] assign[=] call[name[BravadoResource], parameter[]]
return[call[name[RequestFactory].Resource, parameter[name[self].client, call[name[ResourceDecorator], parameter[name[rsrc]]]]]]
|
keyword[def] identifier[path_requests] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path_spec] = identifier[self] . identifier[client] . identifier[origin_spec] [ literal[string] ]. identifier[get] ( identifier[path] )
keyword[if] keyword[not] identifier[path_spec] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[path] )
identifier[get_for_meth] = identifier[self] . identifier[client] . identifier[swagger_spec] . identifier[get_op_for_request]
identifier[rsrc] = identifier[BravadoResource] ( identifier[name] = identifier[path] , identifier[ops] ={
identifier[method] : identifier[get_for_meth] ( identifier[method] , identifier[path] )
keyword[for] identifier[method] keyword[in] identifier[path_spec] . identifier[keys] ()})
keyword[return] identifier[RequestFactory] . identifier[Resource] ( identifier[self] . identifier[client] , identifier[ResourceDecorator] ( identifier[rsrc] ))
|
def path_requests(self, path):
"""
        Returns a Resource instance that will have attributes, one for each of the http-methods
supported on that path. For example:
>>> hcl_api = client.path_requests('/api/hcl/{id}')
>>> dir(hcl_api)
[u'delete', u'get', u'put']
>>> resp, ok = hcl_api.get(id='Arista_vEOS')
Parameters
----------
path : str
The API path
Returns
-------
Resource
instance that has attributes for methods available.
"""
path_spec = self.client.origin_spec['paths'].get(path)
if not path_spec:
raise RuntimeError('no path found for: %s' % path) # depends on [control=['if'], data=[]]
get_for_meth = self.client.swagger_spec.get_op_for_request
rsrc = BravadoResource(name=path, ops={method: get_for_meth(method, path) for method in path_spec.keys()})
return RequestFactory.Resource(self.client, ResourceDecorator(rsrc))
|
def open(self):
"""Open the files
"""
segments = []
files = config.get('env', 'jpl', fallback=[])
if not files:
raise JplConfigError("No JPL file defined")
# Extraction of segments from each .bsp file
for filepath in files:
filepath = Path(filepath)
if filepath.suffix.lower() != ".bsp":
continue
segments.extend(SPK.open(str(filepath)).segments)
if not segments:
raise JplError("No segment loaded")
# list of available segments
self.segments = dict(((s.center, s.target), s) for s in segments)
        # This dict will hold the Target objects through which all
        # relations between frames are linked
targets = {}
for center_id, target_id in self.segments.keys():
center_name = target_names.get(center_id, 'Unknown')
target_name = target_names.get(target_id, 'Unknown')
# Retrieval of the Target object representing the center if it exists
# or creation of said object if it doesn't.
center = targets.setdefault(center_id, Target(center_name, center_id))
target = targets.setdefault(target_id, Target(target_name, target_id))
# Link between the Target objects (see Node2)
center + target
# We take the Earth target and make it the top of the structure.
# That way, it is easy to link it to the already declared earth-centered reference frames
# from the `frames.frame` module.
self.top = targets[399]
|
def function[open, parameter[self]]:
constant[Open the files
]
variable[segments] assign[=] list[[]]
variable[files] assign[=] call[name[config].get, parameter[constant[env], constant[jpl]]]
if <ast.UnaryOp object at 0x7da18eb55480> begin[:]
<ast.Raise object at 0x7da18eb57880>
for taget[name[filepath]] in starred[name[files]] begin[:]
variable[filepath] assign[=] call[name[Path], parameter[name[filepath]]]
if compare[call[name[filepath].suffix.lower, parameter[]] not_equal[!=] constant[.bsp]] begin[:]
continue
call[name[segments].extend, parameter[call[name[SPK].open, parameter[call[name[str], parameter[name[filepath]]]]].segments]]
if <ast.UnaryOp object at 0x7da18eb55150> begin[:]
<ast.Raise object at 0x7da18eb57b20>
name[self].segments assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18eb54d30>]]
variable[targets] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18eb56770>, <ast.Name object at 0x7da18eb54130>]]] in starred[call[name[self].segments.keys, parameter[]]] begin[:]
variable[center_name] assign[=] call[name[target_names].get, parameter[name[center_id], constant[Unknown]]]
variable[target_name] assign[=] call[name[target_names].get, parameter[name[target_id], constant[Unknown]]]
variable[center] assign[=] call[name[targets].setdefault, parameter[name[center_id], call[name[Target], parameter[name[center_name], name[center_id]]]]]
variable[target] assign[=] call[name[targets].setdefault, parameter[name[target_id], call[name[Target], parameter[name[target_name], name[target_id]]]]]
binary_operation[name[center] + name[target]]
name[self].top assign[=] call[name[targets]][constant[399]]
|
keyword[def] identifier[open] ( identifier[self] ):
literal[string]
identifier[segments] =[]
identifier[files] = identifier[config] . identifier[get] ( literal[string] , literal[string] , identifier[fallback] =[])
keyword[if] keyword[not] identifier[files] :
keyword[raise] identifier[JplConfigError] ( literal[string] )
keyword[for] identifier[filepath] keyword[in] identifier[files] :
identifier[filepath] = identifier[Path] ( identifier[filepath] )
keyword[if] identifier[filepath] . identifier[suffix] . identifier[lower] ()!= literal[string] :
keyword[continue]
identifier[segments] . identifier[extend] ( identifier[SPK] . identifier[open] ( identifier[str] ( identifier[filepath] )). identifier[segments] )
keyword[if] keyword[not] identifier[segments] :
keyword[raise] identifier[JplError] ( literal[string] )
identifier[self] . identifier[segments] = identifier[dict] ((( identifier[s] . identifier[center] , identifier[s] . identifier[target] ), identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[segments] )
identifier[targets] ={}
keyword[for] identifier[center_id] , identifier[target_id] keyword[in] identifier[self] . identifier[segments] . identifier[keys] ():
identifier[center_name] = identifier[target_names] . identifier[get] ( identifier[center_id] , literal[string] )
identifier[target_name] = identifier[target_names] . identifier[get] ( identifier[target_id] , literal[string] )
identifier[center] = identifier[targets] . identifier[setdefault] ( identifier[center_id] , identifier[Target] ( identifier[center_name] , identifier[center_id] ))
identifier[target] = identifier[targets] . identifier[setdefault] ( identifier[target_id] , identifier[Target] ( identifier[target_name] , identifier[target_id] ))
identifier[center] + identifier[target]
identifier[self] . identifier[top] = identifier[targets] [ literal[int] ]
|
def open(self):
"""Open the files
"""
segments = []
files = config.get('env', 'jpl', fallback=[])
if not files:
raise JplConfigError('No JPL file defined') # depends on [control=['if'], data=[]]
# Extraction of segments from each .bsp file
for filepath in files:
filepath = Path(filepath)
if filepath.suffix.lower() != '.bsp':
continue # depends on [control=['if'], data=[]]
segments.extend(SPK.open(str(filepath)).segments) # depends on [control=['for'], data=['filepath']]
if not segments:
raise JplError('No segment loaded') # depends on [control=['if'], data=[]]
# list of available segments
self.segments = dict((((s.center, s.target), s) for s in segments))
        # This dict will hold the Target objects through which all
        # relations between frames are linked
targets = {}
for (center_id, target_id) in self.segments.keys():
center_name = target_names.get(center_id, 'Unknown')
target_name = target_names.get(target_id, 'Unknown')
# Retrieval of the Target object representing the center if it exists
# or creation of said object if it doesn't.
center = targets.setdefault(center_id, Target(center_name, center_id))
target = targets.setdefault(target_id, Target(target_name, target_id))
# Link between the Target objects (see Node2)
center + target # depends on [control=['for'], data=[]]
# We take the Earth target and make it the top of the structure.
# That way, it is easy to link it to the already declared earth-centered reference frames
# from the `frames.frame` module.
self.top = targets[399]
|
def copy_to(name,
source,
dest,
exec_driver=None,
overwrite=False,
makedirs=False):
'''
Copy a file from the host into a container
name
Container name
source
File to be copied to the container. Can be a local path on the Minion
or a remote file from the Salt fileserver.
dest
Destination on the container. Must be an absolute path. If the
destination is a directory, the file will be copied into that
directory.
exec_driver : None
If not passed, the execution driver will be detected as described
:ref:`above <docker-execution-driver>`.
overwrite : False
Unless this option is set to ``True``, then if a file exists at the
location specified by the ``dest`` argument, an error will be raised.
makedirs : False
Create the parent directory on the container if it does not already
exist.
**RETURN DATA**
A boolean (``True`` if successful, otherwise ``False``)
CLI Example:
.. code-block:: bash
salt myminion docker.copy_to mycontainer /tmp/foo /root/foo
'''
if exec_driver is None:
exec_driver = _get_exec_driver()
return __salt__['container_resource.copy_to'](
name,
__salt__['container_resource.cache_file'](source),
dest,
container_type=__virtualname__,
exec_driver=exec_driver,
overwrite=overwrite,
makedirs=makedirs)
|
def function[copy_to, parameter[name, source, dest, exec_driver, overwrite, makedirs]]:
constant[
Copy a file from the host into a container
name
Container name
source
File to be copied to the container. Can be a local path on the Minion
or a remote file from the Salt fileserver.
dest
Destination on the container. Must be an absolute path. If the
destination is a directory, the file will be copied into that
directory.
exec_driver : None
If not passed, the execution driver will be detected as described
:ref:`above <docker-execution-driver>`.
overwrite : False
Unless this option is set to ``True``, then if a file exists at the
location specified by the ``dest`` argument, an error will be raised.
makedirs : False
Create the parent directory on the container if it does not already
exist.
**RETURN DATA**
A boolean (``True`` if successful, otherwise ``False``)
CLI Example:
.. code-block:: bash
salt myminion docker.copy_to mycontainer /tmp/foo /root/foo
]
if compare[name[exec_driver] is constant[None]] begin[:]
variable[exec_driver] assign[=] call[name[_get_exec_driver], parameter[]]
return[call[call[name[__salt__]][constant[container_resource.copy_to]], parameter[name[name], call[call[name[__salt__]][constant[container_resource.cache_file]], parameter[name[source]]], name[dest]]]]
|
keyword[def] identifier[copy_to] ( identifier[name] ,
identifier[source] ,
identifier[dest] ,
identifier[exec_driver] = keyword[None] ,
identifier[overwrite] = keyword[False] ,
identifier[makedirs] = keyword[False] ):
literal[string]
keyword[if] identifier[exec_driver] keyword[is] keyword[None] :
identifier[exec_driver] = identifier[_get_exec_driver] ()
keyword[return] identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[__salt__] [ literal[string] ]( identifier[source] ),
identifier[dest] ,
identifier[container_type] = identifier[__virtualname__] ,
identifier[exec_driver] = identifier[exec_driver] ,
identifier[overwrite] = identifier[overwrite] ,
identifier[makedirs] = identifier[makedirs] )
|
def copy_to(name, source, dest, exec_driver=None, overwrite=False, makedirs=False):
"""
Copy a file from the host into a container
name
Container name
source
File to be copied to the container. Can be a local path on the Minion
or a remote file from the Salt fileserver.
dest
Destination on the container. Must be an absolute path. If the
destination is a directory, the file will be copied into that
directory.
exec_driver : None
If not passed, the execution driver will be detected as described
:ref:`above <docker-execution-driver>`.
overwrite : False
Unless this option is set to ``True``, then if a file exists at the
location specified by the ``dest`` argument, an error will be raised.
makedirs : False
Create the parent directory on the container if it does not already
exist.
**RETURN DATA**
A boolean (``True`` if successful, otherwise ``False``)
CLI Example:
.. code-block:: bash
salt myminion docker.copy_to mycontainer /tmp/foo /root/foo
"""
if exec_driver is None:
exec_driver = _get_exec_driver() # depends on [control=['if'], data=['exec_driver']]
return __salt__['container_resource.copy_to'](name, __salt__['container_resource.cache_file'](source), dest, container_type=__virtualname__, exec_driver=exec_driver, overwrite=overwrite, makedirs=makedirs)
|
def make_hashcode(uri, payload, headers):
"""Generate a SHA1 based on the given arguments.
    Hashcodes created by this method will be used as unique identifiers
for the raw items or resources stored by this archive.
:param uri: URI to the resource
:param payload: payload of the request needed to fetch the resource
:param headers: headers of the request needed to fetch the resource
:returns: a SHA1 hash code
"""
def dict_to_json_str(data):
return json.dumps(data, sort_keys=True)
content = ':'.join([uri, dict_to_json_str(payload), dict_to_json_str(headers)])
hashcode = hashlib.sha1(content.encode('utf-8'))
return hashcode.hexdigest()
|
def function[make_hashcode, parameter[uri, payload, headers]]:
constant[Generate a SHA1 based on the given arguments.
    Hashcodes created by this method will be used as unique identifiers
for the raw items or resources stored by this archive.
:param uri: URI to the resource
:param payload: payload of the request needed to fetch the resource
:param headers: headers of the request needed to fetch the resource
:returns: a SHA1 hash code
]
def function[dict_to_json_str, parameter[data]]:
return[call[name[json].dumps, parameter[name[data]]]]
variable[content] assign[=] call[constant[:].join, parameter[list[[<ast.Name object at 0x7da1b0383550>, <ast.Call object at 0x7da1b0382a40>, <ast.Call object at 0x7da1b0380bb0>]]]]
variable[hashcode] assign[=] call[name[hashlib].sha1, parameter[call[name[content].encode, parameter[constant[utf-8]]]]]
return[call[name[hashcode].hexdigest, parameter[]]]
|
keyword[def] identifier[make_hashcode] ( identifier[uri] , identifier[payload] , identifier[headers] ):
literal[string]
keyword[def] identifier[dict_to_json_str] ( identifier[data] ):
keyword[return] identifier[json] . identifier[dumps] ( identifier[data] , identifier[sort_keys] = keyword[True] )
identifier[content] = literal[string] . identifier[join] ([ identifier[uri] , identifier[dict_to_json_str] ( identifier[payload] ), identifier[dict_to_json_str] ( identifier[headers] )])
identifier[hashcode] = identifier[hashlib] . identifier[sha1] ( identifier[content] . identifier[encode] ( literal[string] ))
keyword[return] identifier[hashcode] . identifier[hexdigest] ()
|
def make_hashcode(uri, payload, headers):
"""Generate a SHA1 based on the given arguments.
    Hashcodes created by this method will be used as unique identifiers
for the raw items or resources stored by this archive.
:param uri: URI to the resource
:param payload: payload of the request needed to fetch the resource
:param headers: headers of the request needed to fetch the resource
:returns: a SHA1 hash code
"""
def dict_to_json_str(data):
return json.dumps(data, sort_keys=True)
content = ':'.join([uri, dict_to_json_str(payload), dict_to_json_str(headers)])
hashcode = hashlib.sha1(content.encode('utf-8'))
return hashcode.hexdigest()
|
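The hash is deterministic for a given (uri, payload, headers) triple because the JSON serialization sorts keys; the values below are illustrative:

uri = 'http://example.com/api/items'
headers = {'Accept': 'application/json'}

h1 = make_hashcode(uri, {'page': 1, 'per_page': 100}, headers)
h2 = make_hashcode(uri, {'per_page': 100, 'page': 1}, headers)
assert h1 == h2       # key order does not matter
assert len(h1) == 40  # hex-encoded SHA1 digest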
def create(item_data, item_id,
observation_data = None,
user_id = None, target = None,
weights = 'auto',
similarity_metrics = 'auto',
item_data_transform = 'auto',
max_item_neighborhood_size = 64, verbose=True):
"""Create a content-based recommender model in which the similarity
between the items recommended is determined by the content of
those items rather than learned from user interaction data.
The similarity score between two items is calculated by first
computing the similarity between the item data for each column,
then taking a weighted average of the per-column similarities to
get the final similarity. The recommendations are generated
according to the average similarity of a candidate item to all the
items in a user's set of rated items.
Parameters
----------
item_data : SFrame
An SFrame giving the content of the items to use to learn the
structure of similar items. The SFrame must have one column
that matches the name of the `item_id`; this gives a unique
identifier that can then be used to make recommendations. The rest
of the columns are then used in the distance calculations
below.
item_id : string
The name of the column in item_data (and `observation_data`,
if given) that represents the item ID.
observation_data : None (optional)
An SFrame giving user and item interaction data. This
information is stored in the model, and the recommender will
recommend the items with the most similar content to the
items that were present and/or highly rated for that user.
user_id : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the user identifier.
target : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the target or rating.
weights : dict or 'auto' (optional)
        If given, then weights must be a dictionary mapping column names
        present in item_data to weight values.  If 'auto' is given, then
        all columns are weighted equally.
max_item_neighborhood_size : int, 64
For each item, we hold this many similar items to use when
aggregating models for predictions. Decreasing this value
decreases the memory required by the model and decreases the
time required to generate recommendations, but it may also
decrease recommendation accuracy.
verbose : True or False (optional)
If set to False, then less information is printed.
Examples
--------
>>> item_data = tc.SFrame({"my_item_id" : range(4),
"data_1" : [ [1, 0], [1, 0], [0, 1], [0.5, 0.5] ],
"data_2" : [ [0, 1], [1, 0], [0, 1], [0.5, 0.5] ] })
>>> m = tc.recommender.item_content_recommender.create(item_data, "my_item_id")
>>> m.recommend_from_interactions([0])
Columns:
my_item_id int
score float
rank int
Rows: 3
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 1 | 0.5 | 2 |
| 2 | 0.5 | 3 |
+------------+----------------+------+
[3 rows x 3 columns]
>>> m.recommend_from_interactions([0, 1])
Columns:
my_item_id int
score float
rank int
Rows: 2
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 2 | 0.25 | 2 |
+------------+----------------+------+
[2 rows x 3 columns]
"""
from turicreate._cython.cy_server import QuietProgress
# item_data is correct type
if not isinstance(item_data, _SFrame) or item_data.num_rows() == 0:
raise TypeError("`item_data` argument must be a non-empty SFrame giving item data to use for similarities.")
# Error checking on column names
item_columns = set(item_data.column_names())
if item_id not in item_columns:
raise ValueError("Item column given as 'item_id = %s', but this is not found in `item_data` SFrame."
% item_id)
# Now, get the set ready to test for other argument issues.
item_columns.remove(item_id)
if weights != 'auto':
if type(weights) is not dict:
raise TypeError("`weights` parameter must be 'auto' or a dictionary of column "
"names in `item_data` to weight values.")
        bad_columns = [col_name for col_name in weights if col_name not in item_columns]
if bad_columns:
raise ValueError("Columns %s given in weights, but these are not found in item_data."
% ', '.join(bad_columns))
# Now, set any columns not given in the weights column to be
# weight 0.
for col_name in item_columns:
weights.setdefault(col_name, 0)
################################################################################
# Now, check the feature transformer stuff.
# Pass it through a feature transformer.
if item_data_transform == 'auto':
item_data_transform = _turicreate.toolkits._feature_engineering.AutoVectorizer(excluded_features = [item_id])
if not isinstance(item_data_transform, _turicreate.toolkits._feature_engineering.TransformerBase):
raise TypeError("item_data_transform must be 'auto' or a valid feature_engineering transformer instance.")
# Transform the input data.
item_data = item_data_transform.fit_transform(item_data)
# Translate any string columns to actually work in nearest
# neighbors by making it a categorical list. Also translate lists
# into dicts, and normalize numeric columns.
gaussian_kernel_metrics = set()
for c in item_columns:
if item_data[c].dtype is str:
item_data[c] = item_data[c].apply(lambda s: {s : 1})
elif item_data[c].dtype in [float, int]:
item_data[c] = (item_data[c] - item_data[c].mean()) / max(item_data[c].std(), 1e-8)
gaussian_kernel_metrics.add(c)
if verbose:
print("Applying transform:")
print(item_data_transform)
opts = {}
model_proxy = _turicreate.extensions.item_content_recommender()
model_proxy.init_options(opts)
# The user_id is implicit if none is given.
if user_id is None:
user_id = "__implicit_user__"
normalization_factor = 1
# Set the observation data.
if observation_data is None:
# In this case, it's important to make this a string type. If
# the user column is not given, it may be given at recommend
# time, in which case it is cast to a string type and cast
# back if necessary.
empty_user = _turicreate.SArray([], dtype=str)
empty_item = _turicreate.SArray([], dtype=item_data[item_id].dtype)
observation_data = _turicreate.SFrame( {user_id : empty_user, item_id : empty_item} )
# Now, work out stuff for the observation_data component
normalization_factor = 1
# 1 for the item_id column.
if item_data.num_columns() >= 3:
if weights == "auto":
# TODO: automatically tune this.
weights = {col_name : 1 for col_name in item_data.column_names() if col_name != item_id}
# Use the abs value here in case users pass in weights with negative values.
normalization_factor = sum(abs(v) for v in weights.values())
if normalization_factor == 0:
raise ValueError("Weights cannot all be set to 0.")
distance = [([col_name], ("gaussian_kernel" if col_name in gaussian_kernel_metrics else "cosine"), weight)
for col_name, weight in weights.items()]
else:
distance = "cosine"
# Now, build the nearest neighbors model:
nn = _turicreate.nearest_neighbors.create(item_data, label=item_id, distance = distance, verbose = verbose)
graph = nn.query(item_data, label = item_id, k=max_item_neighborhood_size, verbose = verbose)
graph = graph.rename({"query_label" : item_id,
"reference_label" : "similar",
"distance" : "score"}, inplace=True)
def process_weights(x):
return max(-1, min(1, 1 - x / normalization_factor))
graph["score"] = graph["score"].apply(process_weights)
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'similarity_type' : "cosine",
'max_item_neighborhood_size' : max_item_neighborhood_size}
user_data = _turicreate.SFrame()
extra_data = {"nearest_items" : graph}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return ItemContentRecommender(model_proxy)
|
def function[create, parameter[item_data, item_id, observation_data, user_id, target, weights, similarity_metrics, item_data_transform, max_item_neighborhood_size, verbose]]:
constant[Create a content-based recommender model in which the similarity
between the items recommended is determined by the content of
those items rather than learned from user interaction data.
The similarity score between two items is calculated by first
computing the similarity between the item data for each column,
then taking a weighted average of the per-column similarities to
get the final similarity. The recommendations are generated
according to the average similarity of a candidate item to all the
items in a user's set of rated items.
Parameters
----------
item_data : SFrame
An SFrame giving the content of the items to use to learn the
structure of similar items. The SFrame must have one column
that matches the name of the `item_id`; this gives a unique
identifier that can then be used to make recommendations. The rest
of the columns are then used in the distance calculations
below.
item_id : string
The name of the column in item_data (and `observation_data`,
if given) that represents the item ID.
observation_data : None (optional)
An SFrame giving user and item interaction data. This
information is stored in the model, and the recommender will
recommend the items with the most similar content to the
items that were present and/or highly rated for that user.
user_id : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the user identifier.
target : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the target or rating.
weights : dict or 'auto' (optional)
        If given, then weights must be a dictionary mapping column names
        present in item_data to weight values.  If 'auto' is given, then
        all columns are weighted equally.
max_item_neighborhood_size : int, 64
For each item, we hold this many similar items to use when
aggregating models for predictions. Decreasing this value
decreases the memory required by the model and decreases the
time required to generate recommendations, but it may also
decrease recommendation accuracy.
verbose : True or False (optional)
If set to False, then less information is printed.
Examples
--------
>>> item_data = tc.SFrame({"my_item_id" : range(4),
"data_1" : [ [1, 0], [1, 0], [0, 1], [0.5, 0.5] ],
"data_2" : [ [0, 1], [1, 0], [0, 1], [0.5, 0.5] ] })
>>> m = tc.recommender.item_content_recommender.create(item_data, "my_item_id")
>>> m.recommend_from_interactions([0])
Columns:
my_item_id int
score float
rank int
Rows: 3
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 1 | 0.5 | 2 |
| 2 | 0.5 | 3 |
+------------+----------------+------+
[3 rows x 3 columns]
>>> m.recommend_from_interactions([0, 1])
Columns:
my_item_id int
score float
rank int
Rows: 2
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 2 | 0.25 | 2 |
+------------+----------------+------+
[2 rows x 3 columns]
]
from relative_module[turicreate._cython.cy_server] import module[QuietProgress]
if <ast.BoolOp object at 0x7da1b204bac0> begin[:]
<ast.Raise object at 0x7da1b204b8b0>
variable[item_columns] assign[=] call[name[set], parameter[call[name[item_data].column_names, parameter[]]]]
if compare[name[item_id] <ast.NotIn object at 0x7da2590d7190> name[item_columns]] begin[:]
<ast.Raise object at 0x7da1b1f744c0>
call[name[item_columns].remove, parameter[name[item_id]]]
if compare[name[weights] not_equal[!=] constant[auto]] begin[:]
if compare[call[name[type], parameter[name[weights]]] is_not name[dict]] begin[:]
<ast.Raise object at 0x7da1b1f77640>
variable[bad_columns] assign[=] <ast.ListComp object at 0x7da1b1f75240>
if name[bad_columns] begin[:]
<ast.Raise object at 0x7da1b1f76290>
for taget[name[col_name]] in starred[name[item_columns]] begin[:]
call[name[weights].setdefault, parameter[name[col_name], constant[0]]]
if compare[name[item_data_transform] equal[==] constant[auto]] begin[:]
variable[item_data_transform] assign[=] call[name[_turicreate].toolkits._feature_engineering.AutoVectorizer, parameter[]]
if <ast.UnaryOp object at 0x7da1b204b400> begin[:]
<ast.Raise object at 0x7da1b204b280>
variable[item_data] assign[=] call[name[item_data_transform].fit_transform, parameter[name[item_data]]]
variable[gaussian_kernel_metrics] assign[=] call[name[set], parameter[]]
for taget[name[c]] in starred[name[item_columns]] begin[:]
if compare[call[name[item_data]][name[c]].dtype is name[str]] begin[:]
call[name[item_data]][name[c]] assign[=] call[call[name[item_data]][name[c]].apply, parameter[<ast.Lambda object at 0x7da1b2049cc0>]]
if name[verbose] begin[:]
call[name[print], parameter[constant[Applying transform:]]]
call[name[print], parameter[name[item_data_transform]]]
variable[opts] assign[=] dictionary[[], []]
variable[model_proxy] assign[=] call[name[_turicreate].extensions.item_content_recommender, parameter[]]
call[name[model_proxy].init_options, parameter[name[opts]]]
if compare[name[user_id] is constant[None]] begin[:]
variable[user_id] assign[=] constant[__implicit_user__]
variable[normalization_factor] assign[=] constant[1]
if compare[name[observation_data] is constant[None]] begin[:]
variable[empty_user] assign[=] call[name[_turicreate].SArray, parameter[list[[]]]]
variable[empty_item] assign[=] call[name[_turicreate].SArray, parameter[list[[]]]]
variable[observation_data] assign[=] call[name[_turicreate].SFrame, parameter[dictionary[[<ast.Name object at 0x7da1b2048850>, <ast.Name object at 0x7da1b2048820>], [<ast.Name object at 0x7da1b20487f0>, <ast.Name object at 0x7da1b20487c0>]]]]
variable[normalization_factor] assign[=] constant[1]
if compare[call[name[item_data].num_columns, parameter[]] greater_or_equal[>=] constant[3]] begin[:]
if compare[name[weights] equal[==] constant[auto]] begin[:]
variable[weights] assign[=] <ast.DictComp object at 0x7da1b20484c0>
variable[normalization_factor] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b20481c0>]]
if compare[name[normalization_factor] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b2028c10>
variable[distance] assign[=] <ast.ListComp object at 0x7da1b2028d90>
variable[nn] assign[=] call[name[_turicreate].nearest_neighbors.create, parameter[name[item_data]]]
variable[graph] assign[=] call[name[nn].query, parameter[name[item_data]]]
variable[graph] assign[=] call[name[graph].rename, parameter[dictionary[[<ast.Constant object at 0x7da1b2028bb0>, <ast.Constant object at 0x7da1b2028ca0>, <ast.Constant object at 0x7da1b2028cd0>], [<ast.Name object at 0x7da1b2028e80>, <ast.Constant object at 0x7da1b202a6b0>, <ast.Constant object at 0x7da1b202a9e0>]]]]
def function[process_weights, parameter[x]]:
return[call[name[max], parameter[<ast.UnaryOp object at 0x7da1b202b190>, call[name[min], parameter[constant[1], binary_operation[constant[1] - binary_operation[name[x] / name[normalization_factor]]]]]]]]
call[name[graph]][constant[score]] assign[=] call[call[name[graph]][constant[score]].apply, parameter[name[process_weights]]]
variable[opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b202b4c0>, <ast.Constant object at 0x7da1b202b460>, <ast.Constant object at 0x7da1b202b3a0>, <ast.Constant object at 0x7da1b202b430>, <ast.Constant object at 0x7da1b202b400>], [<ast.Name object at 0x7da1b202b490>, <ast.Name object at 0x7da1b202b370>, <ast.Name object at 0x7da1b202b310>, <ast.Constant object at 0x7da1b202b340>, <ast.Name object at 0x7da1b202b520>]]
variable[user_data] assign[=] call[name[_turicreate].SFrame, parameter[]]
variable[extra_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b202b070>], [<ast.Name object at 0x7da1b2029360>]]
with call[name[QuietProgress], parameter[name[verbose]]] begin[:]
call[name[model_proxy].train, parameter[name[observation_data], name[user_data], name[item_data], name[opts], name[extra_data]]]
return[call[name[ItemContentRecommender], parameter[name[model_proxy]]]]
|
keyword[def] identifier[create] ( identifier[item_data] , identifier[item_id] ,
identifier[observation_data] = keyword[None] ,
identifier[user_id] = keyword[None] , identifier[target] = keyword[None] ,
identifier[weights] = literal[string] ,
identifier[similarity_metrics] = literal[string] ,
identifier[item_data_transform] = literal[string] ,
identifier[max_item_neighborhood_size] = literal[int] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[from] identifier[turicreate] . identifier[_cython] . identifier[cy_server] keyword[import] identifier[QuietProgress]
keyword[if] keyword[not] identifier[isinstance] ( identifier[item_data] , identifier[_SFrame] ) keyword[or] identifier[item_data] . identifier[num_rows] ()== literal[int] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[item_columns] = identifier[set] ( identifier[item_data] . identifier[column_names] ())
keyword[if] identifier[item_id] keyword[not] keyword[in] identifier[item_columns] :
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[item_id] )
identifier[item_columns] . identifier[remove] ( identifier[item_id] )
keyword[if] identifier[weights] != literal[string] :
keyword[if] identifier[type] ( identifier[weights] ) keyword[is] keyword[not] identifier[dict] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
identifier[bad_columns] =[ identifier[col_name] keyword[for] identifier[col_name] keyword[in] identifier[item_columns] keyword[if] identifier[col_name] keyword[not] keyword[in] identifier[item_columns] ]
keyword[if] identifier[bad_columns] :
keyword[raise] identifier[ValueError] ( literal[string]
% literal[string] . identifier[join] ( identifier[bad_columns] ))
keyword[for] identifier[col_name] keyword[in] identifier[item_columns] :
identifier[weights] . identifier[setdefault] ( identifier[col_name] , literal[int] )
keyword[if] identifier[item_data_transform] == literal[string] :
identifier[item_data_transform] = identifier[_turicreate] . identifier[toolkits] . identifier[_feature_engineering] . identifier[AutoVectorizer] ( identifier[excluded_features] =[ identifier[item_id] ])
keyword[if] keyword[not] identifier[isinstance] ( identifier[item_data_transform] , identifier[_turicreate] . identifier[toolkits] . identifier[_feature_engineering] . identifier[TransformerBase] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[item_data] = identifier[item_data_transform] . identifier[fit_transform] ( identifier[item_data] )
identifier[gaussian_kernel_metrics] = identifier[set] ()
keyword[for] identifier[c] keyword[in] identifier[item_columns] :
keyword[if] identifier[item_data] [ identifier[c] ]. identifier[dtype] keyword[is] identifier[str] :
identifier[item_data] [ identifier[c] ]= identifier[item_data] [ identifier[c] ]. identifier[apply] ( keyword[lambda] identifier[s] :{ identifier[s] : literal[int] })
keyword[elif] identifier[item_data] [ identifier[c] ]. identifier[dtype] keyword[in] [ identifier[float] , identifier[int] ]:
identifier[item_data] [ identifier[c] ]=( identifier[item_data] [ identifier[c] ]- identifier[item_data] [ identifier[c] ]. identifier[mean] ())/ identifier[max] ( identifier[item_data] [ identifier[c] ]. identifier[std] (), literal[int] )
identifier[gaussian_kernel_metrics] . identifier[add] ( identifier[c] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[item_data_transform] )
identifier[opts] ={}
identifier[model_proxy] = identifier[_turicreate] . identifier[extensions] . identifier[item_content_recommender] ()
identifier[model_proxy] . identifier[init_options] ( identifier[opts] )
keyword[if] identifier[user_id] keyword[is] keyword[None] :
identifier[user_id] = literal[string]
identifier[normalization_factor] = literal[int]
keyword[if] identifier[observation_data] keyword[is] keyword[None] :
identifier[empty_user] = identifier[_turicreate] . identifier[SArray] ([], identifier[dtype] = identifier[str] )
identifier[empty_item] = identifier[_turicreate] . identifier[SArray] ([], identifier[dtype] = identifier[item_data] [ identifier[item_id] ]. identifier[dtype] )
identifier[observation_data] = identifier[_turicreate] . identifier[SFrame] ({ identifier[user_id] : identifier[empty_user] , identifier[item_id] : identifier[empty_item] })
identifier[normalization_factor] = literal[int]
keyword[if] identifier[item_data] . identifier[num_columns] ()>= literal[int] :
keyword[if] identifier[weights] == literal[string] :
identifier[weights] ={ identifier[col_name] : literal[int] keyword[for] identifier[col_name] keyword[in] identifier[item_data] . identifier[column_names] () keyword[if] identifier[col_name] != identifier[item_id] }
identifier[normalization_factor] = identifier[sum] ( identifier[abs] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[weights] . identifier[values] ())
keyword[if] identifier[normalization_factor] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[distance] =[([ identifier[col_name] ],( literal[string] keyword[if] identifier[col_name] keyword[in] identifier[gaussian_kernel_metrics] keyword[else] literal[string] ), identifier[weight] )
keyword[for] identifier[col_name] , identifier[weight] keyword[in] identifier[weights] . identifier[items] ()]
keyword[else] :
identifier[distance] = literal[string]
identifier[nn] = identifier[_turicreate] . identifier[nearest_neighbors] . identifier[create] ( identifier[item_data] , identifier[label] = identifier[item_id] , identifier[distance] = identifier[distance] , identifier[verbose] = identifier[verbose] )
identifier[graph] = identifier[nn] . identifier[query] ( identifier[item_data] , identifier[label] = identifier[item_id] , identifier[k] = identifier[max_item_neighborhood_size] , identifier[verbose] = identifier[verbose] )
identifier[graph] = identifier[graph] . identifier[rename] ({ literal[string] : identifier[item_id] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }, identifier[inplace] = keyword[True] )
keyword[def] identifier[process_weights] ( identifier[x] ):
keyword[return] identifier[max] (- literal[int] , identifier[min] ( literal[int] , literal[int] - identifier[x] / identifier[normalization_factor] ))
identifier[graph] [ literal[string] ]= identifier[graph] [ literal[string] ]. identifier[apply] ( identifier[process_weights] )
identifier[opts] ={ literal[string] : identifier[user_id] ,
literal[string] : identifier[item_id] ,
literal[string] : identifier[target] ,
literal[string] : literal[string] ,
literal[string] : identifier[max_item_neighborhood_size] }
identifier[user_data] = identifier[_turicreate] . identifier[SFrame] ()
identifier[extra_data] ={ literal[string] : identifier[graph] }
keyword[with] identifier[QuietProgress] ( identifier[verbose] ):
identifier[model_proxy] . identifier[train] ( identifier[observation_data] , identifier[user_data] , identifier[item_data] , identifier[opts] , identifier[extra_data] )
keyword[return] identifier[ItemContentRecommender] ( identifier[model_proxy] )
|
def create(item_data, item_id, observation_data=None, user_id=None, target=None, weights='auto', similarity_metrics='auto', item_data_transform='auto', max_item_neighborhood_size=64, verbose=True):
"""Create a content-based recommender model in which the similarity
between the items recommended is determined by the content of
those items rather than learned from user interaction data.
The similarity score between two items is calculated by first
computing the similarity between the item data for each column,
then taking a weighted average of the per-column similarities to
get the final similarity. The recommendations are generated
according to the average similarity of a candidate item to all the
items in a user's set of rated items.
Parameters
----------
item_data : SFrame
An SFrame giving the content of the items to use to learn the
structure of similar items. The SFrame must have one column
that matches the name of the `item_id`; this gives a unique
identifier that can then be used to make recommendations. The rest
of the columns are then used in the distance calculations
below.
item_id : string
The name of the column in item_data (and `observation_data`,
if given) that represents the item ID.
observation_data : None (optional)
An SFrame giving user and item interaction data. This
information is stored in the model, and the recommender will
recommend the items with the most similar content to the
items that were present and/or highly rated for that user.
user_id : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the user identifier.
target : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the target or rating.
weights : dict or 'auto' (optional)
If given, then weights must be a dictionary mapping column names
present in item_data to their weights. If 'auto' is given, then
all columns are weighted equally.
max_item_neighborhood_size : int, 64
For each item, we hold this many similar items to use when
aggregating models for predictions. Decreasing this value
decreases the memory required by the model and decreases the
time required to generate recommendations, but it may also
decrease recommendation accuracy.
verbose : True or False (optional)
If set to False, then less information is printed.
Examples
--------
>>> item_data = tc.SFrame({"my_item_id" : range(4),
"data_1" : [ [1, 0], [1, 0], [0, 1], [0.5, 0.5] ],
"data_2" : [ [0, 1], [1, 0], [0, 1], [0.5, 0.5] ] })
>>> m = tc.recommender.item_content_recommender.create(item_data, "my_item_id")
>>> m.recommend_from_interactions([0])
Columns:
my_item_id int
score float
rank int
Rows: 3
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 1 | 0.5 | 2 |
| 2 | 0.5 | 3 |
+------------+----------------+------+
[3 rows x 3 columns]
>>> m.recommend_from_interactions([0, 1])
Columns:
my_item_id int
score float
rank int
Rows: 2
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 2 | 0.25 | 2 |
+------------+----------------+------+
[2 rows x 3 columns]
"""
from turicreate._cython.cy_server import QuietProgress
# item_data is correct type
if not isinstance(item_data, _SFrame) or item_data.num_rows() == 0:
raise TypeError('`item_data` argument must be a non-empty SFrame giving item data to use for similarities.') # depends on [control=['if'], data=[]]
# Error checking on column names
item_columns = set(item_data.column_names())
if item_id not in item_columns:
raise ValueError("Item column given as 'item_id = %s', but this is not found in `item_data` SFrame." % item_id) # depends on [control=['if'], data=['item_id']]
# Now, get the set ready to test for other argument issues.
item_columns.remove(item_id)
if weights != 'auto':
if type(weights) is not dict:
raise TypeError("`weights` parameter must be 'auto' or a dictionary of column names in `item_data` to weight values.") # depends on [control=['if'], data=[]]
bad_columns = [col_name for col_name in item_columns if col_name not in item_columns]
if bad_columns:
raise ValueError('Columns %s given in weights, but these are not found in item_data.' % ', '.join(bad_columns)) # depends on [control=['if'], data=[]]
# Now, set any columns not given in the weights column to be
# weight 0.
for col_name in item_columns:
weights.setdefault(col_name, 0) # depends on [control=['for'], data=['col_name']] # depends on [control=['if'], data=['weights']]
################################################################################
# Now, check the feature transformer stuff.
# Pass it through a feature transformer.
if item_data_transform == 'auto':
item_data_transform = _turicreate.toolkits._feature_engineering.AutoVectorizer(excluded_features=[item_id]) # depends on [control=['if'], data=['item_data_transform']]
if not isinstance(item_data_transform, _turicreate.toolkits._feature_engineering.TransformerBase):
raise TypeError("item_data_transform must be 'auto' or a valid feature_engineering transformer instance.") # depends on [control=['if'], data=[]]
# Transform the input data.
item_data = item_data_transform.fit_transform(item_data)
# Translate any string columns to actually work in nearest
# neighbors by making it a categorical list. Also translate lists
# into dicts, and normalize numeric columns.
gaussian_kernel_metrics = set()
for c in item_columns:
if item_data[c].dtype is str:
item_data[c] = item_data[c].apply(lambda s: {s: 1}) # depends on [control=['if'], data=[]]
elif item_data[c].dtype in [float, int]:
item_data[c] = (item_data[c] - item_data[c].mean()) / max(item_data[c].std(), 1e-08)
gaussian_kernel_metrics.add(c) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
if verbose:
print('Applying transform:')
print(item_data_transform) # depends on [control=['if'], data=[]]
opts = {}
model_proxy = _turicreate.extensions.item_content_recommender()
model_proxy.init_options(opts)
# The user_id is implicit if none is given.
if user_id is None:
user_id = '__implicit_user__' # depends on [control=['if'], data=['user_id']]
normalization_factor = 1
# Set the observation data.
if observation_data is None:
# In this case, it's important to make this a string type. If
# the user column is not given, it may be given at recommend
# time, in which case it is cast to a string type and cast
# back if necessary.
empty_user = _turicreate.SArray([], dtype=str)
empty_item = _turicreate.SArray([], dtype=item_data[item_id].dtype)
observation_data = _turicreate.SFrame({user_id: empty_user, item_id: empty_item}) # depends on [control=['if'], data=['observation_data']]
# Now, work out stuff for the observation_data component
normalization_factor = 1
# 1 for the item_id column.
if item_data.num_columns() >= 3:
if weights == 'auto':
# TODO: automatically tune this.
weights = {col_name: 1 for col_name in item_data.column_names() if col_name != item_id} # depends on [control=['if'], data=['weights']]
# Use the abs value here in case users pass in weights with negative values.
normalization_factor = sum((abs(v) for v in weights.values()))
if normalization_factor == 0:
raise ValueError('Weights cannot all be set to 0.') # depends on [control=['if'], data=[]]
distance = [([col_name], 'gaussian_kernel' if col_name in gaussian_kernel_metrics else 'cosine', weight) for (col_name, weight) in weights.items()] # depends on [control=['if'], data=[]]
else:
distance = 'cosine'
# Now, build the nearest neighbors model:
nn = _turicreate.nearest_neighbors.create(item_data, label=item_id, distance=distance, verbose=verbose)
graph = nn.query(item_data, label=item_id, k=max_item_neighborhood_size, verbose=verbose)
graph = graph.rename({'query_label': item_id, 'reference_label': 'similar', 'distance': 'score'}, inplace=True)
def process_weights(x):
return max(-1, min(1, 1 - x / normalization_factor))
graph['score'] = graph['score'].apply(process_weights)
opts = {'user_id': user_id, 'item_id': item_id, 'target': target, 'similarity_type': 'cosine', 'max_item_neighborhood_size': max_item_neighborhood_size}
user_data = _turicreate.SFrame()
extra_data = {'nearest_items': graph}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data) # depends on [control=['with'], data=[]]
return ItemContentRecommender(model_proxy)
|
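For reference, a minimal standalone sketch of the distance-to-score mapping used in the create() row above, assuming plain Python floats in place of SFrame columns: the raw nearest-neighbor distance is divided by the sum of absolute column weights and flipped so that identical items score 1 and very distant items clamp to -1.

def process_weights(x, normalization_factor):
    # Map a weighted distance x to a similarity score in [-1, 1]:
    # x == 0 (identical items) -> 1; larger distances -> smaller scores.
    return max(-1, min(1, 1 - x / normalization_factor))

weights = {"data_1": 1.0, "data_2": 1.0}  # illustrative column weights
normalization_factor = sum(abs(v) for v in weights.values())  # 2.0

for distance in (0.0, 1.0, 2.0, 5.0):
    print(distance, process_weights(distance, normalization_factor))
# 0.0 -> 1.0, 1.0 -> 0.5, 2.0 -> 0.0, 5.0 -> -1 (clamped)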
def build_preauth_str(preauth_key, account_name, timestamp, expires,
admin=False):
""" Builds the preauth string and hmac it, following the zimbra spec.
Spec and examples are here http://wiki.zimbra.com/wiki/Preauth
"""
if admin:
s = '{0}|1|name|{1}|{2}'.format(account_name, expires, timestamp)
else:
s = '{0}|name|{1}|{2}'.format(account_name, expires, timestamp)
return hmac.new(preauth_key.encode('utf-8'), s.encode('utf-8'),
hashlib.sha1).hexdigest()
|
def function[build_preauth_str, parameter[preauth_key, account_name, timestamp, expires, admin]]:
constant[ Builds the preauth string and HMACs it, following the Zimbra spec.
Spec and examples are here http://wiki.zimbra.com/wiki/Preauth
]
if name[admin] begin[:]
variable[s] assign[=] call[constant[{0}|1|name|{1}|{2}].format, parameter[name[account_name], name[expires], name[timestamp]]]
return[call[call[name[hmac].new, parameter[call[name[preauth_key].encode, parameter[constant[utf-8]]], call[name[s].encode, parameter[constant[utf-8]]], name[hashlib].sha1]].hexdigest, parameter[]]]
|
keyword[def] identifier[build_preauth_str] ( identifier[preauth_key] , identifier[account_name] , identifier[timestamp] , identifier[expires] ,
identifier[admin] = keyword[False] ):
literal[string]
keyword[if] identifier[admin] :
identifier[s] = literal[string] . identifier[format] ( identifier[account_name] , identifier[expires] , identifier[timestamp] )
keyword[else] :
identifier[s] = literal[string] . identifier[format] ( identifier[account_name] , identifier[expires] , identifier[timestamp] )
keyword[return] identifier[hmac] . identifier[new] ( identifier[preauth_key] . identifier[encode] ( literal[string] ), identifier[s] . identifier[encode] ( literal[string] ),
identifier[hashlib] . identifier[sha1] ). identifier[hexdigest] ()
|
def build_preauth_str(preauth_key, account_name, timestamp, expires, admin=False):
""" Builds the preauth string and hmac it, following the zimbra spec.
Spec and examples are here http://wiki.zimbra.com/wiki/Preauth
"""
if admin:
s = '{0}|1|name|{1}|{2}'.format(account_name, expires, timestamp) # depends on [control=['if'], data=[]]
else:
s = '{0}|name|{1}|{2}'.format(account_name, expires, timestamp)
return hmac.new(preauth_key.encode('utf-8'), s.encode('utf-8'), hashlib.sha1).hexdigest()
|
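A hedged usage sketch for build_preauth_str above, assuming the hmac and hashlib imports the function relies on are in scope; the key and account are made-up values, and per the Zimbra wiki the timestamp is in milliseconds while expires=0 means the server default:

import time

preauth_key = "0123456789abcdef"           # hypothetical domain preauth key
account = "user@example.com"
timestamp = str(int(time.time() * 1000))   # Zimbra expects milliseconds
expires = "0"                              # 0 = use the server default expiry

token = build_preauth_str(preauth_key, account, timestamp, expires)
# token is then sent as the 'preauth' parameter to /service/preauth.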
def _put_model(D, name, dat, m):
"""
    Place the given model data into the given location (m).
:param dict D: Metadata (dataset)
:param str name: Model name (ex: chron0model0)
:param dict dat: Model data
:param regex m: Model name regex groups
:return dict D: Metadata (dataset)
"""
try:
# print("Placing model: {}".format(name))
_pc = m.group(1) + "Data"
_section = m.group(1) + m.group(2)
if _pc not in D:
# Section missing entirely? Can't continue
print("{} not found in the provided dataset. Please try again".format(_pc))
return
else:
if _section not in D[_pc]:
# Creates section: Example: D[chronData][chron0]
D[_pc][_section] = OrderedDict()
if "model" not in D[_pc][_section]:
# Creates model top level: Example: D[chronData][chron0]["model"]
D[_pc][_section]["model"] = OrderedDict()
if name not in D[_pc][_section]["model"]:
dat = _update_table_names(name, dat)
D[_pc][_section]["model"][name] = dat
else:
# Model already exists, should we overwrite it?
_prompt_overwrite = input(
"This model already exists in the dataset. Do you want to overwrite it? (y/n)")
# Yes, overwrite with the model data provided
if _prompt_overwrite == "y":
dat = _update_table_names(name, dat)
D[_pc][_section]["model"][name] = dat
# No, do not overwrite.
elif _prompt_overwrite == "n":
_name2 = _prompt_placement(D, "model")
_m = re.match(re_model_name, _name2)
if _m:
D = _put_model(D, _name2, dat, _m)
else:
print("Invalid choice")
except Exception as e:
print("addModel: Unable to put the model data into the dataset, {}".format(e))
return D
|
def function[_put_model, parameter[D, name, dat, m]]:
constant[
Place the given model data into the given location (m).
:param dict D: Metadata (dataset)
:param str name: Model name (ex: chron0model0)
:param dict dat: Model data
:param regex m: Model name regex groups
:return dict D: Metadata (dataset)
]
<ast.Try object at 0x7da1b26ac0a0>
return[name[D]]
|
keyword[def] identifier[_put_model] ( identifier[D] , identifier[name] , identifier[dat] , identifier[m] ):
literal[string]
keyword[try] :
identifier[_pc] = identifier[m] . identifier[group] ( literal[int] )+ literal[string]
identifier[_section] = identifier[m] . identifier[group] ( literal[int] )+ identifier[m] . identifier[group] ( literal[int] )
keyword[if] identifier[_pc] keyword[not] keyword[in] identifier[D] :
identifier[print] ( literal[string] . identifier[format] ( identifier[_pc] ))
keyword[return]
keyword[else] :
keyword[if] identifier[_section] keyword[not] keyword[in] identifier[D] [ identifier[_pc] ]:
identifier[D] [ identifier[_pc] ][ identifier[_section] ]= identifier[OrderedDict] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[D] [ identifier[_pc] ][ identifier[_section] ]:
identifier[D] [ identifier[_pc] ][ identifier[_section] ][ literal[string] ]= identifier[OrderedDict] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[D] [ identifier[_pc] ][ identifier[_section] ][ literal[string] ]:
identifier[dat] = identifier[_update_table_names] ( identifier[name] , identifier[dat] )
identifier[D] [ identifier[_pc] ][ identifier[_section] ][ literal[string] ][ identifier[name] ]= identifier[dat]
keyword[else] :
identifier[_prompt_overwrite] = identifier[input] (
literal[string] )
keyword[if] identifier[_prompt_overwrite] == literal[string] :
identifier[dat] = identifier[_update_table_names] ( identifier[name] , identifier[dat] )
identifier[D] [ identifier[_pc] ][ identifier[_section] ][ literal[string] ][ identifier[name] ]= identifier[dat]
keyword[elif] identifier[_prompt_overwrite] == literal[string] :
identifier[_name2] = identifier[_prompt_placement] ( identifier[D] , literal[string] )
identifier[_m] = identifier[re] . identifier[match] ( identifier[re_model_name] , identifier[_name2] )
keyword[if] identifier[_m] :
identifier[D] = identifier[_put_model] ( identifier[D] , identifier[_name2] , identifier[dat] , identifier[_m] )
keyword[else] :
identifier[print] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return] identifier[D]
|
def _put_model(D, name, dat, m):
"""
    Place the given model data into the given location (m).
:param dict D: Metadata (dataset)
:param str name: Model name (ex: chron0model0)
:param dict dat: Model data
:param regex m: Model name regex groups
:return dict D: Metadata (dataset)
"""
try:
# print("Placing model: {}".format(name))
_pc = m.group(1) + 'Data'
_section = m.group(1) + m.group(2)
if _pc not in D:
# Section missing entirely? Can't continue
print('{} not found in the provided dataset. Please try again'.format(_pc))
return # depends on [control=['if'], data=['_pc']]
else:
if _section not in D[_pc]:
# Creates section: Example: D[chronData][chron0]
D[_pc][_section] = OrderedDict() # depends on [control=['if'], data=['_section']]
if 'model' not in D[_pc][_section]:
# Creates model top level: Example: D[chronData][chron0]["model"]
D[_pc][_section]['model'] = OrderedDict() # depends on [control=['if'], data=[]]
if name not in D[_pc][_section]['model']:
dat = _update_table_names(name, dat)
D[_pc][_section]['model'][name] = dat # depends on [control=['if'], data=['name']]
else:
# Model already exists, should we overwrite it?
_prompt_overwrite = input('This model already exists in the dataset. Do you want to overwrite it? (y/n)')
# Yes, overwrite with the model data provided
if _prompt_overwrite == 'y':
dat = _update_table_names(name, dat)
D[_pc][_section]['model'][name] = dat # depends on [control=['if'], data=[]]
# No, do not overwrite.
elif _prompt_overwrite == 'n':
_name2 = _prompt_placement(D, 'model')
_m = re.match(re_model_name, _name2)
if _m:
D = _put_model(D, _name2, dat, _m) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
print('Invalid choice') # depends on [control=['try'], data=[]]
except Exception as e:
print('addModel: Unable to put the model data into the dataset, {}'.format(e)) # depends on [control=['except'], data=['e']]
return D
|
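The placement logic in _put_model is driven entirely by the regex groups of the model name. The actual re_model_name is not shown here, so the sketch below uses a hypothetical equivalent pattern to illustrate how 'chron0model0' resolves to a section:

import re

# Hypothetical stand-in for the re_model_name used above.
re_model_name = re.compile(r"(chron|paleo)(\d+)model(\d+)")

m = re_model_name.match("chron0model0")
if m:
    _pc = m.group(1) + "Data"            # "chronData"
    _section = m.group(1) + m.group(2)   # "chron0"
    print(_pc, _section)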
def get_stream_formats(self, media_item):
"""Get the available media formats for a given media item
@param crunchyroll.models.Media
@return dict
"""
scraper = ScraperApi(self._ajax_api._connector)
formats = scraper.get_media_formats(media_item.media_id)
return formats
|
def function[get_stream_formats, parameter[self, media_item]]:
constant[Get the available media formats for a given media item
@param crunchyroll.models.Media
@return dict
]
variable[scraper] assign[=] call[name[ScraperApi], parameter[name[self]._ajax_api._connector]]
variable[formats] assign[=] call[name[scraper].get_media_formats, parameter[name[media_item].media_id]]
return[name[formats]]
|
keyword[def] identifier[get_stream_formats] ( identifier[self] , identifier[media_item] ):
literal[string]
identifier[scraper] = identifier[ScraperApi] ( identifier[self] . identifier[_ajax_api] . identifier[_connector] )
identifier[formats] = identifier[scraper] . identifier[get_media_formats] ( identifier[media_item] . identifier[media_id] )
keyword[return] identifier[formats]
|
def get_stream_formats(self, media_item):
"""Get the available media formats for a given media item
@param crunchyroll.models.Media
@return dict
"""
scraper = ScraperApi(self._ajax_api._connector)
formats = scraper.get_media_formats(media_item.media_id)
return formats
|
def write(self, data):
"""
    write data on the OUT endpoint associated with the HID interface
"""
for _ in range(64 - len(data)):
data.append(0)
#logging.debug("send: %s", data)
self.device.write(bytearray([0]) + data)
return
|
def function[write, parameter[self, data]]:
constant[
write data on the OUT endpoint associated with the HID interface
]
for taget[name[_]] in starred[call[name[range], parameter[binary_operation[constant[64] - call[name[len], parameter[name[data]]]]]]] begin[:]
call[name[data].append, parameter[constant[0]]]
call[name[self].device.write, parameter[binary_operation[call[name[bytearray], parameter[list[[<ast.Constant object at 0x7da1b065add0>]]]] + name[data]]]]
return[None]
|
keyword[def] identifier[write] ( identifier[self] , identifier[data] ):
literal[string]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] - identifier[len] ( identifier[data] )):
identifier[data] . identifier[append] ( literal[int] )
identifier[self] . identifier[device] . identifier[write] ( identifier[bytearray] ([ literal[int] ])+ identifier[data] )
keyword[return]
|
def write(self, data):
"""
    write data on the OUT endpoint associated with the HID interface
"""
for _ in range(64 - len(data)):
data.append(0) # depends on [control=['for'], data=[]]
#logging.debug("send: %s", data)
self.device.write(bytearray([0]) + data)
return
|
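A self-contained sketch of the report framing that write() performs: the payload is zero-padded to the fixed 64-byte HID packet size, then prefixed with a report ID byte of 0 before being handed to the device (the device handle itself is assumed):

def frame_hid_report(data, packet_size=64):
    # Zero-pad the payload to the fixed HID packet size, then prepend
    # the report ID byte (0) expected by the underlying write() call.
    data = bytearray(data)
    data.extend(0 for _ in range(packet_size - len(data)))
    return bytearray([0]) + data

report = frame_hid_report(b"\x01\x02\x03")
assert len(report) == 65 and report[0] == 0 and report[4] == 0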
def get_branches(self):
"""
:calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Branch.Branch`
"""
return github.PaginatedList.PaginatedList(
github.Branch.Branch,
self._requester,
self.url + "/branches",
None
)
|
def function[get_branches, parameter[self]]:
constant[
:calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Branch.Branch`
]
return[call[name[github].PaginatedList.PaginatedList, parameter[name[github].Branch.Branch, name[self]._requester, binary_operation[name[self].url + constant[/branches]], constant[None]]]]
|
keyword[def] identifier[get_branches] ( identifier[self] ):
literal[string]
keyword[return] identifier[github] . identifier[PaginatedList] . identifier[PaginatedList] (
identifier[github] . identifier[Branch] . identifier[Branch] ,
identifier[self] . identifier[_requester] ,
identifier[self] . identifier[url] + literal[string] ,
keyword[None]
)
|
def get_branches(self):
"""
:calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Branch.Branch`
"""
return github.PaginatedList.PaginatedList(github.Branch.Branch, self._requester, self.url + '/branches', None)
|
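A hedged usage sketch for get_branches(), assuming the PyGithub client and a valid access token (both values below are placeholders):

from github import Github

gh = Github("YOUR_TOKEN")                  # hypothetical token
repo = gh.get_repo("octocat/Hello-World")
for branch in repo.get_branches():         # PaginatedList fetches pages lazily
    print(branch.name)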
def _download_response(self):
"""Returns a response body string from the server."""
if self.network.limit_rate:
self.network._delay_call()
data = []
for name in self.params.keys():
data.append("=".join((name, url_quote_plus(_string(self.params[name])))))
data = "&".join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept-Charset": "utf-8",
"User-Agent": "pylast" + "/" + __version__,
}
(host_name, host_subdir) = self.network.ws_server
if self.network.is_proxy_enabled():
conn = HTTPSConnection(
context=SSL_CONTEXT,
host=self.network._get_proxy()[0],
port=self.network._get_proxy()[1],
)
try:
conn.request(
method="POST",
url="https://" + host_name + host_subdir,
body=data,
headers=headers,
)
except Exception as e:
raise NetworkError(self.network, e)
else:
conn = HTTPSConnection(context=SSL_CONTEXT, host=host_name)
try:
conn.request(method="POST", url=host_subdir, body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
try:
response_text = _unicode(conn.getresponse().read())
except Exception as e:
raise MalformedResponseError(self.network, e)
try:
self._check_response_for_errors(response_text)
finally:
conn.close()
return response_text
|
def function[_download_response, parameter[self]]:
constant[Returns a response body string from the server.]
if name[self].network.limit_rate begin[:]
call[name[self].network._delay_call, parameter[]]
variable[data] assign[=] list[[]]
for taget[name[name]] in starred[call[name[self].params.keys, parameter[]]] begin[:]
call[name[data].append, parameter[call[constant[=].join, parameter[tuple[[<ast.Name object at 0x7da1b0bd9d50>, <ast.Call object at 0x7da1b0bd9e40>]]]]]]
variable[data] assign[=] call[constant[&].join, parameter[name[data]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b0bd8af0>, <ast.Constant object at 0x7da1b0b45c60>, <ast.Constant object at 0x7da1b0b47cd0>], [<ast.Constant object at 0x7da1b0b45f00>, <ast.Constant object at 0x7da1b0b47bb0>, <ast.BinOp object at 0x7da1b0b448e0>]]
<ast.Tuple object at 0x7da1b0b46d40> assign[=] name[self].network.ws_server
if call[name[self].network.is_proxy_enabled, parameter[]] begin[:]
variable[conn] assign[=] call[name[HTTPSConnection], parameter[]]
<ast.Try object at 0x7da1b0bdba90>
<ast.Try object at 0x7da1b0bdb040>
<ast.Try object at 0x7da1b0bd9660>
return[name[response_text]]
|
keyword[def] identifier[_download_response] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[network] . identifier[limit_rate] :
identifier[self] . identifier[network] . identifier[_delay_call] ()
identifier[data] =[]
keyword[for] identifier[name] keyword[in] identifier[self] . identifier[params] . identifier[keys] ():
identifier[data] . identifier[append] ( literal[string] . identifier[join] (( identifier[name] , identifier[url_quote_plus] ( identifier[_string] ( identifier[self] . identifier[params] [ identifier[name] ])))))
identifier[data] = literal[string] . identifier[join] ( identifier[data] )
identifier[headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] + literal[string] + identifier[__version__] ,
}
( identifier[host_name] , identifier[host_subdir] )= identifier[self] . identifier[network] . identifier[ws_server]
keyword[if] identifier[self] . identifier[network] . identifier[is_proxy_enabled] ():
identifier[conn] = identifier[HTTPSConnection] (
identifier[context] = identifier[SSL_CONTEXT] ,
identifier[host] = identifier[self] . identifier[network] . identifier[_get_proxy] ()[ literal[int] ],
identifier[port] = identifier[self] . identifier[network] . identifier[_get_proxy] ()[ literal[int] ],
)
keyword[try] :
identifier[conn] . identifier[request] (
identifier[method] = literal[string] ,
identifier[url] = literal[string] + identifier[host_name] + identifier[host_subdir] ,
identifier[body] = identifier[data] ,
identifier[headers] = identifier[headers] ,
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[NetworkError] ( identifier[self] . identifier[network] , identifier[e] )
keyword[else] :
identifier[conn] = identifier[HTTPSConnection] ( identifier[context] = identifier[SSL_CONTEXT] , identifier[host] = identifier[host_name] )
keyword[try] :
identifier[conn] . identifier[request] ( identifier[method] = literal[string] , identifier[url] = identifier[host_subdir] , identifier[body] = identifier[data] , identifier[headers] = identifier[headers] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[NetworkError] ( identifier[self] . identifier[network] , identifier[e] )
keyword[try] :
identifier[response_text] = identifier[_unicode] ( identifier[conn] . identifier[getresponse] (). identifier[read] ())
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[MalformedResponseError] ( identifier[self] . identifier[network] , identifier[e] )
keyword[try] :
identifier[self] . identifier[_check_response_for_errors] ( identifier[response_text] )
keyword[finally] :
identifier[conn] . identifier[close] ()
keyword[return] identifier[response_text]
|
def _download_response(self):
"""Returns a response body string from the server."""
if self.network.limit_rate:
self.network._delay_call() # depends on [control=['if'], data=[]]
data = []
for name in self.params.keys():
data.append('='.join((name, url_quote_plus(_string(self.params[name]))))) # depends on [control=['for'], data=['name']]
data = '&'.join(data)
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept-Charset': 'utf-8', 'User-Agent': 'pylast' + '/' + __version__}
(host_name, host_subdir) = self.network.ws_server
if self.network.is_proxy_enabled():
conn = HTTPSConnection(context=SSL_CONTEXT, host=self.network._get_proxy()[0], port=self.network._get_proxy()[1])
try:
conn.request(method='POST', url='https://' + host_name + host_subdir, body=data, headers=headers) # depends on [control=['try'], data=[]]
except Exception as e:
raise NetworkError(self.network, e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
conn = HTTPSConnection(context=SSL_CONTEXT, host=host_name)
try:
conn.request(method='POST', url=host_subdir, body=data, headers=headers) # depends on [control=['try'], data=[]]
except Exception as e:
raise NetworkError(self.network, e) # depends on [control=['except'], data=['e']]
try:
response_text = _unicode(conn.getresponse().read()) # depends on [control=['try'], data=[]]
except Exception as e:
raise MalformedResponseError(self.network, e) # depends on [control=['except'], data=['e']]
try:
self._check_response_for_errors(response_text) # depends on [control=['try'], data=[]]
finally:
conn.close()
return response_text
|
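The request body assembled by _download_response is ordinary application/x-www-form-urlencoded data. A standard-library sketch of the same encoding, with illustrative parameter names:

from urllib.parse import quote_plus

params = {"method": "track.getInfo", "artist": "Cher", "api_key": "abc123"}

# Mirrors the manual '&'.join of 'name=value' pairs above.
body = "&".join(
    "=".join((name, quote_plus(str(value)))) for name, value in params.items()
)
print(body)  # method=track.getInfo&artist=Cher&api_key=abc123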
def find_n_pc(u, factor=0.5):
"""Find number of principal components
This method finds the minimum number of principal components required
Parameters
----------
u : np.ndarray
Left singular vector of the original data
factor : float, optional
Factor for testing the auto correlation (default is '0.5')
Returns
-------
int number of principal components
Examples
--------
>>> from scipy.linalg import svd
>>> from modopt.signal.svd import find_n_pc
>>> x = np.arange(18).reshape(9, 2).astype(float)
>>> find_n_pc(svd(x)[0])
array([3])
"""
if np.sqrt(u.shape[0]) % 1:
        raise ValueError('Invalid left singular value. The size of the first '
                         'dimension of u must be a perfect square.')
# Get the shape of the array
array_shape = np.repeat(np.int(np.sqrt(u.shape[0])), 2)
# Find the auto correlation of the left singular vector.
u_auto = [convolve(a.reshape(array_shape),
np.rot90(a.reshape(array_shape), 2)) for a in u.T]
# Return the required number of principal components.
return np.sum([(a[tuple(zip(array_shape // 2))] ** 2 <= factor *
np.sum(a ** 2)) for a in u_auto])
|
def function[find_n_pc, parameter[u, factor]]:
constant[Find number of principal components
This method finds the minimum number of principal components required
Parameters
----------
u : np.ndarray
Left singular vector of the original data
factor : float, optional
Factor for testing the auto correlation (default is '0.5')
Returns
-------
int number of principal components
Examples
--------
>>> from scipy.linalg import svd
>>> from modopt.signal.svd import find_n_pc
>>> x = np.arange(18).reshape(9, 2).astype(float)
>>> find_n_pc(svd(x)[0])
array([3])
]
if binary_operation[call[name[np].sqrt, parameter[call[name[u].shape][constant[0]]]] <ast.Mod object at 0x7da2590d6920> constant[1]] begin[:]
<ast.Raise object at 0x7da1b0e9eb60>
variable[array_shape] assign[=] call[name[np].repeat, parameter[call[name[np].int, parameter[call[name[np].sqrt, parameter[call[name[u].shape][constant[0]]]]]], constant[2]]]
variable[u_auto] assign[=] <ast.ListComp object at 0x7da1b0e9d150>
return[call[name[np].sum, parameter[<ast.ListComp object at 0x7da1b0e16f50>]]]
|
keyword[def] identifier[find_n_pc] ( identifier[u] , identifier[factor] = literal[int] ):
literal[string]
keyword[if] identifier[np] . identifier[sqrt] ( identifier[u] . identifier[shape] [ literal[int] ])% literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[array_shape] = identifier[np] . identifier[repeat] ( identifier[np] . identifier[int] ( identifier[np] . identifier[sqrt] ( identifier[u] . identifier[shape] [ literal[int] ])), literal[int] )
identifier[u_auto] =[ identifier[convolve] ( identifier[a] . identifier[reshape] ( identifier[array_shape] ),
identifier[np] . identifier[rot90] ( identifier[a] . identifier[reshape] ( identifier[array_shape] ), literal[int] )) keyword[for] identifier[a] keyword[in] identifier[u] . identifier[T] ]
keyword[return] identifier[np] . identifier[sum] ([( identifier[a] [ identifier[tuple] ( identifier[zip] ( identifier[array_shape] // literal[int] ))]** literal[int] <= identifier[factor] *
identifier[np] . identifier[sum] ( identifier[a] ** literal[int] )) keyword[for] identifier[a] keyword[in] identifier[u_auto] ])
|
def find_n_pc(u, factor=0.5):
"""Find number of principal components
This method finds the minimum number of principal components required
Parameters
----------
u : np.ndarray
Left singular vector of the original data
factor : float, optional
Factor for testing the auto correlation (default is '0.5')
Returns
-------
int number of principal components
Examples
--------
>>> from scipy.linalg import svd
>>> from modopt.signal.svd import find_n_pc
>>> x = np.arange(18).reshape(9, 2).astype(float)
>>> find_n_pc(svd(x)[0])
array([3])
"""
if np.sqrt(u.shape[0]) % 1:
        raise ValueError('Invalid left singular value. The size of the first dimension of u must be a perfect square.') # depends on [control=['if'], data=[]]
# Get the shape of the array
array_shape = np.repeat(np.int(np.sqrt(u.shape[0])), 2)
# Find the auto correlation of the left singular vector.
u_auto = [convolve(a.reshape(array_shape), np.rot90(a.reshape(array_shape), 2)) for a in u.T]
# Return the required number of principal components.
return np.sum([a[tuple(zip(array_shape // 2))] ** 2 <= factor * np.sum(a ** 2) for a in u_auto])
|
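The selection rule in find_n_pc keeps a singular vector when the squared zero-lag term of its 2-D autocorrelation is at most `factor` times the total autocorrelation energy. A small sketch of that test for a single column of u, assuming a 'same'-mode convolution (which, for an odd side length, keeps the zero-lag term at index side // 2, matching the a[array_shape // 2] lookup above):

import numpy as np
from scipy.signal import convolve

factor = 0.5
a = np.arange(9.0)                    # one column of u; length must be a square
side = int(np.sqrt(a.size))
img = a.reshape((side, side))

auto = convolve(img, np.rot90(img, 2), mode="same")  # 2-D autocorrelation
center = auto[side // 2, side // 2]                  # zero-lag term
keep = center ** 2 <= factor * np.sum(auto ** 2)
print(keep)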
def select_files_by_ifo_combination(ifocomb, insps):
"""
This function selects single-detector files ('insps') for a given ifo combination
"""
inspcomb = FileList()
for ifo, ifile in zip(*insps.categorize_by_attr('ifo')):
if ifo in ifocomb:
inspcomb += ifile
return inspcomb
|
def function[select_files_by_ifo_combination, parameter[ifocomb, insps]]:
constant[
This function selects single-detector files ('insps') for a given ifo combination
]
variable[inspcomb] assign[=] call[name[FileList], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2041daef0>, <ast.Name object at 0x7da2041d84f0>]]] in starred[call[name[zip], parameter[<ast.Starred object at 0x7da2041dabf0>]]] begin[:]
if compare[name[ifo] in name[ifocomb]] begin[:]
<ast.AugAssign object at 0x7da2041da9b0>
return[name[inspcomb]]
|
keyword[def] identifier[select_files_by_ifo_combination] ( identifier[ifocomb] , identifier[insps] ):
literal[string]
identifier[inspcomb] = identifier[FileList] ()
keyword[for] identifier[ifo] , identifier[ifile] keyword[in] identifier[zip] (* identifier[insps] . identifier[categorize_by_attr] ( literal[string] )):
keyword[if] identifier[ifo] keyword[in] identifier[ifocomb] :
identifier[inspcomb] += identifier[ifile]
keyword[return] identifier[inspcomb]
|
def select_files_by_ifo_combination(ifocomb, insps):
"""
This function selects single-detector files ('insps') for a given ifo combination
"""
inspcomb = FileList()
for (ifo, ifile) in zip(*insps.categorize_by_attr('ifo')):
if ifo in ifocomb:
inspcomb += ifile # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return inspcomb
|
def glr_path_static():
"""Returns path to packaged static files"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), '_static'))
|
def function[glr_path_static, parameter[]]:
constant[Returns path to packaged static files]
return[call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[_static]]]]]]
|
keyword[def] identifier[glr_path_static] ():
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] ))
|
def glr_path_static():
"""Returns path to packaged static files"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), '_static'))
|
def get(self, interface_id, interface_ip=None):
"""
Get will return a list of interface references based on the
specified interface id. Multiple references can be returned if
a single interface has multiple IP addresses assigned.
:return: If interface_ip is provided, a single ContactAddressNode
element is returned if found. Otherwise a list will be
returned with all contact address nodes for the given
interface_id.
"""
interfaces = []
for interface in iter(self):
if interface.interface_id == str(interface_id):
if interface_ip:
if interface.interface_ip == interface_ip:
return interface
else:
interfaces.append(interface)
return interfaces
|
def function[get, parameter[self, interface_id, interface_ip]]:
constant[
Get will return a list of interface references based on the
specified interface id. Multiple references can be returned if
a single interface has multiple IP addresses assigned.
:return: If interface_ip is provided, a single ContactAddressNode
element is returned if found. Otherwise a list will be
returned with all contact address nodes for the given
interface_id.
]
variable[interfaces] assign[=] list[[]]
for taget[name[interface]] in starred[call[name[iter], parameter[name[self]]]] begin[:]
if compare[name[interface].interface_id equal[==] call[name[str], parameter[name[interface_id]]]] begin[:]
if name[interface_ip] begin[:]
if compare[name[interface].interface_ip equal[==] name[interface_ip]] begin[:]
return[name[interface]]
return[name[interfaces]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[interface_id] , identifier[interface_ip] = keyword[None] ):
literal[string]
identifier[interfaces] =[]
keyword[for] identifier[interface] keyword[in] identifier[iter] ( identifier[self] ):
keyword[if] identifier[interface] . identifier[interface_id] == identifier[str] ( identifier[interface_id] ):
keyword[if] identifier[interface_ip] :
keyword[if] identifier[interface] . identifier[interface_ip] == identifier[interface_ip] :
keyword[return] identifier[interface]
keyword[else] :
identifier[interfaces] . identifier[append] ( identifier[interface] )
keyword[return] identifier[interfaces]
|
def get(self, interface_id, interface_ip=None):
"""
Get will return a list of interface references based on the
specified interface id. Multiple references can be returned if
a single interface has multiple IP addresses assigned.
:return: If interface_ip is provided, a single ContactAddressNode
element is returned if found. Otherwise a list will be
returned with all contact address nodes for the given
interface_id.
"""
interfaces = []
for interface in iter(self):
if interface.interface_id == str(interface_id):
if interface_ip:
if interface.interface_ip == interface_ip:
return interface # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
interfaces.append(interface) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['interface']]
return interfaces
|
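The dual return behaviour documented above (a single node for an exact interface_ip match, otherwise a list of all addresses on the interface) can be sketched without the smc-python types; Node and the sample data below are illustrative:

from collections import namedtuple

Node = namedtuple("Node", ["interface_id", "interface_ip"])
nodes = [Node("1", "10.0.0.1"), Node("1", "10.0.0.2"), Node("2", "10.1.0.1")]

def get(interface_id, interface_ip=None):
    interfaces = []
    for node in nodes:
        if node.interface_id == str(interface_id):
            if interface_ip:
                if node.interface_ip == interface_ip:
                    return node          # single node for an exact IP match
            else:
                interfaces.append(node)  # otherwise collect every address
    return interfaces

print(get(1))               # both addresses on interface 1
print(get(1, "10.0.0.2"))   # the single matching node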
def _normalize(number):
"""Normalizes a string of characters representing a phone number.
This performs the following conversions:
- Punctuation is stripped.
- For ALPHA/VANITY numbers:
- Letters are converted to their numeric representation on a telephone
keypad. The keypad used here is the one defined in ITU
Recommendation E.161. This is only done if there are 3 or more
letters in the number, to lessen the risk that such letters are
typos.
- For other numbers:
- Wide-ascii digits are converted to normal ASCII (European) digits.
- Arabic-Indic numerals are converted to European numerals.
- Spurious alpha characters are stripped.
Arguments:
number -- string representing a phone number
Returns the normalized string version of the phone number.
"""
m = fullmatch(_VALID_ALPHA_PHONE_PATTERN, number)
if m:
return _normalize_helper(number, _ALPHA_PHONE_MAPPINGS, True)
else:
return normalize_digits_only(number)
|
def function[_normalize, parameter[number]]:
constant[Normalizes a string of characters representing a phone number.
This performs the following conversions:
- Punctuation is stripped.
- For ALPHA/VANITY numbers:
- Letters are converted to their numeric representation on a telephone
keypad. The keypad used here is the one defined in ITU
Recommendation E.161. This is only done if there are 3 or more
letters in the number, to lessen the risk that such letters are
typos.
- For other numbers:
- Wide-ascii digits are converted to normal ASCII (European) digits.
- Arabic-Indic numerals are converted to European numerals.
- Spurious alpha characters are stripped.
Arguments:
number -- string representing a phone number
Returns the normalized string version of the phone number.
]
variable[m] assign[=] call[name[fullmatch], parameter[name[_VALID_ALPHA_PHONE_PATTERN], name[number]]]
if name[m] begin[:]
return[call[name[_normalize_helper], parameter[name[number], name[_ALPHA_PHONE_MAPPINGS], constant[True]]]]
|
keyword[def] identifier[_normalize] ( identifier[number] ):
literal[string]
identifier[m] = identifier[fullmatch] ( identifier[_VALID_ALPHA_PHONE_PATTERN] , identifier[number] )
keyword[if] identifier[m] :
keyword[return] identifier[_normalize_helper] ( identifier[number] , identifier[_ALPHA_PHONE_MAPPINGS] , keyword[True] )
keyword[else] :
keyword[return] identifier[normalize_digits_only] ( identifier[number] )
|
def _normalize(number):
"""Normalizes a string of characters representing a phone number.
This performs the following conversions:
- Punctuation is stripped.
- For ALPHA/VANITY numbers:
- Letters are converted to their numeric representation on a telephone
keypad. The keypad used here is the one defined in ITU
Recommendation E.161. This is only done if there are 3 or more
letters in the number, to lessen the risk that such letters are
typos.
- For other numbers:
- Wide-ascii digits are converted to normal ASCII (European) digits.
- Arabic-Indic numerals are converted to European numerals.
- Spurious alpha characters are stripped.
Arguments:
number -- string representing a phone number
Returns the normalized string version of the phone number.
"""
m = fullmatch(_VALID_ALPHA_PHONE_PATTERN, number)
if m:
return _normalize_helper(number, _ALPHA_PHONE_MAPPINGS, True) # depends on [control=['if'], data=[]]
else:
return normalize_digits_only(number)
|
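For context, the ITU E.161 keypad table behind _ALPHA_PHONE_MAPPINGS can be sketched in a few lines; this stand-in covers only the letter-to-digit part of the normalization:

# Hypothetical stand-in for the library's _ALPHA_PHONE_MAPPINGS table.
_KEYPAD = {"2": "ABC", "3": "DEF", "4": "GHI", "5": "JKL",
           "6": "MNO", "7": "PQRS", "8": "TUV", "9": "WXYZ"}
LETTER_TO_DIGIT = {ch: digit for digit, letters in _KEYPAD.items()
                   for ch in letters}

def to_digits(vanity):
    # "1800GOTJUNK" -> "18004685865"
    return "".join(LETTER_TO_DIGIT.get(c.upper(), c) for c in vanity)

print(to_digits("1800GOTJUNK"))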
def random_walk_normal_fn(scale=1., name=None):
"""Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a normal perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(
name, 'random_walk_normal_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts)
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.')
seed_stream = distributions.SeedStream(seed, salt='RandomWalkNormalFn')
next_state_parts = [
tf.random.normal(
mean=state_part,
stddev=scale_part,
shape=tf.shape(input=state_part),
dtype=state_part.dtype.base_dtype,
seed=seed_stream())
for scale_part, state_part in zip(scales, state_parts)
]
return next_state_parts
return _fn
|
def function[random_walk_normal_fn, parameter[scale, name]]:
constant[Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm.
]
def function[_fn, parameter[state_parts, seed]]:
constant[Adds a normal perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
    perturbed_state_parts: A Python `list` of the `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
]
with call[name[tf].compat.v1.name_scope, parameter[name[name], constant[random_walk_normal_fn]]] begin[:]
variable[scales] assign[=] <ast.IfExp object at 0x7da1b03e37c0>
if compare[call[name[len], parameter[name[scales]]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da1b03e0be0>
if compare[call[name[len], parameter[name[state_parts]]] not_equal[!=] call[name[len], parameter[name[scales]]]] begin[:]
<ast.Raise object at 0x7da1b03e30a0>
variable[seed_stream] assign[=] call[name[distributions].SeedStream, parameter[name[seed]]]
variable[next_state_parts] assign[=] <ast.ListComp object at 0x7da1b03e34f0>
return[name[next_state_parts]]
return[name[_fn]]
|
keyword[def] identifier[random_walk_normal_fn] ( identifier[scale] = literal[int] , identifier[name] = keyword[None] ):
literal[string]
keyword[def] identifier[_fn] ( identifier[state_parts] , identifier[seed] ):
literal[string]
keyword[with] identifier[tf] . identifier[compat] . identifier[v1] . identifier[name_scope] (
identifier[name] , literal[string] , identifier[values] =[ identifier[state_parts] , identifier[scale] , identifier[seed] ]):
identifier[scales] = identifier[scale] keyword[if] identifier[mcmc_util] . identifier[is_list_like] ( identifier[scale] ) keyword[else] [ identifier[scale] ]
keyword[if] identifier[len] ( identifier[scales] )== literal[int] :
identifier[scales] *= identifier[len] ( identifier[state_parts] )
keyword[if] identifier[len] ( identifier[state_parts] )!= identifier[len] ( identifier[scales] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[seed_stream] = identifier[distributions] . identifier[SeedStream] ( identifier[seed] , identifier[salt] = literal[string] )
identifier[next_state_parts] =[
identifier[tf] . identifier[random] . identifier[normal] (
identifier[mean] = identifier[state_part] ,
identifier[stddev] = identifier[scale_part] ,
identifier[shape] = identifier[tf] . identifier[shape] ( identifier[input] = identifier[state_part] ),
identifier[dtype] = identifier[state_part] . identifier[dtype] . identifier[base_dtype] ,
identifier[seed] = identifier[seed_stream] ())
keyword[for] identifier[scale_part] , identifier[state_part] keyword[in] identifier[zip] ( identifier[scales] , identifier[state_parts] )
]
keyword[return] identifier[next_state_parts]
keyword[return] identifier[_fn]
|
def random_walk_normal_fn(scale=1.0, name=None):
"""Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm.
"""
def _fn(state_parts, seed):
"""Adds a normal perturbation to the input state.
Args:
state_parts: A list of `Tensor`s of any shape and real dtype representing
the state parts of the `current_state` of the Markov chain.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
Returns:
      perturbed_state_parts: A Python `list` of the `Tensor`s. Has the same
shape and type as the `state_parts`.
Raises:
ValueError: if `scale` does not broadcast with `state_parts`.
"""
with tf.compat.v1.name_scope(name, 'random_walk_normal_fn', values=[state_parts, scale, seed]):
scales = scale if mcmc_util.is_list_like(scale) else [scale]
if len(scales) == 1:
scales *= len(state_parts) # depends on [control=['if'], data=[]]
if len(state_parts) != len(scales):
raise ValueError('`scale` must broadcast with `state_parts`.') # depends on [control=['if'], data=[]]
seed_stream = distributions.SeedStream(seed, salt='RandomWalkNormalFn')
next_state_parts = [tf.random.normal(mean=state_part, stddev=scale_part, shape=tf.shape(input=state_part), dtype=state_part.dtype.base_dtype, seed=seed_stream()) for (scale_part, state_part) in zip(scales, state_parts)]
return next_state_parts # depends on [control=['with'], data=[]]
return _fn
|
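A brief usage sketch for the row above; it assumes `random_walk_normal_fn` and its `tf` / `mcmc_util` / `distributions` dependencies are importable and that the execution mode tolerates the TF1-compat `name_scope` call. The shapes and scale are illustrative, not taken from the source.

import tensorflow as tf

# Build a proposal callable; a scalar scale broadcasts against every part.
new_state_fn = random_walk_normal_fn(scale=0.5)

# Two state parts of different shapes, as a multi-part chain might carry.
state_parts = [tf.zeros([3]), tf.ones([2, 2])]

# Each part is perturbed by zero-mean normal noise with stddev 0.5.
proposed_parts = new_state_fn(state_parts, seed=42)
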
def post_helper(form_tag=True, edit_mode=False):
"""
Post's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'text',
css_class='small-12'
),
),
]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
    fieldsets = fieldsets + [
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
|
def function[post_helper, parameter[form_tag, edit_mode]]:
constant[
Post's form layout helper
]
variable[helper] assign[=] call[name[FormHelper], parameter[]]
name[helper].form_action assign[=] constant[.]
name[helper].attrs assign[=] dictionary[[<ast.Constant object at 0x7da1b1351cf0>], [<ast.Constant object at 0x7da1b1351c60>]]
name[helper].form_tag assign[=] name[form_tag]
variable[fieldsets] assign[=] list[[<ast.Call object at 0x7da1b1387ee0>]]
if <ast.UnaryOp object at 0x7da1b135e3e0> begin[:]
call[name[fieldsets].append, parameter[call[name[Row], parameter[call[name[Column], parameter[constant[threadwatch]]]]]]]
variable[fieldsets] assign[=] binary_operation[name[fieldsets] + list[[<ast.Call object at 0x7da1b135c520>]]]
name[helper].layout assign[=] call[name[Layout], parameter[<ast.Starred object at 0x7da1b135d990>]]
return[name[helper]]
|
keyword[def] identifier[post_helper] ( identifier[form_tag] = keyword[True] , identifier[edit_mode] = keyword[False] ):
literal[string]
identifier[helper] = identifier[FormHelper] ()
identifier[helper] . identifier[form_action] = literal[string]
identifier[helper] . identifier[attrs] ={ literal[string] : literal[string] }
identifier[helper] . identifier[form_tag] = identifier[form_tag]
identifier[fieldsets] =[
identifier[Row] (
identifier[Column] (
literal[string] ,
identifier[css_class] = literal[string]
),
),
]
keyword[if] keyword[not] identifier[edit_mode] :
identifier[fieldsets] . identifier[append] (
identifier[Row] (
identifier[Column] (
literal[string] ,
identifier[css_class] = literal[string]
),
),
)
identifier[fieldsets] = identifier[fieldsets] +[
identifier[ButtonHolderPanel] (
identifier[Submit] ( literal[string] , identifier[_] ( literal[string] )),
identifier[css_class] = literal[string] ,
),
]
identifier[helper] . identifier[layout] = identifier[Layout] (* identifier[fieldsets] )
keyword[return] identifier[helper]
|
def post_helper(form_tag=True, edit_mode=False):
"""
Post's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [Row(Column('text', css_class='small-12'))]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(Row(Column('threadwatch', css_class='small-12'))) # depends on [control=['if'], data=[]]
fieldsets = fieldsets + [ButtonHolderPanel(Submit('submit', _('Submit')), css_class='text-right')]
helper.layout = Layout(*fieldsets)
return helper
|
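A hypothetical sketch of how such a helper is typically attached to a django-crispy-forms form. `PostForm` and its fields are assumptions that merely match the `text` and `threadwatch` references in the layout above; they are not part of the source.

from django import forms

class PostForm(forms.Form):
    # Fields assumed from the layout above.
    text = forms.CharField(widget=forms.Textarea)
    threadwatch = forms.BooleanField(required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Edit views would pass edit_mode=True to drop the threadwatch row.
        self.helper = post_helper(edit_mode=False)
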
def has_perm(self, perm, obj, check_groups=True, approved=True):
"""
Check if user has the permission for the given object
"""
if self.user:
if self.has_user_perms(perm, obj, approved, check_groups):
return True
if self.group:
return self.has_group_perms(perm, obj, approved)
return False
|
def function[has_perm, parameter[self, perm, obj, check_groups, approved]]:
constant[
Check if user has the permission for the given object
]
if name[self].user begin[:]
if call[name[self].has_user_perms, parameter[name[perm], name[obj], name[approved], name[check_groups]]] begin[:]
return[constant[True]]
if name[self].group begin[:]
return[call[name[self].has_group_perms, parameter[name[perm], name[obj], name[approved]]]]
return[constant[False]]
|
keyword[def] identifier[has_perm] ( identifier[self] , identifier[perm] , identifier[obj] , identifier[check_groups] = keyword[True] , identifier[approved] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[user] :
keyword[if] identifier[self] . identifier[has_user_perms] ( identifier[perm] , identifier[obj] , identifier[approved] , identifier[check_groups] ):
keyword[return] keyword[True]
keyword[if] identifier[self] . identifier[group] :
keyword[return] identifier[self] . identifier[has_group_perms] ( identifier[perm] , identifier[obj] , identifier[approved] )
keyword[return] keyword[False]
|
def has_perm(self, perm, obj, check_groups=True, approved=True):
"""
Check if user has the permission for the given object
"""
if self.user:
if self.has_user_perms(perm, obj, approved, check_groups):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.group:
return self.has_group_perms(perm, obj, approved) # depends on [control=['if'], data=[]]
return False
|
def gammatone(freq, bandwidth):
"""
``Slaney, M. "An Efficient Implementation of the Patterson-Holdsworth
Auditory Filter Bank", Apple Computer Technical Report #35, 1993.``
"""
A = exp(-bandwidth)
cosw = cos(freq)
sinw = sin(freq)
sig = [1., -1.]
coeff = [cosw + s1 * (sqrt(2) + s2) * sinw for s1 in sig for s2 in sig]
numerator = [1 - A * c * z ** -1 for c in coeff]
denominator = 1 - 2 * A * cosw * z ** -1 + A ** 2 * z ** -2
filt = CascadeFilter(num / denominator for num in numerator)
return CascadeFilter(f / abs(f.freq_response(freq)) for f in filt)
|
def function[gammatone, parameter[freq, bandwidth]]:
constant[
``Slaney, M. "An Efficient Implementation of the Patterson-Holdsworth
Auditory Filter Bank", Apple Computer Technical Report #35, 1993.``
]
variable[A] assign[=] call[name[exp], parameter[<ast.UnaryOp object at 0x7da1b074fee0>]]
variable[cosw] assign[=] call[name[cos], parameter[name[freq]]]
variable[sinw] assign[=] call[name[sin], parameter[name[freq]]]
variable[sig] assign[=] list[[<ast.Constant object at 0x7da1b074f400>, <ast.UnaryOp object at 0x7da1b074de70>]]
variable[coeff] assign[=] <ast.ListComp object at 0x7da1b074d6f0>
variable[numerator] assign[=] <ast.ListComp object at 0x7da1b074ffa0>
variable[denominator] assign[=] binary_operation[binary_operation[constant[1] - binary_operation[binary_operation[binary_operation[constant[2] * name[A]] * name[cosw]] * binary_operation[name[z] ** <ast.UnaryOp object at 0x7da1b0617d60>]]] + binary_operation[binary_operation[name[A] ** constant[2]] * binary_operation[name[z] ** <ast.UnaryOp object at 0x7da1b0748820>]]]
variable[filt] assign[=] call[name[CascadeFilter], parameter[<ast.GeneratorExp object at 0x7da1b07fb370>]]
return[call[name[CascadeFilter], parameter[<ast.GeneratorExp object at 0x7da1b07fbe20>]]]
|
keyword[def] identifier[gammatone] ( identifier[freq] , identifier[bandwidth] ):
literal[string]
identifier[A] = identifier[exp] (- identifier[bandwidth] )
identifier[cosw] = identifier[cos] ( identifier[freq] )
identifier[sinw] = identifier[sin] ( identifier[freq] )
identifier[sig] =[ literal[int] ,- literal[int] ]
identifier[coeff] =[ identifier[cosw] + identifier[s1] *( identifier[sqrt] ( literal[int] )+ identifier[s2] )* identifier[sinw] keyword[for] identifier[s1] keyword[in] identifier[sig] keyword[for] identifier[s2] keyword[in] identifier[sig] ]
identifier[numerator] =[ literal[int] - identifier[A] * identifier[c] * identifier[z] **- literal[int] keyword[for] identifier[c] keyword[in] identifier[coeff] ]
identifier[denominator] = literal[int] - literal[int] * identifier[A] * identifier[cosw] * identifier[z] **- literal[int] + identifier[A] ** literal[int] * identifier[z] **- literal[int]
identifier[filt] = identifier[CascadeFilter] ( identifier[num] / identifier[denominator] keyword[for] identifier[num] keyword[in] identifier[numerator] )
keyword[return] identifier[CascadeFilter] ( identifier[f] / identifier[abs] ( identifier[f] . identifier[freq_response] ( identifier[freq] )) keyword[for] identifier[f] keyword[in] identifier[filt] )
|
def gammatone(freq, bandwidth):
"""
``Slaney, M. "An Efficient Implementation of the Patterson-Holdsworth
Auditory Filter Bank", Apple Computer Technical Report #35, 1993.``
"""
A = exp(-bandwidth)
cosw = cos(freq)
sinw = sin(freq)
sig = [1.0, -1.0]
coeff = [cosw + s1 * (sqrt(2) + s2) * sinw for s1 in sig for s2 in sig]
numerator = [1 - A * c * z ** (-1) for c in coeff]
denominator = 1 - 2 * A * cosw * z ** (-1) + A ** 2 * z ** (-2)
filt = CascadeFilter((num / denominator for num in numerator))
return CascadeFilter((f / abs(f.freq_response(freq)) for f in filt))
|
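A usage sketch, assuming the audiolazy-style environment the snippet appears to target, where `z`, `exp`, `cos`, `sin`, `sqrt`, and `CascadeFilter` come from the `audiolazy` package and frequencies are given in rad/sample. The sample rate and band values are illustrative.

from audiolazy import sHz  # assumed dependency providing unit helpers

s, Hz = sHz(44100)        # second and hertz expressed in rad/sample units
fc = 1000 * Hz            # 1 kHz centre frequency
bw = 125 * Hz             # bandwidth in the same units
filt = gammatone(fc, bw)  # cascade of four normalized first-order sections
# filt(signal) would now filter an audiolazy stream sample by sample.
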
def show_phase_matrix(sync_output_dynamic, grid_width = None, grid_height = None, iteration = None):
"""!
@brief Shows 2D matrix of phase values of oscillators at the specified iteration.
    @details User should ensure correct matrix sizes in line with the expression grid_width x grid_height, which should be equal to
              the number of oscillators; otherwise an exception is thrown. If grid_width or grid_height is not specified, then the phase matrix size
              will be calculated automatically using the square root.
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network whose phase matrix should be shown.
@param[in] grid_width (uint): Width of the phase matrix.
@param[in] grid_height (uint): Height of the phase matrix.
    @param[in] iteration (uint): Number of the simulation iteration for which the phase matrix should be allocated.
                If the iteration number is not specified, the last step of the simulation is used for the matrix allocation.
"""
    _ = plt.figure()
    phase_matrix = sync_output_dynamic.allocate_phase_matrix(grid_width, grid_height, iteration)
    plt.imshow(phase_matrix, cmap=plt.get_cmap('jet'), interpolation='kaiser', vmin=0.0, vmax=2.0 * math.pi)
    plt.show()
|
def function[show_phase_matrix, parameter[sync_output_dynamic, grid_width, grid_height, iteration]]:
constant[!
@brief Shows 2D matrix of phase values of oscillators at the specified iteration.
    @details User should ensure correct matrix sizes in line with the expression grid_width x grid_height, which should be equal to
              the number of oscillators; otherwise an exception is thrown. If grid_width or grid_height is not specified, then the phase matrix size
              will be calculated automatically using the square root.
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network whose phase matrix should be shown.
@param[in] grid_width (uint): Width of the phase matrix.
@param[in] grid_height (uint): Height of the phase matrix.
    @param[in] iteration (uint): Number of the simulation iteration for which the phase matrix should be allocated.
                If the iteration number is not specified, the last step of the simulation is used for the matrix allocation.
]
variable[_] assign[=] call[name[plt].figure, parameter[]]
variable[phase_matrix] assign[=] call[name[sync_output_dynamic].allocate_phase_matrix, parameter[name[grid_width], name[grid_height], name[iteration]]]
call[name[plt].imshow, parameter[name[phase_matrix]]]
call[name[plt].show, parameter[]]
|
keyword[def] identifier[show_phase_matrix] ( identifier[sync_output_dynamic] , identifier[grid_width] = keyword[None] , identifier[grid_height] = keyword[None] , identifier[iteration] = keyword[None] ):
literal[string]
identifier[_] = identifier[plt] . identifier[figure] ();
identifier[phase_matrix] = identifier[sync_output_dynamic] . identifier[allocate_phase_matrix] ( identifier[grid_width] , identifier[grid_height] , identifier[iteration] );
identifier[plt] . identifier[imshow] ( identifier[phase_matrix] , identifier[cmap] = identifier[plt] . identifier[get_cmap] ( literal[string] ), identifier[interpolation] = literal[string] , identifier[vmin] = literal[int] , identifier[vmax] = literal[int] * identifier[math] . identifier[pi] );
identifier[plt] . identifier[show] ();
|
def show_phase_matrix(sync_output_dynamic, grid_width=None, grid_height=None, iteration=None):
"""!
@brief Shows 2D matrix of phase values of oscillators at the specified iteration.
    @details User should ensure correct matrix sizes in line with the expression grid_width x grid_height, which should be equal to
              the number of oscillators; otherwise an exception is thrown. If grid_width or grid_height is not specified, then the phase matrix size
              will be calculated automatically using the square root.
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network whose phase matrix should be shown.
@param[in] grid_width (uint): Width of the phase matrix.
@param[in] grid_height (uint): Height of the phase matrix.
    @param[in] iteration (uint): Number of the simulation iteration for which the phase matrix should be allocated.
                If the iteration number is not specified, the last step of the simulation is used for the matrix allocation.
"""
_ = plt.figure()
phase_matrix = sync_output_dynamic.allocate_phase_matrix(grid_width, grid_height, iteration)
plt.imshow(phase_matrix, cmap=plt.get_cmap('jet'), interpolation='kaiser', vmin=0.0, vmax=2.0 * math.pi)
plt.show()
|
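A hypothetical call sketch, assuming the pyclustering-style workflow this utility belongs to; the import paths, network size, and simulation arguments are assumptions, not taken from the source.

from pyclustering.nnet import solve_type         # assumed import path
from pyclustering.nnet.sync import sync_network  # assumed import path

net = sync_network(16)  # 16 oscillators map onto a 4 x 4 phase grid
dynamic = net.simulate(50, 10, solution=solve_type.FAST, collect_dynamic=True)
show_phase_matrix(dynamic, grid_width=4, grid_height=4)  # last iteration
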
def add_formatted_field(cls, field, format_string, title=''):
"""Adds a ``list_display`` attribute showing a field in the object
using a python %formatted string.
:param field:
Name of the field in the object.
:param format_string:
        An old-style (to remain Python 2.x compatible) % string formatter
with a single variable reference. The named ``field`` attribute
will be passed to the formatter using the "%" operator.
:param title:
        Title for the column in the Django admin table. If not given, it
        defaults to a capitalized version of ``field``.
"""
global klass_count
klass_count += 1
fn_name = 'dyn_fn_%d' % klass_count
cls.list_display.append(fn_name)
if not title:
title = field.capitalize()
# python scoping is a bit weird with default values, if it isn't
# referenced the inner function won't see it, so assign it for use
_format_string = format_string
def _ref(self, obj):
return _format_string % getattr(obj, field)
_ref.short_description = title
_ref.allow_tags = True
_ref.admin_order_field = field
setattr(cls, fn_name, _ref)
|
def function[add_formatted_field, parameter[cls, field, format_string, title]]:
constant[Adds a ``list_display`` attribute showing a field in the object
using a python %formatted string.
:param field:
Name of the field in the object.
:param format_string:
        An old-style (to remain Python 2.x compatible) % string formatter
with a single variable reference. The named ``field`` attribute
will be passed to the formatter using the "%" operator.
:param title:
        Title for the column in the Django admin table. If not given, it
        defaults to a capitalized version of ``field``.
]
<ast.Global object at 0x7da20e957f40>
<ast.AugAssign object at 0x7da20e9575e0>
variable[fn_name] assign[=] binary_operation[constant[dyn_fn_%d] <ast.Mod object at 0x7da2590d6920> name[klass_count]]
call[name[cls].list_display.append, parameter[name[fn_name]]]
if <ast.UnaryOp object at 0x7da1affd65f0> begin[:]
variable[title] assign[=] call[name[field].capitalize, parameter[]]
variable[_format_string] assign[=] name[format_string]
def function[_ref, parameter[self, obj]]:
return[binary_operation[name[_format_string] <ast.Mod object at 0x7da2590d6920> call[name[getattr], parameter[name[obj], name[field]]]]]
name[_ref].short_description assign[=] name[title]
name[_ref].allow_tags assign[=] constant[True]
name[_ref].admin_order_field assign[=] name[field]
call[name[setattr], parameter[name[cls], name[fn_name], name[_ref]]]
|
keyword[def] identifier[add_formatted_field] ( identifier[cls] , identifier[field] , identifier[format_string] , identifier[title] = literal[string] ):
literal[string]
keyword[global] identifier[klass_count]
identifier[klass_count] += literal[int]
identifier[fn_name] = literal[string] % identifier[klass_count]
identifier[cls] . identifier[list_display] . identifier[append] ( identifier[fn_name] )
keyword[if] keyword[not] identifier[title] :
identifier[title] = identifier[field] . identifier[capitalize] ()
identifier[_format_string] = identifier[format_string]
keyword[def] identifier[_ref] ( identifier[self] , identifier[obj] ):
keyword[return] identifier[_format_string] % identifier[getattr] ( identifier[obj] , identifier[field] )
identifier[_ref] . identifier[short_description] = identifier[title]
identifier[_ref] . identifier[allow_tags] = keyword[True]
identifier[_ref] . identifier[admin_order_field] = identifier[field]
identifier[setattr] ( identifier[cls] , identifier[fn_name] , identifier[_ref] )
|
def add_formatted_field(cls, field, format_string, title=''):
"""Adds a ``list_display`` attribute showing a field in the object
using a python %formatted string.
:param field:
Name of the field in the object.
:param format_string:
        An old-style (to remain Python 2.x compatible) % string formatter
with a single variable reference. The named ``field`` attribute
will be passed to the formatter using the "%" operator.
:param title:
        Title for the column in the Django admin table. If not given, it
        defaults to a capitalized version of ``field``.
"""
global klass_count
klass_count += 1
fn_name = 'dyn_fn_%d' % klass_count
cls.list_display.append(fn_name)
if not title:
title = field.capitalize() # depends on [control=['if'], data=[]]
# python scoping is a bit weird with default values, if it isn't
# referenced the inner function won't see it, so assign it for use
_format_string = format_string
def _ref(self, obj):
return _format_string % getattr(obj, field)
_ref.short_description = title
_ref.allow_tags = True
_ref.admin_order_field = field
setattr(cls, fn_name, _ref)
|
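A sketch of the intended call pattern, assuming this function is bound as a classmethod on a ModelAdmin helper class in the same codebase and that the module-level `klass_count` counter exists; `BookAdmin`, its `FancyModelAdmin` base, and the `price` field are hypothetical.

class BookAdmin(FancyModelAdmin):  # hypothetical base exposing the classmethod
    list_display = ['title']

# Adds a dyn_fn_N column titled 'Price' that renders obj.price
# through the old-style %-formatter.
BookAdmin.add_formatted_field('price', '$%0.2f', title='Price')
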
def get_public_keys_der_v3(self):
"""
Return a list of DER coded X.509 public keys from the v3 signature block
"""
        if self._v3_signing_data is None:
self.parse_v3_signing_block()
public_keys = []
for signer in self._v3_signing_data:
public_keys.append(signer.public_key)
return public_keys
|
def function[get_public_keys_der_v3, parameter[self]]:
constant[
Return a list of DER coded X.509 public keys from the v3 signature block
]
if compare[name[self]._v3_signing_data equal[==] constant[None]] begin[:]
call[name[self].parse_v3_signing_block, parameter[]]
variable[public_keys] assign[=] list[[]]
for taget[name[signer]] in starred[name[self]._v3_signing_data] begin[:]
call[name[public_keys].append, parameter[name[signer].public_key]]
return[name[public_keys]]
|
keyword[def] identifier[get_public_keys_der_v3] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_v3_signing_data] == keyword[None] :
identifier[self] . identifier[parse_v3_signing_block] ()
identifier[public_keys] =[]
keyword[for] identifier[signer] keyword[in] identifier[self] . identifier[_v3_signing_data] :
identifier[public_keys] . identifier[append] ( identifier[signer] . identifier[public_key] )
keyword[return] identifier[public_keys]
|
def get_public_keys_der_v3(self):
"""
Return a list of DER coded X.509 public keys from the v3 signature block
"""
        if self._v3_signing_data is None:
self.parse_v3_signing_block() # depends on [control=['if'], data=[]]
public_keys = []
for signer in self._v3_signing_data:
public_keys.append(signer.public_key) # depends on [control=['for'], data=['signer']]
return public_keys
|
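A minimal usage sketch, assuming this method lives on an androguard-style `APK` object; the import path and the file name are assumptions.

from androguard.core.bytecodes.apk import APK  # assumed host class

apk = APK('example.apk')
for der in apk.get_public_keys_der_v3():
    # Each entry is a DER-encoded X.509 public key from a v3 signer.
    print(len(der), 'bytes')
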
def update_ostree_summary(self, release):
"""Update the ostree summary file and return a path to it"""
self.log.info('Updating the ostree summary for %s', release['name'])
self.mock_chroot(release, release['ostree_summary'])
return os.path.join(release['output_dir'], 'summary')
|
def function[update_ostree_summary, parameter[self, release]]:
constant[Update the ostree summary file and return a path to it]
call[name[self].log.info, parameter[constant[Updating the ostree summary for %s], call[name[release]][constant[name]]]]
call[name[self].mock_chroot, parameter[name[release], call[name[release]][constant[ostree_summary]]]]
return[call[name[os].path.join, parameter[call[name[release]][constant[output_dir]], constant[summary]]]]
|
keyword[def] identifier[update_ostree_summary] ( identifier[self] , identifier[release] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[release] [ literal[string] ])
identifier[self] . identifier[mock_chroot] ( identifier[release] , identifier[release] [ literal[string] ])
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[release] [ literal[string] ], literal[string] )
|
def update_ostree_summary(self, release):
"""Update the ostree summary file and return a path to it"""
self.log.info('Updating the ostree summary for %s', release['name'])
self.mock_chroot(release, release['ostree_summary'])
return os.path.join(release['output_dir'], 'summary')
|
def infer_namespaces(ac):
"""infer possible namespaces of given accession based on syntax
Always returns a list, possibly empty
>>> infer_namespaces("ENST00000530893.6")
['ensembl']
>>> infer_namespaces("ENST00000530893")
['ensembl']
>>> infer_namespaces("ENSQ00000530893")
[]
>>> infer_namespaces("NM_01234")
['refseq']
>>> infer_namespaces("NM_01234.5")
['refseq']
>>> infer_namespaces("NQ_01234.5")
[]
>>> infer_namespaces("A2BC19")
['uniprot']
>>> sorted(infer_namespaces("P12345"))
['insdc', 'uniprot']
>>> infer_namespaces("A0A022YWF9")
['uniprot']
"""
return [v for k, v in ac_namespace_regexps.items() if k.match(ac)]
|
def function[infer_namespaces, parameter[ac]]:
    constant[infer possible namespaces of a given accession based on its syntax
Always returns a list, possibly empty
>>> infer_namespaces("ENST00000530893.6")
['ensembl']
>>> infer_namespaces("ENST00000530893")
['ensembl']
>>> infer_namespaces("ENSQ00000530893")
[]
>>> infer_namespaces("NM_01234")
['refseq']
>>> infer_namespaces("NM_01234.5")
['refseq']
>>> infer_namespaces("NQ_01234.5")
[]
>>> infer_namespaces("A2BC19")
['uniprot']
>>> sorted(infer_namespaces("P12345"))
['insdc', 'uniprot']
>>> infer_namespaces("A0A022YWF9")
['uniprot']
]
return[<ast.ListComp object at 0x7da1b0cfda50>]
|
keyword[def] identifier[infer_namespaces] ( identifier[ac] ):
literal[string]
keyword[return] [ identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[ac_namespace_regexps] . identifier[items] () keyword[if] identifier[k] . identifier[match] ( identifier[ac] )]
|
def infer_namespaces(ac):
"""infer possible namespaces of given accession based on syntax
Always returns a list, possibly empty
>>> infer_namespaces("ENST00000530893.6")
['ensembl']
>>> infer_namespaces("ENST00000530893")
['ensembl']
>>> infer_namespaces("ENSQ00000530893")
[]
>>> infer_namespaces("NM_01234")
['refseq']
>>> infer_namespaces("NM_01234.5")
['refseq']
>>> infer_namespaces("NQ_01234.5")
[]
>>> infer_namespaces("A2BC19")
['uniprot']
>>> sorted(infer_namespaces("P12345"))
['insdc', 'uniprot']
>>> infer_namespaces("A0A022YWF9")
['uniprot']
"""
return [v for (k, v) in ac_namespace_regexps.items() if k.match(ac)]
|
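The function depends on a module-level `ac_namespace_regexps` mapping compiled patterns to namespace names. A minimal sketch of what such a registry might look like; the patterns below are simplified assumptions, not the source's actual definitions (which also cover insdc, uniprot, and others).

import re

ac_namespace_regexps = {
    re.compile(r'^ENS[TGP]\d+(\.\d+)?$'): 'ensembl',  # assumed, simplified
    re.compile(r'^N[MRPGC]_\d+(\.\d+)?$'): 'refseq',  # assumed, simplified
}

assert infer_namespaces('ENST00000530893.6') == ['ensembl']
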
def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):
"""Perform interpolation on a healpix cube. If egy is None
then interpolation will be performed on the existing energy
planes.
"""
shape = np.broadcast(lon, lat, egy).shape
lon = lon * np.ones(shape)
lat = lat * np.ones(shape)
theta = np.pi / 2. - np.radians(lat)
phi = np.radians(lon)
vals = []
for i, _ in enumerate(self.hpx.evals):
v = hp.pixelfunc.get_interp_val(self.counts[i], theta,
phi, nest=self.hpx.nest)
vals += [np.expand_dims(np.array(v, ndmin=1), -1)]
vals = np.concatenate(vals, axis=-1)
if egy is None:
return vals.T
egy = egy * np.ones(shape)
if interp_log:
xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))
else:
xvals = utils.val_to_pix(self.hpx.evals, egy)
vals = vals.reshape((-1, vals.shape[-1]))
xvals = np.ravel(xvals)
v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals],
order=1)
return v.reshape(shape)
|
def function[_interpolate_cube, parameter[self, lon, lat, egy, interp_log]]:
constant[Perform interpolation on a healpix cube. If egy is None
then interpolation will be performed on the existing energy
planes.
]
variable[shape] assign[=] call[name[np].broadcast, parameter[name[lon], name[lat], name[egy]]].shape
variable[lon] assign[=] binary_operation[name[lon] * call[name[np].ones, parameter[name[shape]]]]
variable[lat] assign[=] binary_operation[name[lat] * call[name[np].ones, parameter[name[shape]]]]
variable[theta] assign[=] binary_operation[binary_operation[name[np].pi / constant[2.0]] - call[name[np].radians, parameter[name[lat]]]]
variable[phi] assign[=] call[name[np].radians, parameter[name[lon]]]
variable[vals] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da207f9a470>, <ast.Name object at 0x7da207f98940>]]] in starred[call[name[enumerate], parameter[name[self].hpx.evals]]] begin[:]
variable[v] assign[=] call[name[hp].pixelfunc.get_interp_val, parameter[call[name[self].counts][name[i]], name[theta], name[phi]]]
<ast.AugAssign object at 0x7da207f994b0>
variable[vals] assign[=] call[name[np].concatenate, parameter[name[vals]]]
if compare[name[egy] is constant[None]] begin[:]
return[name[vals].T]
variable[egy] assign[=] binary_operation[name[egy] * call[name[np].ones, parameter[name[shape]]]]
if name[interp_log] begin[:]
variable[xvals] assign[=] call[name[utils].val_to_pix, parameter[call[name[np].log, parameter[name[self].hpx.evals]], call[name[np].log, parameter[name[egy]]]]]
variable[vals] assign[=] call[name[vals].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da207f9ba00>, <ast.Subscript object at 0x7da207f98370>]]]]
variable[xvals] assign[=] call[name[np].ravel, parameter[name[xvals]]]
variable[v] assign[=] call[name[map_coordinates], parameter[name[vals], list[[<ast.Call object at 0x7da207f9a5c0>, <ast.Name object at 0x7da207f9b400>]]]]
return[call[name[v].reshape, parameter[name[shape]]]]
|
keyword[def] identifier[_interpolate_cube] ( identifier[self] , identifier[lon] , identifier[lat] , identifier[egy] = keyword[None] , identifier[interp_log] = keyword[True] ):
literal[string]
identifier[shape] = identifier[np] . identifier[broadcast] ( identifier[lon] , identifier[lat] , identifier[egy] ). identifier[shape]
identifier[lon] = identifier[lon] * identifier[np] . identifier[ones] ( identifier[shape] )
identifier[lat] = identifier[lat] * identifier[np] . identifier[ones] ( identifier[shape] )
identifier[theta] = identifier[np] . identifier[pi] / literal[int] - identifier[np] . identifier[radians] ( identifier[lat] )
identifier[phi] = identifier[np] . identifier[radians] ( identifier[lon] )
identifier[vals] =[]
keyword[for] identifier[i] , identifier[_] keyword[in] identifier[enumerate] ( identifier[self] . identifier[hpx] . identifier[evals] ):
identifier[v] = identifier[hp] . identifier[pixelfunc] . identifier[get_interp_val] ( identifier[self] . identifier[counts] [ identifier[i] ], identifier[theta] ,
identifier[phi] , identifier[nest] = identifier[self] . identifier[hpx] . identifier[nest] )
identifier[vals] +=[ identifier[np] . identifier[expand_dims] ( identifier[np] . identifier[array] ( identifier[v] , identifier[ndmin] = literal[int] ),- literal[int] )]
identifier[vals] = identifier[np] . identifier[concatenate] ( identifier[vals] , identifier[axis] =- literal[int] )
keyword[if] identifier[egy] keyword[is] keyword[None] :
keyword[return] identifier[vals] . identifier[T]
identifier[egy] = identifier[egy] * identifier[np] . identifier[ones] ( identifier[shape] )
keyword[if] identifier[interp_log] :
identifier[xvals] = identifier[utils] . identifier[val_to_pix] ( identifier[np] . identifier[log] ( identifier[self] . identifier[hpx] . identifier[evals] ), identifier[np] . identifier[log] ( identifier[egy] ))
keyword[else] :
identifier[xvals] = identifier[utils] . identifier[val_to_pix] ( identifier[self] . identifier[hpx] . identifier[evals] , identifier[egy] )
identifier[vals] = identifier[vals] . identifier[reshape] ((- literal[int] , identifier[vals] . identifier[shape] [- literal[int] ]))
identifier[xvals] = identifier[np] . identifier[ravel] ( identifier[xvals] )
identifier[v] = identifier[map_coordinates] ( identifier[vals] ,[ identifier[np] . identifier[arange] ( identifier[vals] . identifier[shape] [ literal[int] ]), identifier[xvals] ],
identifier[order] = literal[int] )
keyword[return] identifier[v] . identifier[reshape] ( identifier[shape] )
|
def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):
"""Perform interpolation on a healpix cube. If egy is None
then interpolation will be performed on the existing energy
planes.
"""
shape = np.broadcast(lon, lat, egy).shape
lon = lon * np.ones(shape)
lat = lat * np.ones(shape)
theta = np.pi / 2.0 - np.radians(lat)
phi = np.radians(lon)
vals = []
for (i, _) in enumerate(self.hpx.evals):
v = hp.pixelfunc.get_interp_val(self.counts[i], theta, phi, nest=self.hpx.nest)
vals += [np.expand_dims(np.array(v, ndmin=1), -1)] # depends on [control=['for'], data=[]]
vals = np.concatenate(vals, axis=-1)
if egy is None:
return vals.T # depends on [control=['if'], data=[]]
egy = egy * np.ones(shape)
if interp_log:
xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy)) # depends on [control=['if'], data=[]]
else:
xvals = utils.val_to_pix(self.hpx.evals, egy)
vals = vals.reshape((-1, vals.shape[-1]))
xvals = np.ravel(xvals)
v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals], order=1)
return v.reshape(shape)
|
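The energy axis is handled separately from the sky interpolation: `val_to_pix` maps each requested energy to a fractional plane index (in log space by default), which `map_coordinates` then interpolates linearly. A standalone sketch of that mapping, assuming `val_to_pix` performs inverse linear interpolation over the plane centres.

import numpy as np

def val_to_pix(center, val):
    # Fractional index of `val` within the monotonic grid `center`.
    return np.interp(val, center, np.arange(len(center)))

evals = np.array([1e2, 1e3, 1e4])                 # plane energies
print(val_to_pix(np.log(evals), np.log(316.23)))  # ~0.5: halfway in log space
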
def _left_zero_blocks(self, r):
"""Number of blocks with zeros from the left in block row `r`."""
if not self._include_off_diagonal:
return r
elif not self._upper:
return 0
elif self._include_diagonal:
return r
else:
return r + 1
|
def function[_left_zero_blocks, parameter[self, r]]:
constant[Number of blocks with zeros from the left in block row `r`.]
if <ast.UnaryOp object at 0x7da1b1caf370> begin[:]
return[name[r]]
|
keyword[def] identifier[_left_zero_blocks] ( identifier[self] , identifier[r] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_include_off_diagonal] :
keyword[return] identifier[r]
keyword[elif] keyword[not] identifier[self] . identifier[_upper] :
keyword[return] literal[int]
keyword[elif] identifier[self] . identifier[_include_diagonal] :
keyword[return] identifier[r]
keyword[else] :
keyword[return] identifier[r] + literal[int]
|
def _left_zero_blocks(self, r):
"""Number of blocks with zeros from the left in block row `r`."""
if not self._include_off_diagonal:
return r # depends on [control=['if'], data=[]]
elif not self._upper:
return 0 # depends on [control=['if'], data=[]]
elif self._include_diagonal:
return r # depends on [control=['if'], data=[]]
else:
return r + 1
|
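A small self-contained restatement of the branch logic, with the four cases spelled out for block row r = 2 as a sanity check of the counting; the instance attributes become plain arguments here.

def left_zero_blocks(r, include_off_diagonal, upper, include_diagonal):
    # Mirrors the method above without the instance state.
    if not include_off_diagonal:
        return r
    if not upper:
        return 0
    return r if include_diagonal else r + 1

assert left_zero_blocks(2, False, True, True) == 2   # diagonal blocks only
assert left_zero_blocks(2, True, False, True) == 0   # lower triangular
assert left_zero_blocks(2, True, True, True) == 2    # upper incl. diagonal
assert left_zero_blocks(2, True, True, False) == 3   # strictly upper
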
def configurations(self):
"""Configurations from uwsgiconf module."""
if self._confs is not None:
return self._confs
with output_capturing():
module = self.load(self.fpath)
confs = getattr(module, CONFIGS_MODULE_ATTR)
confs = listify(confs)
self._confs = confs
return confs
|
def function[configurations, parameter[self]]:
constant[Configurations from uwsgiconf module.]
if compare[name[self]._confs is_not constant[None]] begin[:]
return[name[self]._confs]
with call[name[output_capturing], parameter[]] begin[:]
variable[module] assign[=] call[name[self].load, parameter[name[self].fpath]]
variable[confs] assign[=] call[name[getattr], parameter[name[module], name[CONFIGS_MODULE_ATTR]]]
variable[confs] assign[=] call[name[listify], parameter[name[confs]]]
name[self]._confs assign[=] name[confs]
return[name[confs]]
|
keyword[def] identifier[configurations] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_confs] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_confs]
keyword[with] identifier[output_capturing] ():
identifier[module] = identifier[self] . identifier[load] ( identifier[self] . identifier[fpath] )
identifier[confs] = identifier[getattr] ( identifier[module] , identifier[CONFIGS_MODULE_ATTR] )
identifier[confs] = identifier[listify] ( identifier[confs] )
identifier[self] . identifier[_confs] = identifier[confs]
keyword[return] identifier[confs]
|
def configurations(self):
"""Configurations from uwsgiconf module."""
if self._confs is not None:
return self._confs # depends on [control=['if'], data=[]]
with output_capturing():
module = self.load(self.fpath)
confs = getattr(module, CONFIGS_MODULE_ATTR)
confs = listify(confs) # depends on [control=['with'], data=[]]
self._confs = confs
return confs
|
def register_mark(key=None):
"""Returns a decorator registering a mark class in the mark type registry.
If no key is provided, the class name is used as a key. A key is provided
for each core bqplot mark so that the frontend can use
this key regardless of the kernel language.
"""
def wrap(mark):
name = key if key is not None else mark.__module__ + mark.__name__
Mark.mark_types[name] = mark
return mark
return wrap
|
def function[register_mark, parameter[key]]:
constant[Returns a decorator registering a mark class in the mark type registry.
If no key is provided, the class name is used as a key. A key is provided
for each core bqplot mark so that the frontend can use
this key regardless of the kernel language.
]
def function[wrap, parameter[mark]]:
variable[name] assign[=] <ast.IfExp object at 0x7da1b1f39e70>
call[name[Mark].mark_types][name[name]] assign[=] name[mark]
return[name[mark]]
return[name[wrap]]
|
keyword[def] identifier[register_mark] ( identifier[key] = keyword[None] ):
literal[string]
keyword[def] identifier[wrap] ( identifier[mark] ):
identifier[name] = identifier[key] keyword[if] identifier[key] keyword[is] keyword[not] keyword[None] keyword[else] identifier[mark] . identifier[__module__] + identifier[mark] . identifier[__name__]
identifier[Mark] . identifier[mark_types] [ identifier[name] ]= identifier[mark]
keyword[return] identifier[mark]
keyword[return] identifier[wrap]
|
def register_mark(key=None):
"""Returns a decorator registering a mark class in the mark type registry.
If no key is provided, the class name is used as a key. A key is provided
for each core bqplot mark so that the frontend can use
this key regardless of the kernel language.
"""
def wrap(mark):
name = key if key is not None else mark.__module__ + mark.__name__
Mark.mark_types[name] = mark
return mark
return wrap
|
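A usage sketch of the decorator, assuming the bqplot-style `Mark` base class and its `mark_types` registry referenced above; `FancyScatter`, `LocalMark`, and the explicit key are hypothetical.

@register_mark('bqplot.FancyScatter')  # explicit key shared with the frontend
class FancyScatter(Mark):
    pass

@register_mark()  # no key: registered under module + class name
class LocalMark(Mark):
    pass

assert Mark.mark_types['bqplot.FancyScatter'] is FancyScatter
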
def survival_rate(work_db):
"""Calcuate the survival rate for the results in a WorkDB.
"""
kills = sum(r.is_killed for _, r in work_db.results)
num_results = work_db.num_results
if not num_results:
return 0
return (1 - kills / num_results) * 100
|
def function[survival_rate, parameter[work_db]]:
    constant[Calculate the survival rate for the results in a WorkDB.
]
variable[kills] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b078b1c0>]]
variable[num_results] assign[=] name[work_db].num_results
if <ast.UnaryOp object at 0x7da1b078ba30> begin[:]
return[constant[0]]
return[binary_operation[binary_operation[constant[1] - binary_operation[name[kills] / name[num_results]]] * constant[100]]]
|
keyword[def] identifier[survival_rate] ( identifier[work_db] ):
literal[string]
identifier[kills] = identifier[sum] ( identifier[r] . identifier[is_killed] keyword[for] identifier[_] , identifier[r] keyword[in] identifier[work_db] . identifier[results] )
identifier[num_results] = identifier[work_db] . identifier[num_results]
keyword[if] keyword[not] identifier[num_results] :
keyword[return] literal[int]
keyword[return] ( literal[int] - identifier[kills] / identifier[num_results] )* literal[int]
|
def survival_rate(work_db):
"""Calcuate the survival rate for the results in a WorkDB.
"""
kills = sum((r.is_killed for (_, r) in work_db.results))
num_results = work_db.num_results
if not num_results:
return 0 # depends on [control=['if'], data=[]]
return (1 - kills / num_results) * 100
|
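A worked check of the formula: with 12 results of which 9 are killed, the rate is (1 - 9/12) * 100 = 25.0. The stand-in below fakes only the two attributes the function reads; a real `WorkDB` is not constructed.

from types import SimpleNamespace

results = [(i, SimpleNamespace(is_killed=i < 9)) for i in range(12)]
fake_db = SimpleNamespace(results=results, num_results=12)
print(survival_rate(fake_db))  # 25.0
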